| code | code_codestyle | style_context | style_context_codestyle | label |
| --- | --- | --- | --- | --- |
| stringlengths 87-55.2k | int64 0-349 | stringlengths 135-49.1k | int64 0-349 | int64 0-1 |
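Each row below is one flattened sample: a block of Python source followed by its label cells (`| 1 |`). A minimal loader sketch, assuming the table is published as a Hugging Face `datasets` table; the dataset path here is a placeholder, not the real identifier:

from datasets import load_dataset

ds = load_dataset("org/code-style-pairs", split="train")  # placeholder path
row = ds[0]
print(row["label"], row["code_codestyle"], len(row["code"]))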
'''Sum of all amicable numbers below a given limit (Project Euler problem 21).'''
from math import sqrt


def sum_of_divisors(n: int) -> int:
    '''Return the sum of the proper divisors of n.'''
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(limit: int = 10000) -> int:
    '''Return the sum of all amicable numbers below ``limit``.'''
    total = sum(
        i
        for i in range(1, limit)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
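A quick self-check against a known fact (220 and 284 form the smallest amicable pair):

# Sanity check: the proper divisors of 220 sum to 284 and vice versa.
# assert sum_of_divisors(220) == 284 and sum_of_divisors(284) == 220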
| 1 |

'''simple docstring'''
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
logger = logging.getLogger(__name__)


@dataclass(frozen=True)
class InputExample:
    """A single training/test example for the HANS dataset."""

    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None


@dataclass(frozen=True)
class InputFeatures:
    """A single set of features of data."""

    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None
if is_torch_available():
    import torch
    from torch.utils.data import Dataset

    class HansDataset(Dataset):
        features: List[InputFeatures]

        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()

            cached_features_file = os.path.join(
                data_dir,
                "cached_{}_{}_{}_{}".format(
                    "dev" if evaluate else "train",
                    tokenizer.__class__.__name__,
                    str(max_seq_length),
                    task,
                ),
            )
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list

            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = (
                        processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
                    )
                    logger.info("Training examples: %s", len(examples))
                    self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)
                    logger.info("Saving features into cached file %s", cached_features_file)
                    torch.save(self.features, cached_features_file)

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]

        def get_labels(self):
            return self.label_list
if is_tf_available():
    import tensorflow as tf

    class TFHansDataset:
        features: List[InputFeatures]

        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = 128,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list

            examples = processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
            self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)

            def gen():
                for ex_index, ex in tqdm.tqdm(enumerate(self.features), desc="convert examples to features"):
                    if ex_index % 10000 == 0:
                        logger.info("Writing example %d of %d" % (ex_index, len(examples)))

                    yield (
                        {
                            "example_id": 0,
                            "input_ids": ex.input_ids,
                            "attention_mask": ex.attention_mask,
                            "token_type_ids": ex.token_type_ids,
                        },
                        ex.label,
                    )

            # Dtypes below reconstruct the digit-mangled "tf.intaa" tokens;
            # int32 inputs with an int64 label follow the usual TF dataset pattern.
            self.dataset = tf.data.Dataset.from_generator(
                gen,
                (
                    {
                        "example_id": tf.int32,
                        "input_ids": tf.int32,
                        "attention_mask": tf.int32,
                        "token_type_ids": tf.int32,
                    },
                    tf.int64,
                ),
                (
                    {
                        "example_id": tf.TensorShape([]),
                        "input_ids": tf.TensorShape([None, None]),
                        "attention_mask": tf.TensorShape([None, None]),
                        "token_type_ids": tf.TensorShape([None, None]),
                    },
                    tf.TensorShape([]),
                ),
            )

        def get_dataset(self):
            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]

        def get_labels(self):
            return self.label_list
class HansProcessor(DataProcessor):
    """Processor for the HANS data set."""

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_train_set.txt")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_evaluation_set.txt")), "dev")

    def get_labels(self):
        """See base class."""
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for i, line in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            pairID = line[7][2:] if line[7].startswith("ex") else line[7]
            label = line[0]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, pairID=pairID))
        return examples


def hans_convert_examples_to_features(
    examples: List[InputExample],
    label_list: List[str],
    max_length: int,
    tokenizer: PreTrainedTokenizer,
):
    """Converts a list of InputExamples to InputFeatures with the given tokenizer."""
    label_map = {label: i for i, label in enumerate(label_list)}

    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples), desc="convert examples to features"):
        if ex_index % 10000 == 0:
            logger.info("Writing example %d" % (ex_index))

        inputs = tokenizer(
            example.text_a,
            example.text_b,
            add_special_tokens=True,
            max_length=max_length,
            padding="max_length",
            truncation=True,
            return_overflowing_tokens=True,
        )

        label = label_map[example.label] if example.label in label_map else 0
        pairID = int(example.pairID)

        features.append(InputFeatures(**inputs, label=label, pairID=pairID))

    for i, example in enumerate(examples[:5]):
        logger.info("*** Example ***")
        logger.info(f"guid: {example}")
        logger.info(f"features: {features[i]}")

    return features


hans_tasks_num_labels = {
    "hans": 3,
}

hans_processors = {
    "hans": HansProcessor,
}
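A hedged usage sketch for the classes above (the data directory and tokenizer checkpoint are placeholders, not part of the original file):

# Usage sketch (placeholder paths/checkpoint):
# from transformers import AutoTokenizer
# tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
# dataset = HansDataset(data_dir="./hans", tokenizer=tokenizer, task="hans", max_seq_length=128)
# print(len(dataset), dataset.get_labels())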
| 1 | 1 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
    import torch

    from transformers.activations import gelu_new, gelu_python, get_activation


@require_torch
class TestActivations(unittest.TestCase):
    def test_gelu_versions(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x)))
        self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x)))

    def test_gelu_10(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        gelu10 = get_activation("gelu_10")

        y_gelu = torch_builtin(x)
        y_gelu_10 = gelu10(x)

        clipped_mask = torch.where(y_gelu_10 < 10.0, 1, 0)

        self.assertTrue(torch.max(y_gelu_10).item() == 10.0)
        self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_10 * clipped_mask))

    def test_get_activation(self):
        get_activation("gelu")
        get_activation("gelu_10")
        get_activation("gelu_fast")
        get_activation("gelu_new")
        get_activation("gelu_python")
        get_activation("gelu_pytorch_tanh")
        get_activation("linear")
        get_activation("mish")
        get_activation("quick_gelu")
        get_activation("relu")
        get_activation("sigmoid")
        get_activation("silu")
        get_activation("swish")
        get_activation("tanh")

        with self.assertRaises(KeyError):
            get_activation("bogus")
        with self.assertRaises(KeyError):
            get_activation(None)

    def test_activations_are_distinct_objects(self):
        act1 = get_activation("gelu")
        act1.a = 1
        act2 = get_activation("gelu")
        self.assertEqual(act1.a, 1)
        with self.assertRaises(AttributeError):
            _ = act2.a
| 1 |

'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}


class LlamaConfig(PretrainedConfig):
    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads

        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
| 1 | 1 |
'''simple docstring'''
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    '''Return the relative distance (iteration count / max_step) of the point (x, y).'''
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new

        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def get_black_and_white_rgb(distance: float) -> tuple:
    '''Black if the point belongs to the Mandelbrot set, white otherwise.'''
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    '''Black inside the set; hue-coded by the relative distance outside of it.'''
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))


def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    '''Render the Mandelbrot set into a PIL image.'''
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height

            distance = get_distance(figure_x, figure_y, max_step)

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)

    return img


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # colored version, full figure
    img = get_image()

    # uncomment for colored version, different section, zoomed in
    # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
    # figure_width = 0.8)

    # uncomment for black and white version, full figure
    # img = get_image(use_distance_color_coding = False)

    # uncomment to save the image
    # img.save("mandelbrot.png")

    img.show()
| 1 |

'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class StableDiffusionUpscalePipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet_upscale(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 32, 64), layers_per_block=2, sample_size=32, in_channels=7, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, attention_head_dim=8, use_linear_projection=True, only_cross_attention=(True, True, False), num_class_embeds=100,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512,
        )
        return CLIPTextModel(config)

    def test_stable_diffusion_upscale(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, max_noise_level=350,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt], image=low_res_image, generator=generator, guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np",
        )
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt], image=low_res_image, generator=generator, guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np", return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
        expected_slice = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_upscale_batch(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, max_noise_level=350,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        output = sd_pipe(
            2 * [prompt], image=2 * [low_res_image], guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np",
        )
        image = output.images
        assert image.shape[0] == 2

        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt], image=low_res_image, generator=generator, num_images_per_prompt=2, guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np",
        )
        image = output.images
        assert image.shape[0] == 2

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_upscale_fp16(self):
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # put models in fp16, except vae as it overflows in fp16
        unet = unet.half()
        text_encoder = text_encoder.half()

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, max_noise_level=350,
        )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = sd_pipe(
            [prompt], image=low_res_image, generator=generator, num_inference_steps=2, output_type="np",
        ).images

        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class StableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_upscale_pipeline(self):
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat.npy"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "a cat sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=image, generator=generator, output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-3

    def test_stable_diffusion_upscale_pipeline_fp16(self):
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat_fp16.npy"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(
            model_id, torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "a cat sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=image, generator=generator, output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(
            model_id, torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        prompt = "a cat sitting on a park bench"
        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt, image=image, generator=generator, num_inference_steps=5, output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.9 GB is allocated
        assert mem_bytes < 2.9 * 10**9
| 1 | 1 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
logger = logging.get_logger(__name__)


class ImageGPTFeatureExtractor(ImageGPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ImageGPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 1 |

'''simple docstring'''
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed(TransformedDistribution):
    def __init__(self, base_distribution: Distribution, loc=None, scale=None, event_dim=0):
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc

        super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)])

    @property
    def mean(self):
        """Returns the mean of the distribution."""
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance(self):
        """Returns the variance of the distribution."""
        return self.base_dist.variance * self.scale**2

    @property
    def stddev(self):
        """Returns the standard deviation of the distribution."""
        return self.variance.sqrt()


class ParameterProjection(nn.Module):
    def __init__(self, in_features: int, args_dim: Dict[str, int], domain_map: Callable[..., Tuple[torch.Tensor]], **kwargs):
        super().__init__(**kwargs)
        self.args_dim = args_dim
        self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()])
        self.domain_map = domain_map

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor]:
        params_unbounded = [proj(x) for proj in self.proj]
        return self.domain_map(*params_unbounded)


class LambdaLayer(nn.Module):
    def __init__(self, function):
        super().__init__()
        self.function = function

    def forward(self, x, *args):
        return self.function(x, *args)


class DistributionOutput:
    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]

    def __init__(self, dim: int = 1):
        self.dim = dim
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}

    def _base_distribution(self, distr_args):
        if self.dim == 1:
            return self.distribution_class(*distr_args)
        else:
            return Independent(self.distribution_class(*distr_args), 1)

    def distribution(self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None):
        distr = self._base_distribution(distr_args)
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim)

    @property
    def event_shape(self) -> Tuple:
        """Shape of each individual event produced by the distributions this object constructs."""
        return () if self.dim == 1 else (self.dim,)

    @property
    def event_dim(self) -> int:
        """Number of event dimensions, i.e. the length of the `event_shape` tuple."""
        return len(self.event_shape)

    @property
    def value_in_support(self) -> float:
        """A value in the support of the distribution, usable as a safe padding value."""
        return 0.0

    def get_parameter_projection(self, in_features: int) -> nn.Module:
        """Return the parameter projection layer mapping inputs to distribution parameters."""
        return ParameterProjection(
            in_features=in_features,
            args_dim=self.args_dim,
            domain_map=LambdaLayer(self.domain_map),
        )

    def domain_map(self, *args: torch.Tensor):
        """Convert raw projections to the right shape and domain of each parameter."""
        raise NotImplementedError()

    @staticmethod
    def squareplus(x: torch.Tensor) -> torch.Tensor:
        """Helper that maps inputs to the positive orthant."""
        return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0
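
# Note on squareplus: f(x) = (x + sqrt(x^2 + 4)) / 2 is a smooth, softplus-like
# map from the reals onto (0, inf); the subclasses below use it to keep scale,
# df, and total_count parameters strictly positive.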
class StudentTOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT

    @classmethod
    def domain_map(cls, df: torch.Tensor, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        df = 2.0 + cls.squareplus(df)
        return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)


class NormalOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal

    @classmethod
    def domain_map(cls, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        return loc.squeeze(-1), scale.squeeze(-1)


class NegativeBinomialOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial

    @classmethod
    def domain_map(cls, total_count: torch.Tensor, logits: torch.Tensor):
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)

    def _base_distribution(self, distr_args):
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count, logits=logits)
        else:
            return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)

    # Overwrites the parent method: negative binomial samples must stay integers,
    # so instead of an affine transform we scale the distribution's parameters.
    def distribution(self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None):
        total_count, logits = distr_args

        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()

        return self._base_distribution((total_count, logits))
| 1 | 1 |
'''simple docstring'''
import warnings
from diffusers import StableDiffusionImg2ImgPipeline  # noqa F401
warnings.warn(
'The `image_to_image.py` script is outdated. Please use directly `from diffusers import'
' StableDiffusionImg2ImgPipeline` instead.'
)
| 1 |

'''simple docstring'''
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
_CITATION = '\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n'
_DESCRIPTION = '\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.\n'
_KWARGS_DESCRIPTION = r'\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting "1/2" to "\\frac{1}{2}")\n\nExamples:\n >>> metric = datasets.load_metric("competition_math")\n >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])\n >>> print(results)\n {\'accuracy\': 1.0}\n'


@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CompetitionMathMetric(datasets.Metric):
    """Accuracy metric for the MATH dataset."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string"),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/hendrycks/math",
            codebase_urls=["https://github.com/hendrycks/math"],
        )

    def _compute(self, predictions, references):
        """Returns the accuracy after canonicalizing inputs."""
        n_correct = 0.0
        for i, j in zip(predictions, references):
            n_correct += 1.0 if math_equivalence.is_equiv(i, j) else 0.0
        accuracy = n_correct / len(predictions)
        return {
            "accuracy": accuracy,
        }
| 1 | 1 |
'''simple docstring'''
from __future__ import annotations
from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray


def polar_force(magnitude: float, angle: float, radian_mode: bool = False) -> list[float]:
    '''Resolve a polar force (magnitude, angle) into rectangular components [force_x, force_y].'''
    if radian_mode:
        return [magnitude * cos(angle), magnitude * sin(angle)]
    return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]


def in_static_equilibrium(forces: NDArray[float64], location: NDArray[float64], eps: float = 10**-1) -> bool:
    '''Check if a system of forces is in static equilibrium (net moment close to zero).'''
    # summation of moments is zero
    moments: NDArray[float64] = cross(location, forces)
    sum_moments: float = sum(moments)
    return abs(sum_moments) < eps


if __name__ == "__main__":
    # Test to check if it works
    forces = array(
        [
            polar_force(718.4, 180 - 30),
            polar_force(879.54, 45),
            polar_force(100, -90),
        ]
    )

    location: NDArray[float64] = array([[0, 0], [0, 0], [0, 0]])

    assert in_static_equilibrium(forces, location)

    # Problem 1 in image_data/2D_problems.jpg
    forces = array(
        [
            polar_force(30 * 9.81, 15),
            polar_force(215, 180 - 45),
            polar_force(264, 90 - 30),
        ]
    )

    location = array([[0, 0], [0, 0], [0, 0]])

    assert in_static_equilibrium(forces, location)

    # Problem in image_data/2D_problems_1.jpg
    forces = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])
    location = array([[0, 0], [6, 0], [10, 0], [12, 0]])

    assert in_static_equilibrium(forces, location)

    import doctest

    doctest.testmod()
| 1 |

'''simple docstring'''
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def check_model_parameters(model_a, model_b, did_step, iteration):
    for param, grad_param in zip(model_a.parameters(), model_b.parameters()):
        if not param.requires_grad:
            continue
        if not did_step:
            # Grads should not be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is False
            ), f"Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"
        else:
            # Grads should be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is True
            ), f"Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"


def step_model(model, input, target, accelerator, do_backward=True):
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device))
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)


def get_training_setup(accelerator, sched=False):
    '''Returns everything needed to perform basic training'''
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80)
    dataloader = DataLoader(dset, batch_size=16)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters(), lr=1e-3)
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3)
        sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
def test_noop_sync(accelerator):
    # Test when on a single CPU or GPU that the context manager does nothing
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model, ddp_model, True, iteration)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad, ddp_param.grad
            ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_distributed_sync(accelerator):
    # Test on a distributed setup that the context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator, False)
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()


def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()

        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()

        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f'Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
    GradientState._reset_state()
def test_dataloader_break():
    accelerator = Accelerator()

    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None


def main():
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print("**Test `accumulate` gradient accumulation with dataloader break**")
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("**Test NOOP `no_sync` context manager**")
        test_noop_sync(accelerator)
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("**Test Distributed `no_sync` context manager**")
        test_distributed_sync(accelerator)
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation(split_batch, dispatch_batches)

    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("<", "2.0") or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                "`split_batches=False`, `dispatch_batches=False`**",
            )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
| 1 | 1 |
'''simple docstring'''
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import f1_score
import datasets
_CITATION = '\\n @inproceedings{kakwani2020indicnlpsuite,\n title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},\n author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},\n year={2020},\n booktitle={Findings of EMNLP},\n}\n'
_DESCRIPTION = '\\n IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide\n variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.\n'
_KWARGS_DESCRIPTION = '\nCompute IndicGLUE evaluation metric associated to each IndicGLUE dataset.\nArgs:\n predictions: list of predictions to score (as int64),\n except for \'cvit-mkb-clsr\' where each prediction is a vector (of float32).\n references: list of ground truth labels corresponding to the predictions (as int64),\n except for \'cvit-mkb-clsr\' where each reference is a vector (of float32).\nReturns: depending on the IndicGLUE subset, one or several of:\n "accuracy": Accuracy\n "f1": F1 score\n "precision": Precision@10\nExamples:\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wnli\') # \'wnli\' or any of ["copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wiki-ner\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'cvit-mkb-clsr\')\n >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'precision@10\': 1.0}\n\n'
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def precision_at_10(en_sentvecs, in_sentvecs):
    en_sentvecs = np.array(en_sentvecs)
    in_sentvecs = np.array(in_sentvecs)
    n = en_sentvecs.shape[0]

    # mean centering
    en_sentvecs = en_sentvecs - np.mean(en_sentvecs, axis=0)
    in_sentvecs = in_sentvecs - np.mean(in_sentvecs, axis=0)

    sim = cdist(en_sentvecs, in_sentvecs, "cosine")
    actual = np.array(range(n))
    preds = sim.argsort(axis=1)[:, :10]
    matches = np.any(preds == actual[:, None], axis=1)
    return float(matches.mean())
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class IndicGlue(datasets.Metric):
    def _info(self):
        if self.config_name not in [
            "wnli",
            "copa",
            "sna",
            "csqa",
            "wstp",
            "inltkh",
            "bbca",
            "cvit-mkb-clsr",
            "iitp-mr",
            "iitp-pr",
            "actsa-sc",
            "md",
            "wiki-ner",
        ]:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '
                '"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '
                '"wiki-ner"]'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int64")
                    if self.config_name != "cvit-mkb-clsr"
                    else datasets.Sequence(datasets.Value("float32")),
                    "references": datasets.Value("int64")
                    if self.config_name != "cvit-mkb-clsr"
                    else datasets.Sequence(datasets.Value("float32")),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format="numpy" if self.config_name != "cvit-mkb-clsr" else None,
        )

    def _compute(self, predictions, references):
        if self.config_name == "cvit-mkb-clsr":
            return {"precision@10": precision_at_10(predictions, references)}
        elif self.config_name in ["wiki-ner"]:
            return acc_and_f1(predictions, references)
        elif self.config_name in [
            "wnli",
            "copa",
            "sna",
            "csqa",
            "wstp",
            "inltkh",
            "bbca",
            "iitp-mr",
            "iitp-pr",
            "actsa-sc",
            "md",
        ]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '
                '"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '
                '"wiki-ner"]'
            )
| 1 |

'''simple docstring'''
def greatest_common_divisor(x: int, y: int) -> int:
    '''Euclidean algorithm for the greatest common divisor.'''
    return x if y == 0 else greatest_common_divisor(y, x % y)


def lcm(x: int, y: int) -> int:
    '''Least common multiple of x and y.'''
    return (x * y) // greatest_common_divisor(x, y)


def solution(n: int = 20) -> int:
    '''Smallest positive number evenly divisible by all numbers from 1 to n.'''
    g = 1
    for i in range(1, n + 1):
        g = lcm(g, i)
    return g
if __name__ == "__main__":
print(f"{solution() = }")
| 1 | 1 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class ImageSegmentationTool(PipelineTool):
    description = (
        "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
        "It takes two arguments named `image` which should be the original image, and `label` which should be a text "
        "describing the elements what should be identified in the segmentation mask. The tool returns the mask."
    )
    default_checkpoint = "CIDAS/clipseg-rd64-refined"
    name = "image_segmenter"
    model_class = CLIPSegForImageSegmentation

    inputs = ["image", "text"]
    outputs = ["image"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", label: str):
        return self.pre_processor(text=[label], images=[image], padding=True, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        array = outputs.cpu().detach().numpy()
        # Binarize the logits into a mask
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8))
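A hedged usage sketch (the image path is a placeholder; `PipelineTool` instances are callable):

# segmenter = ImageSegmentationTool()
# mask = segmenter(image=Image.open("cat.png"), label="cat")  # "cat.png" is a placeholder
# mask.save("cat_mask.png")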
| 1 |

'''simple docstring'''
import os
from math import log10


def solution(data_file: str = "base_exp.txt") -> int:
    '''Return the 1-based line number of the base/exponent pair with the greatest value.'''
    largest: float = 0
    result = 0
    # Comparing x * log10(a) avoids computing the huge powers a**x directly.
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__), data_file))):
        a, x = list(map(int, line.split(",")))
        if x * log10(a) > largest:
            largest = x * log10(a)
            result = i + 1
    return result
if __name__ == "__main__":
print(solution())
| 1 | 1 |
'''simple docstring'''
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class __A ( unittest.TestCase ):
def _lowercase (self : Any ):
UpperCAmelCase_ = [
"safety_checker/pytorch_model.bin",
"safety_checker/model.safetensors",
"vae/diffusion_pytorch_model.bin",
"vae/diffusion_pytorch_model.safetensors",
"text_encoder/pytorch_model.bin",
"text_encoder/model.safetensors",
"unet/diffusion_pytorch_model.bin",
"unet/diffusion_pytorch_model.safetensors",
]
self.assertTrue(is_safetensors_compatible(__a ) )
def _lowercase (self : int ):
UpperCAmelCase_ = [
"unet/diffusion_pytorch_model.bin",
"unet/diffusion_pytorch_model.safetensors",
]
self.assertTrue(is_safetensors_compatible(__a ) )
def _lowercase (self : Optional[Any] ):
UpperCAmelCase_ = [
"safety_checker/pytorch_model.bin",
"safety_checker/model.safetensors",
"vae/diffusion_pytorch_model.bin",
"vae/diffusion_pytorch_model.safetensors",
"text_encoder/pytorch_model.bin",
"text_encoder/model.safetensors",
"unet/diffusion_pytorch_model.bin",
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(__a ) )
def _lowercase (self : Union[str, Any] ):
UpperCAmelCase_ = [
"text_encoder/pytorch_model.bin",
"text_encoder/model.safetensors",
]
self.assertTrue(is_safetensors_compatible(__a ) )
def _lowercase (self : str ):
UpperCAmelCase_ = [
"safety_checker/pytorch_model.bin",
"safety_checker/model.safetensors",
"vae/diffusion_pytorch_model.bin",
"vae/diffusion_pytorch_model.safetensors",
"text_encoder/pytorch_model.bin",
# Removed: 'text_encoder/model.safetensors',
"unet/diffusion_pytorch_model.bin",
"unet/diffusion_pytorch_model.safetensors",
]
self.assertFalse(is_safetensors_compatible(__a ) )
def _lowercase (self : Dict ):
UpperCAmelCase_ = [
"safety_checker/pytorch_model.fp16.bin",
"safety_checker/model.fp16.safetensors",
"vae/diffusion_pytorch_model.fp16.bin",
"vae/diffusion_pytorch_model.fp16.safetensors",
"text_encoder/pytorch_model.fp16.bin",
"text_encoder/model.fp16.safetensors",
"unet/diffusion_pytorch_model.fp16.bin",
"unet/diffusion_pytorch_model.fp16.safetensors",
]
UpperCAmelCase_ = "fp16"
self.assertTrue(is_safetensors_compatible(__a , variant=__a ) )
def _lowercase (self : Optional[Any] ):
UpperCAmelCase_ = [
"unet/diffusion_pytorch_model.fp16.bin",
"unet/diffusion_pytorch_model.fp16.safetensors",
]
UpperCAmelCase_ = "fp16"
self.assertTrue(is_safetensors_compatible(__a , variant=__a ) )
def _lowercase (self : Optional[int] ):
# pass variant but use the non-variant filenames
UpperCAmelCase_ = [
"unet/diffusion_pytorch_model.bin",
"unet/diffusion_pytorch_model.safetensors",
]
UpperCAmelCase_ = "fp16"
self.assertTrue(is_safetensors_compatible(__a , variant=__a ) )
def _lowercase (self : Union[str, Any] ):
UpperCAmelCase_ = [
"safety_checker/pytorch_model.fp16.bin",
"safety_checker/model.fp16.safetensors",
"vae/diffusion_pytorch_model.fp16.bin",
"vae/diffusion_pytorch_model.fp16.safetensors",
"text_encoder/pytorch_model.fp16.bin",
"text_encoder/model.fp16.safetensors",
"unet/diffusion_pytorch_model.fp16.bin",
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
UpperCAmelCase_ = "fp16"
self.assertFalse(is_safetensors_compatible(__a , variant=__a ) )
def _lowercase (self : str ):
UpperCAmelCase_ = [
"text_encoder/pytorch_model.fp16.bin",
"text_encoder/model.fp16.safetensors",
]
UpperCAmelCase_ = "fp16"
self.assertTrue(is_safetensors_compatible(__a , variant=__a ) )
def _lowercase (self : Union[str, Any] ):
# pass variant but use the non-variant filenames
UpperCAmelCase_ = [
"text_encoder/pytorch_model.bin",
"text_encoder/model.safetensors",
]
UpperCAmelCase_ = "fp16"
self.assertTrue(is_safetensors_compatible(__a , variant=__a ) )
def _lowercase (self : int ):
UpperCAmelCase_ = [
"safety_checker/pytorch_model.fp16.bin",
"safety_checker/model.fp16.safetensors",
"vae/diffusion_pytorch_model.fp16.bin",
"vae/diffusion_pytorch_model.fp16.safetensors",
"text_encoder/pytorch_model.fp16.bin",
# 'text_encoder/model.fp16.safetensors',
"unet/diffusion_pytorch_model.fp16.bin",
"unet/diffusion_pytorch_model.fp16.safetensors",
]
UpperCAmelCase_ = "fp16"
self.assertFalse(is_safetensors_compatible(__a , variant=__a ) )
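# A minimal sketch of the property the tests above exercise (an illustrative
# reimplementation, NOT the actual `diffusers.utils.is_safetensors_compatible`):
# a repo listing is compatible when every component that ships a `.bin` weight
# also ships a `.safetensors` weight, regardless of the `fp16` variant infix.
def sketch_is_safetensors_compatible(filenames, variant=None):
    components_with_bin = {name.split("/")[0] for name in filenames if name.endswith(".bin")}
    components_with_safetensors = {name.split("/")[0] for name in filenames if name.endswith(".safetensors")}
    return components_with_bin <= components_with_safetensors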
'''simple docstring'''
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint(checkpoint, config):
    vae_state_dict = checkpoint

    new_checkpoint = {}

    new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
    new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"]
    new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"]
    new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"]
    new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"]
    new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"]

    new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"]
    new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"]
    new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"]
    new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"]
    new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"]
    new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"]

    new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"]
    new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"]
    new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"]
    new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"]

    # Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "encoder.down" in layer})
    down_blocks = {
        layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks)
    }

    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "decoder.up" in layer})
    up_blocks = {
        layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks)
    }

    for i in range(num_down_blocks):
        resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key]

        if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.weight"
            )
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.bias"
            )

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)

    for i in range(num_up_blocks):
        block_id = num_up_blocks - 1 - i
        resnets = [
            key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key
        ]

        if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.weight"
            ]
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.bias"
            ]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)
    return new_checkpoint


def vae_pt_to_vae_diffuser(
    checkpoint_path: str,
    output_path: str,
):
    # Only supports the v1 Stable Diffusion VAE layout
    r = requests.get(
        " https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml"
    )
    io_obj = io.BytesIO(r.content)

    original_config = OmegaConf.load(io_obj)
    image_size = 512
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if checkpoint_path.endswith("safetensors"):
        from safetensors import safe_open

        checkpoint = {}
        with safe_open(checkpoint_path, framework="pt", device="cpu") as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key)
    else:
        checkpoint = torch.load(checkpoint_path, map_location=device)["state_dict"]

    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint, vae_config)

    vae = AutoencoderKL(**vae_config)
    vae.load_state_dict(converted_vae_checkpoint)
    vae.save_pretrained(output_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--vae_pt_path", default=None, type=str, required=True, help="Path to the VAE .pt/.ckpt to convert.")
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to save the converted diffusers VAE to.")

    args = parser.parse_args()

    vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
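# Example invocation of this script (the file name and paths are placeholders):
#
#   python convert_vae_pt_to_diffusers.py \
#       --vae_pt_path ./trained_vae.pt \
#       --dump_path ./converted_vae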
'''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class __A ( UpperCamelCase__ ):
a__ : Optional[int] = ["""input_features""", """is_longer"""]
def __init__(self : Tuple , __a : Optional[int]=64 , __a : Union[str, Any]=48000 , __a : str=480 , __a : Optional[int]=10 , __a : List[Any]=1024 , __a : Optional[Any]=0.0 , __a : Tuple=False , __a : float = 0 , __a : float = 14000 , __a : int = None , __a : str = "fusion" , __a : str = "repeatpad" , **__a : Any , ):
super().__init__(
feature_size=__a , sampling_rate=__a , padding_value=__a , return_attention_mask=__a , **__a , )
UpperCAmelCase_ = top_db
UpperCAmelCase_ = truncation
UpperCAmelCase_ = padding
UpperCAmelCase_ = fft_window_size
UpperCAmelCase_ = (fft_window_size >> 1) + 1
UpperCAmelCase_ = hop_length
UpperCAmelCase_ = max_length_s
UpperCAmelCase_ = max_length_s * sampling_rate
UpperCAmelCase_ = sampling_rate
UpperCAmelCase_ = frequency_min
UpperCAmelCase_ = frequency_max
UpperCAmelCase_ = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=__a , min_frequency=__a , max_frequency=__a , sampling_rate=__a , norm=__a , mel_scale="htk" , )
UpperCAmelCase_ = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=__a , min_frequency=__a , max_frequency=__a , sampling_rate=__a , norm="slaney" , mel_scale="slaney" , )
def _lowercase (self : List[str] ):
UpperCAmelCase_ = copy.deepcopy(self.__dict__ )
UpperCAmelCase_ = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def _lowercase (self : str , __a : np.array , __a : Optional[np.array] = None ):
UpperCAmelCase_ = spectrogram(
__a , window_function(self.fft_window_size , "hann" ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=__a , log_mel="dB" , )
return log_mel_spectrogram.T
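    # CLAP's "fusion" truncation, implemented below: for audio longer than
    # max_length, one chunk is sampled from each of the front, middle, and back
    # thirds of the mel spectrogram, and these are stacked with a bilinearly
    # downsampled view of the whole spectrogram, giving a (4, chunk_frames, 64) input.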
def _lowercase (self : List[Any] , __a : Dict , __a : Optional[Any] , __a : Tuple ):
UpperCAmelCase_ = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
UpperCAmelCase_ = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
UpperCAmelCase_ = [0]
# randomly choose index for each part
UpperCAmelCase_ = np.random.choice(ranges[0] )
UpperCAmelCase_ = np.random.choice(ranges[1] )
UpperCAmelCase_ = np.random.choice(ranges[2] )
UpperCAmelCase_ = mel[idx_front : idx_front + chunk_frames, :]
UpperCAmelCase_ = mel[idx_middle : idx_middle + chunk_frames, :]
UpperCAmelCase_ = mel[idx_back : idx_back + chunk_frames, :]
UpperCAmelCase_ = torch.tensor(mel[None, None, :] )
UpperCAmelCase_ = torch.nn.functional.interpolate(
__a , size=[chunk_frames, 64] , mode="bilinear" , align_corners=__a )
UpperCAmelCase_ = mel_shrink[0][0].numpy()
UpperCAmelCase_ = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
return mel_fusion
def _lowercase (self : Optional[Any] , __a : np.array , __a : Any , __a : List[str] , __a : Optional[Any] ):
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
UpperCAmelCase_ = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
UpperCAmelCase_ = len(__a ) - max_length
UpperCAmelCase_ = np.random.randint(0 , overflow + 1 )
UpperCAmelCase_ = waveform[idx : idx + max_length]
UpperCAmelCase_ = self._np_extract_fbank_features(__a , self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
UpperCAmelCase_ = self._np_extract_fbank_features(__a , self.mel_filters )
UpperCAmelCase_ = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
UpperCAmelCase_ = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
UpperCAmelCase_ = np.stack([mel, mel, mel, mel] , axis=0 )
UpperCAmelCase_ = False
else:
UpperCAmelCase_ = self._random_mel_fusion(__a , __a , __a )
UpperCAmelCase_ = True
else:
raise NotImplementedError(f"""data_truncating {truncation} not implemented""" )
else:
UpperCAmelCase_ = False
# only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
UpperCAmelCase_ = int(max_length / len(__a ) )
UpperCAmelCase_ = np.stack(np.tile(__a , n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
UpperCAmelCase_ = int(max_length / len(__a ) )
UpperCAmelCase_ = np.stack(np.tile(__a , __a ) )
UpperCAmelCase_ = np.pad(__a , (0, max_length - waveform.shape[0]) , mode="constant" , constant_values=0 )
if truncation == "fusion":
UpperCAmelCase_ = self._np_extract_fbank_features(__a , self.mel_filters )
UpperCAmelCase_ = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
else:
UpperCAmelCase_ = self._np_extract_fbank_features(__a , self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__(self : Any , __a : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , __a : str = None , __a : Optional[str] = None , __a : Optional[int] = None , __a : Optional[int] = None , __a : Optional[Union[str, TensorType]] = None , **__a : int , ):
UpperCAmelCase_ = truncation if truncation is not None else self.truncation
UpperCAmelCase_ = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
f""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
f""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
UpperCAmelCase_ = isinstance(__a , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
UpperCAmelCase_ = is_batched_numpy or (
isinstance(__a , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
UpperCAmelCase_ = [np.asarray(__a , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(__a , np.ndarray ):
UpperCAmelCase_ = np.asarray(__a , dtype=np.floataa )
elif isinstance(__a , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
UpperCAmelCase_ = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
UpperCAmelCase_ = [np.asarray(__a )]
# convert to mel spectrogram, truncate and pad if needed.
UpperCAmelCase_ = [
self._get_input_mel(__a , max_length if max_length else self.nb_max_samples , __a , __a )
for waveform in raw_speech
]
UpperCAmelCase_ = []
UpperCAmelCase_ = []
for mel, longer in padded_inputs:
input_mel.append(__a )
is_longer.append(__a )
if truncation == "fusion" and sum(__a ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
UpperCAmelCase_ = np.random.randint(0 , len(__a ) )
UpperCAmelCase_ = True
if isinstance(input_mel[0] , __a ):
UpperCAmelCase_ = [np.asarray(__a , dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
UpperCAmelCase_ = [[longer] for longer in is_longer]
UpperCAmelCase_ = {"input_features": input_mel, "is_longer": is_longer}
UpperCAmelCase_ = BatchFeature(__a )
if return_tensors is not None:
UpperCAmelCase_ = input_features.convert_to_tensors(__a )
return input_features
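# Hedged usage sketch for the extractor above (illustrative only: in the
# transformers library this class ships as `ClapFeatureExtractor`; here it is
# the locally defined `__A`, constructed with its defaults of 48 kHz input,
# 10 s max length, and "fusion" truncation):
if __name__ == "__main__":
    extractor = __A()
    audio = np.random.default_rng(0).standard_normal(3 * 48_000).astype(np.float32)  # 3 s of noise
    features = extractor(audio, sampling_rate=48_000, return_tensors="np")
    # with "fusion" truncation, input_features stacks 4 views of the mel spectrogram
    print(features["input_features"].shape, features["is_longer"])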
'''simple docstring'''
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class __A ( unittest.TestCase ):
def __init__(self : str , __a : Optional[Any] , __a : Optional[Any]=13 , __a : int=30 , __a : Union[str, Any]=2 , __a : Dict=3 , __a : List[Any]=True , __a : Optional[Any]=True , __a : List[Any]=32 , __a : Any=5 , __a : str=4 , __a : Optional[int]=37 , __a : Optional[int]="gelu" , __a : List[str]=0.1 , __a : Tuple=0.1 , __a : List[str]=10 , __a : Optional[int]=0.02 , ):
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = image_size
UpperCAmelCase_ = patch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = is_training
UpperCAmelCase_ = use_labels
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = type_sequence_label_size
UpperCAmelCase_ = initializer_range
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
UpperCAmelCase_ = (image_size // patch_size) ** 2
UpperCAmelCase_ = num_patches + 1
def _lowercase (self : Any ):
UpperCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ = ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__a , initializer_range=self.initializer_range , )
return config, pixel_values
def _lowercase (self : Dict , __a : Any , __a : List[Any] ):
UpperCAmelCase_ = FlaxViTModel(config=__a )
UpperCAmelCase_ = model(__a )
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
UpperCAmelCase_ = (self.image_size, self.image_size)
UpperCAmelCase_ = (self.patch_size, self.patch_size)
UpperCAmelCase_ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size) )
def _lowercase (self : Tuple , __a : str , __a : Any ):
UpperCAmelCase_ = self.type_sequence_label_size
UpperCAmelCase_ = FlaxViTForImageClassification(config=__a )
UpperCAmelCase_ = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCAmelCase_ = 1
UpperCAmelCase_ = FlaxViTForImageClassification(__a )
UpperCAmelCase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase_ = model(__a )
def _lowercase (self : Optional[Any] ):
UpperCAmelCase_ = self.prepare_config_and_inputs()
(
(
UpperCAmelCase_
) , (
UpperCAmelCase_
) ,
) = config_and_inputs
UpperCAmelCase_ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_flax
class __A ( UpperCamelCase__ , unittest.TestCase ):
a__ : Tuple = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()
def _lowercase (self : Any ):
UpperCAmelCase_ = FlaxViTModelTester(self )
UpperCAmelCase_ = ConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37 )
def _lowercase (self : Tuple ):
self.config_tester.run_common_tests()
def _lowercase (self : str ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def _lowercase (self : str ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a )
def _lowercase (self : Tuple ):
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(__a )
UpperCAmelCase_ = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ = [*signature.parameters.keys()]
UpperCAmelCase_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __a )
def _lowercase (self : Optional[Any] ):
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCAmelCase_ = self._prepare_for_class(__a , __a )
UpperCAmelCase_ = model_class(__a )
@jax.jit
def model_jitted(__a : Tuple , **__a : List[Any] ):
return model(pixel_values=__a , **__a )
with self.subTest("JIT Enabled" ):
UpperCAmelCase_ = model_jitted(**__a ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
UpperCAmelCase_ = model_jitted(**__a ).to_tuple()
self.assertEqual(len(__a ) , len(__a ) )
for jitted_output, output in zip(__a , __a ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def _lowercase (self : Tuple ):
for model_class_name in self.all_model_classes:
UpperCAmelCase_ = model_class_name.from_pretrained("google/vit-base-patch16-224" )
UpperCAmelCase_ = model(np.ones((1, 3, 224, 224) ) )
self.assertIsNotNone(__a )
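# Worked example of the sequence-length arithmetic the tester above relies on:
# a 224x224 image with 16x16 patches yields (224 // 16) ** 2 = 196 patches,
# plus one [CLS] token, for a sequence length of 197 — which is why the slow
# test feeds np.ones((1, 3, 224, 224)) to google/vit-base-patch16-224.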
'''simple docstring'''
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class __A ( UpperCamelCase__ , unittest.TestCase ):
a__ : Any = XLNetTokenizer
a__ : Tuple = XLNetTokenizerFast
a__ : int = True
a__ : Optional[int] = True
def _lowercase (self : Union[str, Any] ):
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB , keep_accents=True )
tokenizer.sanitize_special_tokens()
tokenizer.save_pretrained(self.tmpdirname )
def _lowercase (self : Dict ):
UpperCAmelCase_ = "<s>"
UpperCAmelCase_ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__a ) , __a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__a ) , __a )
def _lowercase (self : Optional[Any] ):
UpperCAmelCase_ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<unk>" )
self.assertEqual(vocab_keys[1] , "<s>" )
self.assertEqual(vocab_keys[-1] , "<eod>" )
self.assertEqual(len(__a ) , 1006 )
def _lowercase (self : Tuple ):
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def _lowercase (self : Tuple ):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB , keep_accents=True )
UpperCAmelCase_ = tokenizer.tokenize("This is a test" )
self.assertListEqual(__a , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , [285, 46, 10, 170, 382] )
UpperCAmelCase_ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
__a , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
UpperCAmelCase_ = tokenizer.convert_tokens_to_ids(__a )
self.assertListEqual(__a , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] )
UpperCAmelCase_ = tokenizer.convert_ids_to_tokens(__a )
self.assertListEqual(
__a , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
def _lowercase (self : Any ):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB , do_lower_case=True )
UpperCAmelCase_ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
__a , [
SPIECE_UNDERLINE + "",
"i",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"se",
".",
] , )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["▁he", "ll", "o"] )
def _lowercase (self : int ):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB , do_lower_case=False )
UpperCAmelCase_ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
__a , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"se",
".",
] , )
@slow
def _lowercase (self : str ):
UpperCAmelCase_ = XLNetTokenizer.from_pretrained("xlnet-base-cased" )
UpperCAmelCase_ = tokenizer.encode("sequence builders" , add_special_tokens=__a )
UpperCAmelCase_ = tokenizer.encode("multi-sequence build" , add_special_tokens=__a )
UpperCAmelCase_ = tokenizer.build_inputs_with_special_tokens(__a )
UpperCAmelCase_ = tokenizer.build_inputs_with_special_tokens(__a , __a )
assert encoded_sentence == text + [4, 3]
assert encoded_pair == text + [4] + text_a + [4, 3]
@slow
def _lowercase (self : Optional[int] ):
# fmt: off
UpperCAmelCase_ = {"input_ids": [[17, 21442, 270, 17, 10, 14645, 318, 34, 17, 4546, 3145, 787, 13, 7752, 22018, 23, 21, 17, 4546, 3145, 787, 13, 3352, 14431, 13, 5500, 11, 1176, 580, 13, 16819, 4797, 23, 17, 10, 17135, 658, 19, 457, 7932, 13, 184, 19, 3154, 17135, 6468, 19, 1404, 12269, 19, 4229, 5356, 16264, 46, 19, 17, 20545, 10395, 9, 9, 9, 11, 28, 6421, 9531, 20729, 17, 10, 353, 17022, 11, 21, 6421, 9531, 16949, 17, 10, 11509, 753, 11, 33, 95, 2421, 7385, 956, 14431, 2626, 25, 842, 7385, 4836, 21, 1429, 2272, 9855, 3120, 161, 24738, 19, 13203, 658, 218, 787, 21, 430, 18482, 847, 2637, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 322, 22178, 27, 1064, 22, 956, 13, 11101, 1429, 5854, 24313, 18953, 40, 422, 24366, 68, 1758, 37, 10483, 14257, 31, 207, 263, 21, 203, 3773, 25, 71, 9735, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 2049, 3442, 17, 13894, 3380, 23, 95, 18, 17634, 2288, 9, 4, 3]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__a , model_name="xlnet-base-cased" , revision="c841166438c31ec7ca9a106dee7bb312b73ae511" , )
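# Hedged usage sketch mirroring the slow tests above (requires downloading the
# pretrained vocabulary):
#
#     tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")
#     ids = tokenizer.encode("sequence builders")
#     # XLNet appends <sep> (id 4) and <cls> (id 3) at the *end* of a sequence
#     assert ids[-2:] == [4, 3]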
'''simple docstring'''
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class __A ( UpperCamelCase__ ):
def _lowercase (self : Optional[Any] ):
UpperCAmelCase_ = tempfile.mkdtemp()
UpperCAmelCase_ = 5
# Realm tok
UpperCAmelCase_ = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"test",
"question",
"this",
"is",
"the",
"first",
"second",
"third",
"fourth",
"fifth",
"record",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
UpperCAmelCase_ = os.path.join(self.tmpdirname , "realm_tokenizer" )
os.makedirs(__a , exist_ok=__a )
UpperCAmelCase_ = os.path.join(__a , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
UpperCAmelCase_ = os.path.join(self.tmpdirname , "realm_block_records" )
os.makedirs(__a , exist_ok=__a )
def _lowercase (self : Optional[Any] ):
return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , "realm_tokenizer" ) )
def _lowercase (self : Any ):
shutil.rmtree(self.tmpdirname )
def _lowercase (self : List[Any] ):
UpperCAmelCase_ = RealmConfig(num_block_records=self.num_block_records )
return config
def _lowercase (self : List[str] ):
UpperCAmelCase_ = Dataset.from_dict(
{
"id": ["0", "1"],
"question": ["foo", "bar"],
"answers": [["Foo", "Bar"], ["Bar"]],
} )
return dataset
def _lowercase (self : Any ):
UpperCAmelCase_ = np.array(
[
B"This is the first record",
B"This is the second record",
B"This is the third record",
B"This is the fourth record",
B"This is the fifth record",
B"This is a longer longer longer record",
] , dtype=__a , )
return block_records
def _lowercase (self : Union[str, Any] ):
UpperCAmelCase_ = RealmRetriever(
block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
return retriever
def _lowercase (self : int ):
UpperCAmelCase_ = self.get_config()
UpperCAmelCase_ = self.get_dummy_retriever()
UpperCAmelCase_ = retriever.tokenizer
UpperCAmelCase_ = np.array([0, 3] , dtype="long" )
UpperCAmelCase_ = tokenizer(["Test question"] ).input_ids
UpperCAmelCase_ = tokenizer(
["the fourth"] , add_special_tokens=__a , return_token_type_ids=__a , return_attention_mask=__a , ).input_ids
UpperCAmelCase_ = config.reader_seq_len
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = retriever(
__a , __a , answer_ids=__a , max_length=__a , return_tensors="np" )
self.assertEqual(len(__a ) , 2 )
self.assertEqual(len(__a ) , 2 )
self.assertEqual(len(__a ) , 2 )
self.assertEqual(concat_inputs.input_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) )
self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "first", "record", "[SEP]"] , )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "fourth", "record", "[SEP]"] , )
def _lowercase (self : List[Any] ):
UpperCAmelCase_ = self.get_config()
UpperCAmelCase_ = self.get_dummy_retriever()
UpperCAmelCase_ = retriever.tokenizer
UpperCAmelCase_ = np.array([0, 3, 5] , dtype="long" )
UpperCAmelCase_ = tokenizer(["Test question"] ).input_ids
UpperCAmelCase_ = tokenizer(
["the fourth", "longer longer"] , add_special_tokens=__a , return_token_type_ids=__a , return_attention_mask=__a , ).input_ids
UpperCAmelCase_ = config.reader_seq_len
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = retriever(
__a , __a , answer_ids=__a , max_length=__a , return_tensors="np" )
self.assertEqual([False, True, True] , __a )
self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , __a )
self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , __a )
def _lowercase (self : Optional[Any] ):
UpperCAmelCase_ = self.get_dummy_retriever()
retriever.save_pretrained(os.path.join(self.tmpdirname , "realm_block_records" ) )
# Test local path
UpperCAmelCase_ = retriever.from_pretrained(os.path.join(self.tmpdirname , "realm_block_records" ) )
self.assertEqual(retriever.block_records[0] , B"This is the first record" )
# Test mocked remote path
with patch("transformers.models.realm.retrieval_realm.hf_hub_download" ) as mock_hf_hub_download:
            mock_hf_hub_download.return_value = os.path.join(
                os.path.join(self.tmpdirname , "realm_block_records" ) , _REALM_BLOCK_RECORDS_FILENAME )
UpperCAmelCase_ = RealmRetriever.from_pretrained("google/realm-cc-news-pretrained-openqa" )
self.assertEqual(retriever.block_records[0] , B"This is the first record" )
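# Note on the fixtures above: a REALM corpus (`block_records`) is a numpy
# object array of *byte* strings, which is why every assertion compares
# against b"..." rather than a str. A minimal retriever over an in-memory
# corpus (tokenizer construction elided) looks roughly like:
#
#     blocks = np.array([b"first record", b"second record"], dtype=object)
#     retriever = RealmRetriever(block_records=blocks, tokenizer=tokenizer)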
'''simple docstring'''
from math import log

from scipy.constants import Boltzmann, physical_constants

T = 300  # TEMPERATURE (unit = K)


def builtin_voltage(
    donor_conc: float,
    acceptor_conc: float,
    intrinsic_conc: float,
) -> float:
    """Calculate the built-in voltage (in volts) of a pn junction."""
    if donor_conc <= 0:
        raise ValueError("Donor concentration should be positive")
    elif acceptor_conc <= 0:
        raise ValueError("Acceptor concentration should be positive")
    elif intrinsic_conc <= 0:
        raise ValueError("Intrinsic concentration should be positive")
    elif donor_conc <= intrinsic_conc:
        raise ValueError(
            "Donor concentration should be greater than intrinsic concentration")
    elif acceptor_conc <= intrinsic_conc:
        raise ValueError(
            "Acceptor concentration should be greater than intrinsic concentration")
    else:
        return (
            Boltzmann
            * T
            * log((donor_conc * acceptor_conc) / intrinsic_conc**2)
            / physical_constants["electron volt"][0]
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
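# Worked example (illustrative silicon numbers): with donor and acceptor
# concentrations Nd = Na = 1e17 cm^-3 and intrinsic concentration
# ni = 1.5e10 cm^-3 at T = 300 K,
#   V_bi = (kT / q) * ln(Nd * Na / ni**2) ≈ 0.0259 V * ln(4.44e13) ≈ 0.81 V
#
#     >>> round(builtin_voltage(1e17, 1e17, 1.5e10), 2)
#     0.81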
'''simple docstring'''
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_xglm_checkpoint_from_disk(checkpoint_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    args = Namespace(**checkpoint["cfg"]["model"])
    state_dict = checkpoint["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]

    state_dict = {key.replace("decoder", "model"): val for key, val in state_dict.items()}

    config = XGLMConfig(
        vocab_size=vocab_size,
        max_position_embeddings=args.max_target_positions,
        num_layers=args.decoder_layers,
        attention_heads=args.decoder_attention_heads,
        ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.decoder_embed_dim,
        layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="gelu",
        scale_embedding=not args.no_scale_embedding,
        tie_word_embeddings=args.share_decoder_input_output_embed,
    )

    model = XGLMForCausalLM(config)
    missing = model.load_state_dict(state_dict, strict=False)
    print(missing)
    model.lm_head = make_linear_from_emb(model.model.embed_tokens)
    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    model = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
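# Example invocation (the script file name is a placeholder; the checkpoint
# must be a fairseq XGLM `model.pt`):
#
#   python convert_xglm_original_ckpt_to_trfms.py ./xglm-564M/model.pt ./xglm-564M-hf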
'''simple docstring'''
import math


def main() -> None:
    message = input("Enter message: ")
    key = int(input(f"Enter key [2-{len(message) - 1}]: "))
    mode = input("Encryption/Decryption [e/d]: ")

    if mode.lower().startswith("e"):
        text = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        text = decrypt_message(key, message)

    # Append pipe symbol (vertical bar) to identify spaces at the end.
    print(f"Output:\n{text + '|'}")


def encrypt_message(key: int, message: str) -> str:
    cipher_text = [""] * key
    for col in range(key):
        pointer = col
        while pointer < len(message):
            cipher_text[col] += message[pointer]
            pointer += key
    return "".join(cipher_text)


def decrypt_message(key: int, message: str) -> str:
    num_cols = math.ceil(len(message) / key)
    num_rows = key
    num_shaded_boxes = (num_cols * num_rows) - len(message)
    plain_text = [""] * num_cols
    col = 0
    row = 0

    for symbol in message:
        plain_text[col] += symbol
        col += 1

        if (
            (col == num_cols)
            or (col == num_cols - 1)
            and (row >= num_rows - num_shaded_boxes)
        ):
            col = 0
            row += 1

    return "".join(plain_text)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
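# Round-trip sanity check for the two helpers above, using the classic example
# from "Cracking Codes with Python" (key = 8):
#
#     >>> encrypt_message(8, "Common sense is not so common.")
#     'Cenoonommstmme oo snnio. s s c'
#     >>> decrypt_message(8, "Cenoonommstmme oo snnio. s s c")
#     'Common sense is not so common.'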
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __A ( UpperCamelCase__ , unittest.TestCase ):
a__ : Dict = KandinskyVaaControlnetPipeline
a__ : Dict = ["""image_embeds""", """negative_image_embeds""", """hint"""]
a__ : List[Any] = ["""image_embeds""", """negative_image_embeds""", """hint"""]
a__ : Optional[int] = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
a__ : Union[str, Any] = False
@property
def _lowercase (self : Union[str, Any] ):
return 32
@property
def _lowercase (self : str ):
return 32
@property
def _lowercase (self : int ):
return self.time_input_dim
@property
def _lowercase (self : Optional[Any] ):
return self.time_input_dim * 4
@property
def _lowercase (self : int ):
return 100
@property
def _lowercase (self : Dict ):
torch.manual_seed(0 )
UpperCAmelCase_ = {
"in_channels": 8,
# Out channels is double in channels because predicts mean and variance
"out_channels": 8,
"addition_embed_type": "image_hint",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
UpperCAmelCase_ = UNetaDConditionModel(**__a )
return model
@property
def _lowercase (self : Optional[int] ):
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def _lowercase (self : Tuple ):
torch.manual_seed(0 )
UpperCAmelCase_ = VQModel(**self.dummy_movq_kwargs )
return model
def _lowercase (self : str ):
UpperCAmelCase_ = self.dummy_unet
UpperCAmelCase_ = self.dummy_movq
UpperCAmelCase_ = DDIMScheduler(
num_train_timesteps=1000 , beta_schedule="linear" , beta_start=0.0_00_85 , beta_end=0.0_12 , clip_sample=__a , set_alpha_to_one=__a , steps_offset=1 , prediction_type="epsilon" , thresholding=__a , )
UpperCAmelCase_ = {
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def _lowercase (self : str , __a : str , __a : List[str]=0 ):
UpperCAmelCase_ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__a ) ).to(__a )
UpperCAmelCase_ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
__a )
# create hint
UpperCAmelCase_ = floats_tensor((1, 3, 64, 64) , rng=random.Random(__a ) ).to(__a )
if str(__a ).startswith("mps" ):
UpperCAmelCase_ = torch.manual_seed(__a )
else:
UpperCAmelCase_ = torch.Generator(device=__a ).manual_seed(__a )
UpperCAmelCase_ = {
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"hint": hint,
"generator": generator,
"height": 64,
"width": 64,
"guidance_scale": 4.0,
"num_inference_steps": 2,
"output_type": "np",
}
return inputs
def _lowercase (self : Union[str, Any] ):
UpperCAmelCase_ = "cpu"
UpperCAmelCase_ = self.get_dummy_components()
UpperCAmelCase_ = self.pipeline_class(**__a )
UpperCAmelCase_ = pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
UpperCAmelCase_ = pipe(**self.get_dummy_inputs(__a ) )
UpperCAmelCase_ = output.images
UpperCAmelCase_ = pipe(
**self.get_dummy_inputs(__a ) , return_dict=__a , )[0]
UpperCAmelCase_ = image[0, -3:, -3:, -1]
UpperCAmelCase_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase_ = np.array(
[0.6_95_98_26, 0.86_82_79, 0.7_55_80_92, 0.68_76_94_67, 0.85_80_58_04, 0.65_97_74_96, 0.44_88_53_02, 0.5_95_91_11, 0.4_25_15_95] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class __A ( unittest.TestCase ):
def _lowercase (self : List[str] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowercase (self : List[str] ):
UpperCAmelCase_ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy" )
UpperCAmelCase_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/hint_image_cat.png" )
UpperCAmelCase_ = torch.from_numpy(np.array(__a ) ).float() / 2_55.0
UpperCAmelCase_ = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
UpperCAmelCase_ = KandinskyVaaPriorPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa )
pipe_prior.to(__a )
UpperCAmelCase_ = KandinskyVaaControlnetPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-controlnet-depth" , torch_dtype=torch.floataa )
UpperCAmelCase_ = pipeline.to(__a )
pipeline.set_progress_bar_config(disable=__a )
UpperCAmelCase_ = "A robot, 4k photo"
UpperCAmelCase_ = torch.Generator(device="cuda" ).manual_seed(0 )
UpperCAmelCase_ , UpperCAmelCase_ = pipe_prior(
__a , generator=__a , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
UpperCAmelCase_ = torch.Generator(device="cuda" ).manual_seed(0 )
UpperCAmelCase_ = pipeline(
image_embeds=__a , negative_image_embeds=__a , hint=__a , generator=__a , num_inference_steps=100 , output_type="np" , )
UpperCAmelCase_ = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(__a , __a )
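# The slow test above mirrors the typical two-stage Kandinsky 2.2 flow. A
# hedged sketch with the public diffusers class names (this file's
# `KandinskyVaa*` aliases correspond to `KandinskyV22*`); fp16 weights and a
# CUDA device are assumed, and `hint` is the (1, 3, H, W) depth tensor
# prepared as in the test:
#
#     prior = KandinskyV22PriorPipeline.from_pretrained(
#         "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16).to("cuda")
#     image_embeds, negative_embeds = prior("A robot, 4k photo").to_tuple()
#     decoder = KandinskyV22ControlnetPipeline.from_pretrained(
#         "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16).to("cuda")
#     image = decoder(image_embeds=image_embeds, negative_image_embeds=negative_embeds,
#                     hint=hint, num_inference_steps=100, output_type="np").images[0]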
'''simple docstring'''
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class __A ( UpperCamelCase__ ):
def _lowercase (self : Optional[Any] , __a : str ):
os.makedirs(__a , exist_ok=__a )
UpperCAmelCase_ = {"source": "What is love ?", "target": "life"}
UpperCAmelCase_ = {"train": 12, "val": 2, "test": 2}
for split in ["train", "test", "val"]:
for field in ["source", "target"]:
UpperCAmelCase_ = "\n".join([contents[field]] * n_lines[split] )
with open(os.path.join(__a , f"""{split}.{field}""" ) , "w" ) as f:
f.write(__a )
def _lowercase (self : Optional[int] , __a : int , __a : str = "pytorch" ):
UpperCAmelCase_ = self.get_auto_remove_tmp_dir()
UpperCAmelCase_ = os.path.join(__a , "output" )
UpperCAmelCase_ = os.path.join(__a , "data" )
self._create_dummy_data(data_dir=__a )
UpperCAmelCase_ = f"""
--data_dir {data_dir} \
--output_dir {output_dir} \
--model_name_or_path facebook/rag-sequence-base \
--model_type rag_sequence \
--do_train \
--do_predict \
--n_val -1 \
--val_check_interval 1.0 \
--train_batch_size 2 \
--eval_batch_size 1 \
--max_source_length 25 \
--max_target_length 25 \
--val_max_target_length 25 \
--test_max_target_length 25 \
--label_smoothing 0.1 \
--dropout 0.1 \
--attention_dropout 0.1 \
--weight_decay 0.001 \
--adam_epsilon 1e-08 \
--max_grad_norm 0.1 \
--lr_scheduler polynomial \
--learning_rate 3e-04 \
--num_train_epochs 1 \
--warmup_steps 4 \
--gradient_accumulation_steps 1 \
--distributed-port 8787 \
--use_dummy_dataset 1 \
--distributed_retriever {distributed_retriever} \
""".split()
if gpus > 0:
testargs.append(f"""--gpus={gpus}""" )
if is_apex_available():
testargs.append("--fp16" )
else:
testargs.append("--gpus=0" )
testargs.append("--distributed_backend=ddp_cpu" )
testargs.append("--num_processes=2" )
UpperCAmelCase_ = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs
execute_subprocess_async(__a , env=self.get_env() )
UpperCAmelCase_ = os.path.join(__a , "metrics.json" )
with open(__a ) as f:
UpperCAmelCase_ = json.load(__a )
return result
@require_torch_gpu
def _lowercase (self : Optional[int] ):
UpperCAmelCase_ = self._run_finetune(gpus=1 )
self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 )
@require_torch_multi_gpu
def _lowercase (self : Dict ):
UpperCAmelCase_ = self._run_finetune(gpus=2 )
self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 )
@require_torch_gpu
@require_ray
def _lowercase (self : Optional[int] ):
UpperCAmelCase_ = self._run_finetune(gpus=1 , distributed_retriever="ray" )
self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 )
@require_torch_multi_gpu
@require_ray
def _lowercase (self : Any ):
UpperCAmelCase_ = self._run_finetune(gpus=1 , distributed_retriever="ray" )
self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 )
'''simple docstring'''
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class __A ( UpperCamelCase__ ):
a__ : Optional[int] = (UnCLIPScheduler,)
def _lowercase (self : str , **__a : Optional[Any] ):
UpperCAmelCase_ = {
"num_train_timesteps": 1000,
"variance_type": "fixed_small_log",
"clip_sample": True,
"clip_sample_range": 1.0,
"prediction_type": "epsilon",
}
config.update(**__a )
return config
def _lowercase (self : Any ):
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=__a )
def _lowercase (self : Union[str, Any] ):
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=__a )
def _lowercase (self : Union[str, Any] ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=__a )
def _lowercase (self : List[Any] ):
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=__a )
def _lowercase (self : Union[str, Any] ):
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=__a )
def _lowercase (self : Optional[Any] ):
for time_step in [0, 500, 999]:
for prev_timestep in [None, 5, 100, 250, 500, 750]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=__a , prev_timestep=__a )
def _lowercase (self : Union[str, Any] ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="fixed_small_log" )
        scheduler = scheduler_class(**scheduler_config )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000E-10 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_54_96_25 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9_99_49_87 ) ) < 1E-5
def _lowercase (self : List[str] ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="learned_range" )
        scheduler = scheduler_class(**scheduler_config )
        predicted_variance = 0.5
        assert scheduler._get_variance(1 , predicted_variance=predicted_variance ) - -10.1_71_27_90 < 1E-5
        assert scheduler._get_variance(487 , predicted_variance=predicted_variance ) - -5.7_99_80_52 < 1E-5
        assert scheduler._get_variance(999 , predicted_variance=predicted_variance ) - -0.0_01_00_11 < 1E-5
def _lowercase (self : Optional[int] ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = scheduler.timesteps
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0 )
        for i, t in enumerate(timesteps ):
            # 1. predict noise residual
            residual = model(sample , t )
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual , t , sample , generator=generator ).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 252.2682495 ) < 1E-2
        assert abs(result_mean.item() - 0.3284743 ) < 1E-3
def _lowercase (self : Tuple ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(25 )
        timesteps = scheduler.timesteps
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0 )
        for i, t in enumerate(timesteps ):
            # 1. predict noise residual
            residual = model(sample , t )
            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual , t , sample , prev_timestep=prev_timestep , generator=generator ).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 258.2044983 ) < 1E-2
        assert abs(result_mean.item() - 0.3362038 ) < 1E-3
def _lowercase (self : List[str] ):
pass
def _lowercase (self : Any ):
pass
| 1 | '''simple docstring'''
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()
def oe_process ( position , value , l_send , r_send , lr_cv , rr_cv , result_pipe ) -> None:
    '''simple docstring'''
    global process_lock
    # we perform 10 swap rounds since after n swaps (n = list length; the demo
    # list in main() has 10 elements) we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0 , 10 ):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value )
            process_lock.release()
            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()
            # take the lower value since you are on the left
            value = min(value , temp )
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value )
            process_lock.release()
            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()
            # take the higher value since you are on the right
            value = max(value , temp )
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value )
def odd_even_transposition ( arr : list ) -> list:
    '''simple docstring'''
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe() )
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
    temp_lr = temp_rs
    temp_ls = temp_rr
    for i in range(1 , len(arr ) - 1 ):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
        temp_lr = temp_rs
        temp_ls = temp_rr
    process_array_.append(
        Process(
            target=oe_process , args=(
                len(arr ) - 1,
                arr[len(arr ) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr ) - 1],
            ) , ) )
    # start the processes
    for p in process_array_:
        p.start()
    # wait for the processes to end and write their values to the list
    for p in range(0 , len(arr ) ):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr
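# Illustrative check: odd_even_transposition([3, 1, 2, 5, 4]) -> [1, 2, 3, 4, 5]
# (each worker runs the fixed 10 compare-exchange rounds above, which is enough
# for lists of up to 10 elements)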
def main () -> None:
    '''simple docstring'''
    arr = list(range(10 , 0 , -1 ) )
    print("Initial List" )
    print(*arr )
    arr = odd_even_transposition(arr )
    print("Sorted List\n" )
    print(*arr )
if __name__ == "__main__":
main()
| 1 | 1 |
'''simple docstring'''
from __future__ import annotations
from math import pi, sqrt
def resonant_frequency ( inductance : float , capacitance : float ) -> tuple:
    '''simple docstring'''
    if inductance <= 0:
        raise ValueError("Inductance cannot be 0 or negative" )
    elif capacitance <= 0:
        raise ValueError("Capacitance cannot be 0 or negative" )
    else:
        return (
            "Resonant frequency",
            float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
        )
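# Illustrative check (values chosen arbitrarily):
# resonant_frequency(10, 5) == ("Resonant frequency", 1 / (2 * pi * sqrt(50))), i.e. about 0.0225 Hz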
if __name__ == "__main__":
import doctest
doctest.testmod()
| 1 | '''simple docstring'''
def binary_or ( a : int , b : int ) -> str:
    '''simple docstring'''
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive" )
    a_binary = str(bin(a ) )[2:]  # remove the leading "0b"
    b_binary = str(bin(b ) )[2:]
    max_len = max(len(a_binary ) , len(b_binary ) )
    return "0b" + "".join(
        str(int("1" in (char_a, char_b) ) )
        for char_a, char_b in zip(a_binary.zfill(max_len ) , b_binary.zfill(max_len ) ) )
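# Illustrative check: binary_or(25, 32) == "0b111001", since
# 25 -> 011001 and 32 -> 100000 OR column-wise to 111001 (decimal 57).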
if __name__ == "__main__":
import doctest
doctest.testmod()
| 1 | 1 |
'''simple docstring'''
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class DecisionTransformerModelTester :
    def __init__(self , parent , batch_size=13 , seq_length=7 , act_dim=6 , state_dim=17 , hidden_size=23 , max_length=11 , is_training=True , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.act_dim = act_dim
        self.state_dim = state_dim
        self.hidden_size = hidden_size
        self.max_length = max_length
        self.is_training = is_training
    def prepare_config_and_inputs (self ):
        states = floats_tensor((self.batch_size, self.seq_length, self.state_dim) )
        actions = floats_tensor((self.batch_size, self.seq_length, self.act_dim) )
        rewards = floats_tensor((self.batch_size, self.seq_length, 1) )
        returns_to_go = floats_tensor((self.batch_size, self.seq_length, 1) )
        timesteps = ids_tensor((self.batch_size, self.seq_length) , vocab_size=1000 )
        attention_mask = random_attention_mask((self.batch_size, self.seq_length) )
        config = self.get_config()
return (
config,
states,
actions,
rewards,
returns_to_go,
timesteps,
attention_mask,
)
    def get_config (self ):
return DecisionTransformerConfig(
batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , )
    def create_and_check_model (self , config , states , actions , rewards , returns_to_go , timesteps , attention_mask , ):
        model = DecisionTransformerModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(states , actions , rewards , returns_to_go , timesteps , attention_mask )
self.parent.assertEqual(result.state_preds.shape , states.shape )
self.parent.assertEqual(result.action_preds.shape , actions.shape )
self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size) ) # seq length *3 as there are 3 modalities: states, returns and actions
    def prepare_config_and_inputs_for_common (self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        ) = config_and_inputs
        inputs_dict = {
"states": states,
"actions": actions,
"rewards": rewards,
"returns_to_go": returns_to_go,
"timesteps": timesteps,
"attention_mask": attention_mask,
}
return config, inputs_dict
@require_torch
class DecisionTransformerModelTest ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (DecisionTransformerModel,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {"""feature-extraction""": DecisionTransformerModel} if is_torch_available() else {}
    # Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
    test_generate_without_input_ids = False
    # Ignoring of failing tests from ModelTesterMixin, as the model does not implement these features
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_attention_outputs = False
    test_hidden_states_output = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    test_gradient_checkpointing = False
    test_torchscript = False
    def setUp (self ):
        self.model_tester = DecisionTransformerModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DecisionTransformerConfig , hidden_size=37 )
def _lowercase (self : Dict ):
self.config_tester.run_common_tests()
def _lowercase (self : int ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
@slow
def _lowercase (self : str ):
for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DecisionTransformerModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def _lowercase (self : Union[str, Any] ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = [
                "states",
                "actions",
                "rewards",
                "returns_to_go",
                "timesteps",
                "attention_mask",
            ]
            self.assertListEqual(arg_names[: len(expected_arg_names )] , expected_arg_names )
@require_torch
class DecisionTransformerModelIntegrationTest ( unittest.TestCase ):
@slow
def _lowercase (self : Optional[int] ):
        NUM_STEPS = 2  # number of steps of autoregressive prediction we will perform
        TARGET_RETURN = 10  # defined by the RL environment, may be normalized
        model = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert" )
        model = model.to(torch_device )
        config = model.config
        torch.manual_seed(0 )
        state = torch.randn(1 , 1 , config.state_dim ).to(device=torch_device , dtype=torch.float32 )  # env.reset()
        expected_outputs = torch.tensor(
            [[0.242793, -0.28693074, 0.8742613], [0.67815274, -0.08101085, -0.12952147]] , device=torch_device )
        returns_to_go = torch.tensor(TARGET_RETURN , device=torch_device , dtype=torch.float32 ).reshape(1 , 1 , 1 )
        states = state
        actions = torch.zeros(1 , 0 , config.act_dim , device=torch_device , dtype=torch.float32 )
        rewards = torch.zeros(1 , 0 , device=torch_device , dtype=torch.float32 )
        timesteps = torch.tensor(0 , device=torch_device , dtype=torch.long ).reshape(1 , 1 )
        for step in range(NUM_STEPS ):
            actions = torch.cat([actions, torch.zeros(1 , 1 , config.act_dim , device=torch_device )] , dim=1 )
            rewards = torch.cat([rewards, torch.zeros(1 , 1 , device=torch_device )] , dim=1 )
            attention_mask = torch.ones(1 , states.shape[1] ).to(dtype=torch.long , device=states.device )
            with torch.no_grad():
                state_pred , action_pred , return_pred = model(
                    states=states , actions=actions , rewards=rewards , returns_to_go=returns_to_go , timesteps=timesteps , attention_mask=attention_mask , return_dict=False , )
            self.assertEqual(action_pred.shape , actions.shape )
            self.assertTrue(torch.allclose(action_pred[0, -1] , expected_outputs[step] , atol=1E-4 ) )
            state , reward , done , _ = (  # env.step(action)
                torch.randn(1 , 1 , config.state_dim ).to(device=torch_device , dtype=torch.float32 ),
                1.0,
                False,
                {},
            )
            actions[-1] = action_pred[0, -1]
            states = torch.cat([states, state] , dim=1 )
            pred_return = returns_to_go[0, -1] - reward
            returns_to_go = torch.cat([returns_to_go, pred_return.reshape(1 , 1 , 1 )] , dim=1 )
            timesteps = torch.cat(
                [timesteps, torch.ones((1, 1) , device=torch_device , dtype=torch.long ) * (step + 1)] , dim=1 )
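        # The loop above sketches closed-loop evaluation: each step conditions on
        # the growing (states, actions, returns_to_go, timesteps) history and
        # checks the predicted action against the precomputed expected_outputs.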
| 1 | '''simple docstring'''
from __future__ import annotations
def slowsort ( sequence : list , start : int | None = None , end : int | None = None ) -> None:
    '''simple docstring'''
    if start is None:
        start = 0
    if end is None:
        end = len(sequence ) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    slowsort(sequence , start , mid )
    slowsort(sequence , mid + 1 , end )
    if sequence[end] < sequence[mid]:
        sequence[end] , sequence[mid] = sequence[mid] , sequence[end]
    slowsort(sequence , start , end - 1 )
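# Illustrative check: slowsort mutates the list in place, e.g.
# seq = [9, 3, 7, 1]; slowsort(seq); seq == [1, 3, 7, 9]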
if __name__ == "__main__":
from doctest import testmod
testmod()
| 1 | 1 |
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
'stable diffusion controlnet',
'0.22.0',
'Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.',
standard_warn=False,
stacklevel=3,
)
| 1 | '''simple docstring'''
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class DistilBertTokenizationTest ( BertTokenizationTest ):
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True
@slow
def _lowercase (self : int ):
        tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased" )
        text = tokenizer.encode("sequence builders" , add_special_tokens=False )
        text_a = tokenizer.encode("multi-sequence build" , add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
| 1 | 1 |
'''simple docstring'''
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
DATASETS_ON_HF_GCP = [
{'dataset': 'wikipedia', 'config_name': '20220301.de'},
{'dataset': 'wikipedia', 'config_name': '20220301.en'},
{'dataset': 'wikipedia', 'config_name': '20220301.fr'},
{'dataset': 'wikipedia', 'config_name': '20220301.frr'},
{'dataset': 'wikipedia', 'config_name': '20220301.it'},
{'dataset': 'wikipedia', 'config_name': '20220301.simple'},
{'dataset': 'snli', 'config_name': 'plain_text'},
{'dataset': 'eli5', 'config_name': 'LFQA_reddit'},
{'dataset': 'wiki40b', 'config_name': 'en'},
{'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.nq.compressed'},
{'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.nq.no_index'},
{'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.multiset.no_index'},
{'dataset': 'natural_questions', 'config_name': 'default'},
]
def list_datasets_on_hf_gcp_parameters ( with_config=True ):
'''simple docstring'''
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=True ) )
class TestDatasetOnHfGcp ( TestCase ):
    dataset = None
    config_name = None
    def test_dataset_info_available (self , dataset , config_name ):
        with TemporaryDirectory() as tmp_dir:
            dataset_module = dataset_module_factory(dataset , cache_dir=tmp_dir )
            builder_cls = import_main_class(dataset_module.module_path , dataset=True )
            builder_instance: DatasetBuilder = builder_cls(
                cache_dir=tmp_dir , config_name=config_name , hash=dataset_module.hash , )
            dataset_info_url = "/".join(
                [
                    HF_GCP_BASE_URL,
                    builder_instance._relative_data_dir(with_hash=False ).replace(os.sep , "/" ),
                    config.DATASET_INFO_FILENAME,
                ] )
            dataset_info_path = cached_path(dataset_info_url , cache_dir=tmp_dir )
            self.assertTrue(os.path.exists(dataset_info_path ) )
@pytest.mark.integration
def test_as_dataset_from_hf_gcs ( tmp_path_factory ):
    '''simple docstring'''
    tmp_dir = tmp_path_factory.mktemp("test_hf_gcp" ) / "test_wikipedia_simple"
    dataset_module = dataset_module_factory("wikipedia" , cache_dir=tmp_dir )
    builder_cls = import_main_class(dataset_module.module_path )
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_dir , config_name="20220301.frr" , hash=dataset_module.hash , )
    # use the HF cloud storage, not the original download_and_prepare that uses apache-beam
    builder_instance._download_and_prepare = None
    builder_instance.download_and_prepare()
    ds = builder_instance.as_dataset()
    assert ds
@pytest.mark.integration
def test_as_streaming_dataset_from_hf_gcs ( tmp_path ):
    '''simple docstring'''
    dataset_module = dataset_module_factory("wikipedia" , cache_dir=tmp_path )
    builder_cls = import_main_class(dataset_module.module_path , dataset=True )
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_path , config_name="20220301.frr" , hash=dataset_module.hash , )
    ds = builder_instance.as_streaming_dataset()
    assert ds
    assert isinstance(ds , IterableDatasetDict )
    assert "train" in ds
    assert isinstance(ds["train"] , IterableDataset )
    assert next(iter(ds["train"] ) )
| 1 | '''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f"transformer.encoder.layers.{i}.self_attn.out_proj.weight", f"encoder.layers.{i}.self_attn.out_proj.weight")
)
rename_keys.append(
(f"transformer.encoder.layers.{i}.self_attn.out_proj.bias", f"encoder.layers.{i}.self_attn.out_proj.bias")
)
rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"encoder.layers.{i}.fc1.weight"))
rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"encoder.layers.{i}.fc1.bias"))
rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"encoder.layers.{i}.fc2.weight"))
rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"encoder.layers.{i}.fc2.bias"))
rename_keys.append(
(f"transformer.encoder.layers.{i}.norm1.weight", f"encoder.layers.{i}.self_attn_layer_norm.weight")
)
rename_keys.append((f"transformer.encoder.layers.{i}.norm1.bias", f"encoder.layers.{i}.self_attn_layer_norm.bias"))
rename_keys.append((f"transformer.encoder.layers.{i}.norm2.weight", f"encoder.layers.{i}.final_layer_norm.weight"))
rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"encoder.layers.{i}.final_layer_norm.bias"))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", f"decoder.layers.{i}.self_attn.out_proj.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"decoder.layers.{i}.self_attn.out_proj.bias")
)
rename_keys.append(
(
f"transformer.decoder.layers.{i}.cross_attn.out_proj.weight",
f"decoder.layers.{i}.encoder_attn.out_proj.weight",
)
)
rename_keys.append(
(
f"transformer.decoder.layers.{i}.cross_attn.out_proj.bias",
f"decoder.layers.{i}.encoder_attn.out_proj.bias",
)
)
rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"decoder.layers.{i}.fc1.weight"))
rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"decoder.layers.{i}.fc1.bias"))
rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"decoder.layers.{i}.fc2.weight"))
rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"decoder.layers.{i}.fc2.bias"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.norm1.weight", f"decoder.layers.{i}.self_attn_layer_norm.weight")
)
rename_keys.append((f"transformer.decoder.layers.{i}.norm1.bias", f"decoder.layers.{i}.self_attn_layer_norm.bias"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.norm2.weight", f"decoder.layers.{i}.encoder_attn_layer_norm.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.norm2.bias", f"decoder.layers.{i}.encoder_attn_layer_norm.bias")
)
rename_keys.append((f"transformer.decoder.layers.{i}.norm3.weight", f"decoder.layers.{i}.final_layer_norm.weight"))
rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"decoder.layers.{i}.final_layer_norm.bias"))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f"transformer.decoder.layers.{i}.sa_qcontent_proj.weight", f"decoder.layers.{i}.sa_qcontent_proj.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.sa_kcontent_proj.weight", f"decoder.layers.{i}.sa_kcontent_proj.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.sa_qpos_proj.weight", f"decoder.layers.{i}.sa_qpos_proj.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.sa_kpos_proj.weight", f"decoder.layers.{i}.sa_kpos_proj.weight")
)
rename_keys.append((f"transformer.decoder.layers.{i}.sa_v_proj.weight", f"decoder.layers.{i}.sa_v_proj.weight"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.ca_qcontent_proj.weight", f"decoder.layers.{i}.ca_qcontent_proj.weight")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.ca_kcontent_proj.weight", f"decoder.layers.{i}.ca_kcontent_proj.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.ca_kpos_proj.weight", f"decoder.layers.{i}.ca_kpos_proj.weight")
)
rename_keys.append((f"transformer.decoder.layers.{i}.ca_v_proj.weight", f"decoder.layers.{i}.ca_v_proj.weight"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight", f"decoder.layers.{i}.ca_qpos_sine_proj.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.sa_qcontent_proj.bias", f"decoder.layers.{i}.sa_qcontent_proj.bias")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.sa_kcontent_proj.bias", f"decoder.layers.{i}.sa_kcontent_proj.bias")
)
rename_keys.append((f"transformer.decoder.layers.{i}.sa_qpos_proj.bias", f"decoder.layers.{i}.sa_qpos_proj.bias"))
rename_keys.append((f"transformer.decoder.layers.{i}.sa_kpos_proj.bias", f"decoder.layers.{i}.sa_kpos_proj.bias"))
rename_keys.append((f"transformer.decoder.layers.{i}.sa_v_proj.bias", f"decoder.layers.{i}.sa_v_proj.bias"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.ca_qcontent_proj.bias", f"decoder.layers.{i}.ca_qcontent_proj.bias")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.ca_kcontent_proj.bias", f"decoder.layers.{i}.ca_kcontent_proj.bias")
)
rename_keys.append((f"transformer.decoder.layers.{i}.ca_kpos_proj.bias", f"decoder.layers.{i}.ca_kpos_proj.bias"))
rename_keys.append((f"transformer.decoder.layers.{i}.ca_v_proj.bias", f"decoder.layers.{i}.ca_v_proj.bias"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias", f"decoder.layers.{i}.ca_qpos_sine_proj.bias")
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
('transformer.decoder.ref_point_head.layers.0.weight', 'decoder.ref_point_head.layers.0.weight'),
('transformer.decoder.ref_point_head.layers.0.bias', 'decoder.ref_point_head.layers.0.bias'),
('transformer.decoder.ref_point_head.layers.1.weight', 'decoder.ref_point_head.layers.1.weight'),
('transformer.decoder.ref_point_head.layers.1.bias', 'decoder.ref_point_head.layers.1.bias'),
('transformer.decoder.query_scale.layers.0.weight', 'decoder.query_scale.layers.0.weight'),
('transformer.decoder.query_scale.layers.0.bias', 'decoder.query_scale.layers.0.bias'),
('transformer.decoder.query_scale.layers.1.weight', 'decoder.query_scale.layers.1.weight'),
('transformer.decoder.query_scale.layers.1.bias', 'decoder.query_scale.layers.1.bias'),
('transformer.decoder.layers.0.ca_qpos_proj.weight', 'decoder.layers.0.ca_qpos_proj.weight'),
('transformer.decoder.layers.0.ca_qpos_proj.bias', 'decoder.layers.0.ca_qpos_proj.bias'),
]
)
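# Each tuple above maps an original ConditionalDETR checkpoint key (left) to the
# corresponding HuggingFace parameter name (right); the commented-out
# ca_qpos_proj entries are handled explicitly for decoder layer 0 at the end of
# the list.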
def rename_key ( state_dict , old , new ) -> None:
    '''simple docstring'''
    val = state_dict.pop(old )
    state_dict[new] = val
def rename_backbone_keys ( state_dict ):
    '''simple docstring'''
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body" , "backbone.conv_encoder.model" )
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v ( state_dict , is_panoptic=False ):
    '''simple docstring'''
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."
    # first: transformer encoder
    for i in range(6 ):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" )
        in_proj_bias = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"""encoder.layers.{i}.self_attn.q_proj.weight"""] = in_proj_weight[:256, :]
        state_dict[f"""encoder.layers.{i}.self_attn.q_proj.bias"""] = in_proj_bias[:256]
        state_dict[f"""encoder.layers.{i}.self_attn.k_proj.weight"""] = in_proj_weight[256:512, :]
        state_dict[f"""encoder.layers.{i}.self_attn.k_proj.bias"""] = in_proj_bias[256:512]
        state_dict[f"""encoder.layers.{i}.self_attn.v_proj.weight"""] = in_proj_weight[-256:, :]
        state_dict[f"""encoder.layers.{i}.self_attn.v_proj.bias"""] = in_proj_bias[-256:]
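# Note (assumption made explicit): the 256/512 slice boundaries above reflect
# conditional DETR's hidden size of 256, so the stacked in_proj matrix splits
# into three 256-row blocks for the query, key and value projections.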
def prepare_img ():
    '''simple docstring'''
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
return im
@torch.no_grad()
def convert_conditional_detr_checkpoint ( model_name , pytorch_dump_folder_path ):
'''simple docstring'''
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = "resnet101"
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
# load image processor
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    image_processor = ConditionalDetrImageProcessor(format=format )
    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img , return_tensors="pt" )
    pixel_values = encoding["pixel_values"]
logger.info(f"""Converting model {model_name}...""" )
# load original model from torch hub
    conditional_detr = torch.hub.load("DeppMeng/ConditionalDETR" , model_name , pretrained=True ).eval()
    state_dict = conditional_detr.state_dict()
# rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = "conditional_detr." + src
        rename_key(state_dict , src , dest )
    state_dict = rename_backbone_keys(state_dict )
# query, key and value matrices need special treatment
    read_in_q_k_v(state_dict , is_panoptic=is_panoptic )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr" )
                and not key.startswith("class_labels_classifier" )
                and not key.startswith("bbox_predictor" )
            ):
                val = state_dict.pop(key )
                state_dict["conditional_detr.model" + key[len("conditional_detr" ):]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key )
                state_dict["conditional_detr." + key] = val
            elif key.startswith("bbox_attention" ) or key.startswith("mask_head" ):
                continue
            else:
                val = state_dict.pop(key )
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ):
                val = state_dict.pop(key )
                state_dict[prefix + key] = val
# finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config ) if is_panoptic else ConditionalDetrForObjectDetection(config )
    model.load_state_dict(state_dict )
    model.eval()
    model.push_to_hub(repo_id=model_name , organization="DepuMeng" , commit_message="Add model" )
    # verify our conversion
    original_outputs = conditional_detr(pixel_values )
    outputs = model(pixel_values )
    assert torch.allclose(outputs.logits , original_outputs["pred_logits"] , atol=1E-4 )
    assert torch.allclose(outputs.pred_boxes , original_outputs["pred_boxes"] , atol=1E-4 )
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks , original_outputs["pred_masks"] , atol=1E-4 )
    # Save model and image processor
    logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='conditional_detr_resnet50',
type=str,
help='Name of the CONDITIONAL_DETR model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
    args = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 1 | 1 |
'''simple docstring'''
from __future__ import annotations
import math
def prime_sieve ( num : int ) -> list[int]:
    '''simple docstring'''
    if num <= 0:
        msg = f"""{num}: Invalid input, please enter a positive integer."""
        raise ValueError(msg )
    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num ) )
    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start )
            # Set multiples of start be False
            for i in range(start * start , num + 1 , start ):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1
    for j in range(end + 1 , num + 1 ):
        if sieve[j] is True:
            prime.append(j )
    return prime
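# Illustrative check: prime_sieve(10) == [2, 3, 5, 7]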
if __name__ == "__main__":
print(prime_sieve(int(input('Enter a positive integer: ').strip())))
| 1 | '''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
logger = logging.get_logger(__name__)
class CLIPFeatureExtractor ( CLIPImageProcessor ):
    def __init__(self , *args , **kwargs ) -> None:
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead." , FutureWarning , )
        super().__init__(*args , **kwargs )
| 1 | 1 |
'''simple docstring'''
import logging
import os
import threading
import time
try:
    import warnings
except ImportError:
    warnings = None
try:
    import msvcrt
except ImportError:
    msvcrt = None
try:
    import fcntl
except ImportError:
    fcntl = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
    TimeoutError = OSError
# Data
# ------------------------------------------------
__all__ = [
    'Timeout',
    'BaseFileLock',
    'WindowsFileLock',
    'UnixFileLock',
    'SoftFileLock',
    'FileLock',
]
__version__ = '3.0.12'
_logger = None
def logger () -> logging.Logger:
    '''simple docstring'''
    global _logger
    _logger = _logger or logging.getLogger(__name__ )
    return _logger
class Timeout ( TimeoutError ):
    def __init__(self , lock_file ):
        self.lock_file = lock_file
        return None
    def __str__(self ):
        temp = f"""The file lock '{self.lock_file}' could not be acquired."""
        return temp
class _Acquire_ReturnProxy :
    def __init__(self , lock ):
        self.lock = lock
        return None
    def __enter__(self ):
        return self.lock
    def __exit__(self , exc_type , exc_value , traceback ):
        self.lock.release()
        return None
class BaseFileLock :
    def __init__(self , lock_file , timeout=-1 , max_filename_length=None ):
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file , max_filename_length )
        # The path to the lock file.
        self._lock_file = lock_file
        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None
        # The default timeout value.
        self.timeout = timeout
        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()
        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None
    @property
    def lock_file (self ):
        return self._lock_file
    @property
    def timeout (self ):
        return self._timeout
    @timeout.setter
    def timeout (self , value ):
        self._timeout = float(value )
        return None
    def _acquire (self ):
        raise NotImplementedError()
    def _release (self ):
        raise NotImplementedError()
    @property
    def is_locked (self ):
        return self._lock_file_fd is not None
    def acquire (self , timeout=None , poll_intervall=0.05 ):
        # Use the default timeout, if no timeout is provided.
        if timeout is None:
            timeout = self.timeout
        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1
        lock_id = id(self )
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f"""Attempting to acquire lock {lock_id} on {lock_filename}""" )
                        self._acquire()
                if self.is_locked:
                    logger().debug(f"""Lock {lock_id} acquired on {lock_filename}""" )
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f"""Timeout on acquiring lock {lock_id} on {lock_filename}""" )
                    raise Timeout(self._lock_file )
                else:
                    logger().debug(
                        f"""Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...""" )
                    time.sleep(poll_intervall )
        except: # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0 , self._lock_counter - 1 )
            raise
        return _Acquire_ReturnProxy(lock=self )
    def release (self , force=False ):
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1
                if self._lock_counter == 0 or force:
                    lock_id = id(self )
                    lock_filename = self._lock_file
                    logger().debug(f"""Attempting to release lock {lock_id} on {lock_filename}""" )
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f"""Lock {lock_id} released on {lock_filename}""" )
        return None
    def __enter__(self ):
        self.acquire()
        return self
    def __exit__(self , exc_type , exc_value , traceback ):
        self.release()
        return None
    def __del__(self ):
        self.release(force=True )
        return None
    def hash_filename_if_too_long (self , path : str , max_length : int ) -> str:
        filename = os.path.basename(path )
        if len(filename ) > max_length and max_length > 0:
            dirname = os.path.dirname(path )
            hashed_filename = str(hash(filename ) )
            new_filename = filename[: max_length - len(hashed_filename ) - 8] + "..." + hashed_filename + ".lock"
            return os.path.join(dirname , new_filename )
        else:
            return path
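    # e.g. a basename longer than *max_length* is shortened to
    # "<truncated name>...<hash>.lock" inside the same directory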
class WindowsFileLock ( BaseFileLock ):
    def __init__(self , lock_file , timeout=-1 , max_filename_length=None ):
        from .file_utils import relative_to_absolute_path
        super().__init__(lock_file , timeout=timeout , max_filename_length=max_filename_length )
        self._lock_file = "\\\\?\\" + relative_to_absolute_path(self.lock_file )
    def _acquire (self ):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        try:
            fd = os.open(self._lock_file , open_mode )
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd , msvcrt.LK_NBLCK , 1 )
            except OSError:
                os.close(fd )
            else:
                self._lock_file_fd = fd
        return None
    def _release (self ):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd , msvcrt.LK_UNLCK , 1 )
        os.close(fd )
        try:
            os.remove(self._lock_file )
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None
class UnixFileLock ( BaseFileLock ):
    def __init__(self , lock_file , timeout=-1 , max_filename_length=None ):
        max_filename_length = os.statvfs(os.path.dirname(lock_file ) ).f_namemax
        super().__init__(lock_file , timeout=timeout , max_filename_length=max_filename_length )
    def _acquire (self ):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file , open_mode )
        try:
            fcntl.flock(fd , fcntl.LOCK_EX | fcntl.LOCK_NB )
        except OSError:
            os.close(fd )
        else:
            self._lock_file_fd = fd
        return None
    def _release (self ):
        # Do not remove the lockfile:
        #
        # https://github.com/benediktschmitt/py-filelock/issues/31
        # https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd , fcntl.LOCK_UN )
        os.close(fd )
        return None
class SoftFileLock ( BaseFileLock ):
    def _acquire (self ):
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            fd = os.open(self._lock_file , open_mode )
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None
    def _release (self ):
        os.close(self._lock_file_fd )
        self._lock_file_fd = None
        try:
            os.remove(self._lock_file )
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None
FileLock = None
if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock
if warnings is not None:
warnings.warn('only soft file lock is available')
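# Minimal usage sketch (the lock-file path is illustrative):
#
#   lock = FileLock("cache.json.lock", timeout=5)
#   with lock:
#       ...  # exclusive critical section; released on __exit__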
| 1 | '''simple docstring'''
from __future__ import annotations
import queue
class TreeNode :
    def __init__(self , data : int ):
        self.data = data
        self.left = None
        self.right = None
def build_tree () -> TreeNode:
    '''simple docstring'''
    print("\n********Press N to stop entering at any point in time********\n" )
    check = input("Enter the value of the root node: " ).strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check ) )
    q.put(tree_node )
    while not q.empty():
        node_found = q.get()
        msg = f"""Enter the left node of {node_found.data}: """
        check = input(msg ).strip().lower() or "n"
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check ) )
        node_found.left = left_node
        q.put(left_node )
        msg = f"""Enter the right node of {node_found.data}: """
        check = input(msg ).strip().lower() or "n"
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check ) )
        node_found.right = right_node
        q.put(right_node )
    raise
def pre_order ( node : TreeNode ) -> None:
    '''simple docstring'''
    if not isinstance(node , TreeNode ) or not node:
        return
    print(node.data , end="," )
    pre_order(node.left )
    pre_order(node.right )
def in_order ( node : TreeNode ) -> None:
    '''simple docstring'''
    if not isinstance(node , TreeNode ) or not node:
        return
    in_order(node.left )
    print(node.data , end="," )
    in_order(node.right )
def post_order ( node : TreeNode ) -> None:
    '''simple docstring'''
    if not isinstance(node , TreeNode ) or not node:
        return
    post_order(node.left )
    post_order(node.right )
    print(node.data , end="," )
def level_order ( node : TreeNode ) -> None:
    '''simple docstring'''
    if not isinstance(node , TreeNode ) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node )
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data , end="," )
        if node_dequeued.left:
            q.put(node_dequeued.left )
        if node_dequeued.right:
            q.put(node_dequeued.right )
def level_order_actual ( node : TreeNode ) -> None:
    '''simple docstring'''
    if not isinstance(node , TreeNode ) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node )
    while not q.empty():
        list_ = []
        while not q.empty():
            node_dequeued = q.get()
            print(node_dequeued.data , end="," )
            if node_dequeued.left:
                list_.append(node_dequeued.left )
            if node_dequeued.right:
                list_.append(node_dequeued.right )
        print()
        for node in list_:
            q.put(node )
def pre_order_iter ( node : TreeNode ) -> None:
    '''simple docstring'''
    if not isinstance(node , TreeNode ) or not node:
        return
    stack = []
    n = node
    while n or stack:
        while n: # start from root node, find its left child
            print(n.data , end="," )
            stack.append(n )
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right
def in_order_iter ( node : TreeNode ) -> None:
    '''simple docstring'''
    if not isinstance(node , TreeNode ) or not node:
        return
    stack = []
    n = node
    while n or stack:
        while n:
            stack.append(n )
            n = n.left
        n = stack.pop()
        print(n.data , end="," )
        n = n.right
def post_order_iter ( node : TreeNode ) -> None:
    '''simple docstring'''
    if not isinstance(node , TreeNode ) or not node:
        return
    stack_in , stack_out = [], []
    n = node
    stack_in.append(n )
    while stack_in: # to find the reversed order of post order, store it in stack_out
        n = stack_in.pop()
        if n.left:
            stack_in.append(n.left )
        if n.right:
            stack_in.append(n.right )
        stack_out.append(n )
    while stack_out: # pop up from stack_out will be the post order
        print(stack_out.pop().data , end="," )
def prompt ( s : str = "" , width=50 , char="*" ) -> str:
    '''simple docstring'''
    if not s:
        return "\n" + width * char
    left , extra = divmod(width - len(s ) - 2 , 2 )
    return f"""{left * char} {s} {(left + extra) * char}"""
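# e.g. prompt("Hi", width=10) returns "*** Hi ***", while prompt() returns a
# newline followed by a 50-character row of '*'.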
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt('Binary Tree Traversals'))
    node: TreeNode = build_tree()
print(prompt('Pre Order Traversal'))
pre_order(node)
print(prompt() + '\n')
print(prompt('In Order Traversal'))
in_order(node)
print(prompt() + '\n')
print(prompt('Post Order Traversal'))
post_order(node)
print(prompt() + '\n')
print(prompt('Level Order Traversal'))
level_order(node)
print(prompt() + '\n')
print(prompt('Actual Level Order Traversal'))
level_order_actual(node)
print('*' * 50 + '\n')
print(prompt('Pre Order Traversal - Iteration Version'))
pre_order_iter(node)
print(prompt() + '\n')
print(prompt('In Order Traversal - Iteration Version'))
in_order_iter(node)
print(prompt() + '\n')
print(prompt('Post Order Traversal - Iteration Version'))
post_order_iter(node)
print(prompt())
| 1 | 1 |
'''simple docstring'''
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class TreeNode :
    data : float
    left : TreeNode | None = None
    right : TreeNode | None = None
def is_binary_search_tree ( node : TreeNode | None ) -> bool:
    '''simple docstring'''
    def is_valid_tree(node : TreeNode | None ) -> bool:
        if node is None:
            return True
        if not isinstance(node , TreeNode ):
            return False
        try:
            float(node.data )
        except (TypeError, ValueError):
            return False
        return is_valid_tree(node.left ) and is_valid_tree(node.right )
    if not is_valid_tree(node ):
        raise ValueError(
            "Each node should be type of TreeNode and data should be float." )
    def is_binary_search_tree_recursive_check(
        node : TreeNode | None , left_bound : float , right_bound : float ) -> bool:
        if node is None:
            return True
        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left , left_bound , node.data )
            and is_binary_search_tree_recursive_check(
                node.right , node.data , right_bound )
        )
    return is_binary_search_tree_recursive_check(node , -float("inf" ) , float("inf" ) )
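# Illustrative check with the dataclass above:
# root = TreeNode(2.0, TreeNode(1.0), TreeNode(3.0))
# is_binary_search_tree(root)                          # True
# is_binary_search_tree(TreeNode(2.0, TreeNode(5.0)))  # False: left child too large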
if __name__ == "__main__":
import doctest
doctest.testmod()
| 1 | '''simple docstring'''
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
@add_end_docstrings(
    PIPELINE_INIT_ARGS , r"""
top_k (`int`, defaults to 5):
The number of predictions to return.
targets (`str` or `List[str]`, *optional*):
When passed, the model will limit the scores to the passed targets instead of looking up in the whole
vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
token will be used (with a warning, and that might be slower).
""" , )
class FillMaskPipeline ( Pipeline ):
    def get_masked_index (self , input_ids : GenericTensor ):
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
        elif self.framework == "pt":
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=False )
        else:
            raise ValueError("Unsupported framework" )
        return masked_index
    def _ensure_exactly_one_mask_token (self , input_ids : GenericTensor ):
        masked_index = self.get_masked_index(input_ids )
        numel = np.prod(masked_index.shape )
        if numel < 1:
            raise PipelineException(
                "fill-mask" , self.model.base_model_prefix , f"""No mask_token ({self.tokenizer.mask_token}) found on the input""" , )
    def ensure_exactly_one_mask_token (self , model_inputs : GenericTensor ):
        if isinstance(model_inputs , list ):
            for model_input in model_inputs:
                self._ensure_exactly_one_mask_token(model_input["input_ids"][0] )
        else:
            for input_ids in model_inputs["input_ids"]:
                self._ensure_exactly_one_mask_token(input_ids )
    def preprocess (self , inputs , return_tensors=None , **preprocess_parameters ):
        if return_tensors is None:
            return_tensors = self.framework
        model_inputs = self.tokenizer(inputs , return_tensors=return_tensors )
        self.ensure_exactly_one_mask_token(model_inputs )
        return model_inputs
    def _forward (self , model_inputs ):
        model_outputs = self.model(**model_inputs )
        model_outputs["input_ids"] = model_inputs["input_ids"]
        return model_outputs
    def postprocess (self , model_outputs , top_k=5 , target_ids=None ):
        # Cap top_k if there are targets
        if target_ids is not None and target_ids.shape[0] < top_k:
            top_k = target_ids.shape[0]
        input_ids = model_outputs["input_ids"][0]
        outputs = model_outputs["logits"]
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
            outputs = outputs.numpy()
            logits = outputs[0, masked_index, :]
            probs = stable_softmax(logits , axis=-1 )
            if target_ids is not None:
                probs = tf.gather_nd(tf.squeeze(probs , 0 ) , target_ids.reshape(-1 , 1 ) )
                probs = tf.expand_dims(probs , 0 )
            topk = tf.math.top_k(probs , k=top_k )
            values , predictions = topk.values.numpy(), topk.indices.numpy()
        else:
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=False ).squeeze(-1 )
            # Fill mask pipeline supports only one ${mask_token} per sample
            logits = outputs[0, masked_index, :]
            probs = logits.softmax(dim=-1 )
            if target_ids is not None:
                probs = probs[..., target_ids]
            values , predictions = probs.topk(top_k )
        result = []
        single_mask = values.shape[0] == 1
        for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ):
            row = []
            for v, p in zip(_values , _predictions ):
                # Copy is important since we're going to modify this array in place
                tokens = input_ids.numpy().copy()
                if target_ids is not None:
                    p = target_ids[p].tolist()
                tokens[masked_index[i]] = p
                # Filter padding out:
                tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
                # Originally we skip special tokens to give readable output.
                # For multi-mask inputs, though, the other [MASK] tokens would
                # otherwise be removed, making the output look odd, so we add them back
                sequence = self.tokenizer.decode(tokens , skip_special_tokens=False )
                proposition = {"score": v, "token": p, "token_str": self.tokenizer.decode([p] ), "sequence": sequence}
                row.append(proposition )
            result.append(row )
        if single_mask:
            return result[0]
        return result
    def get_target_ids (self , targets , top_k=None ):
        if isinstance(targets , str ):
            targets = [targets]
        try:
            vocab = self.tokenizer.get_vocab()
        except Exception:
            vocab = {}
        target_ids = []
        for target in targets:
            id_ = vocab.get(target , None )
            if id_ is None:
                input_ids = self.tokenizer(
                    target , add_special_tokens=False , return_attention_mask=False , return_token_type_ids=False , max_length=1 , truncation=True , )["input_ids"]
                if len(input_ids ) == 0:
                    logger.warning(
                        f"""The specified target token `{target}` does not exist in the model vocabulary. """
                        "We cannot replace it with anything meaningful, ignoring it" )
                    continue
                id_ = input_ids[0]
                # XXX: If users encounter this path,
                # it becomes pretty slow, so let's make sure
                # the warning enables them to fix the input to
                # get faster performance.
                logger.warning(
                    f"""The specified target token `{target}` does not exist in the model vocabulary. """
                    f"""Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.""" )
            target_ids.append(id_ )
        target_ids = list(set(target_ids ) )
        if len(target_ids ) == 0:
            raise ValueError("At least one target must be provided when passed." )
        target_ids = np.array(target_ids )
        return target_ids
    def _sanitize_parameters (self , top_k=None , targets=None ):
        postprocess_params = {}
        if targets is not None:
            target_ids = self.get_target_ids(targets , top_k )
            postprocess_params["target_ids"] = target_ids
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        if self.tokenizer.mask_token_id is None:
            raise PipelineException(
                "fill-mask" , self.model.base_model_prefix , "The tokenizer does not define a `mask_token`." )
        return {}, {}, postprocess_params
    def __call__(self , inputs , *args , **kwargs ):
        outputs = super().__call__(inputs , **kwargs )
        if isinstance(inputs , list ) and len(inputs ) == 1:
            return outputs[0]
        return outputs
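# Minimal usage sketch (checkpoint name is illustrative):
#
#   from transformers import pipeline
#   unmasker = pipeline("fill-mask", model="distilbert-base-uncased")
#   unmasker("Paris is the [MASK] of France.", top_k=2)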
| 1 | 1 |
'''simple docstring'''
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetY32gf, RegNetY64gf, RegNetY128gf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
@dataclass
class Tracker :
    module : nn.Module
    traced : List[nn.Module] = field(default_factory=list )
    handles : list = field(default_factory=list )
    def _forward_hook (self , m , inputs : Tensor , outputs : Tensor ):
        has_not_submodules = len(list(m.modules() ) ) == 1 or isinstance(m , nn.Conv2d ) or isinstance(m , nn.BatchNorm2d )
        if has_not_submodules:
            self.traced.append(m )
    def __call__(self , x : Tensor ):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook ) )
        self.module(x )
        [x.remove() for x in self.handles]
        return self
    @property
    def parametrized (self ):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class __A :
a__ : nn.Module
a__ : nn.Module
a__ : int = 1
a__ : List = field(default_factory=UpperCamelCase__ )
a__ : List = field(default_factory=UpperCamelCase__ )
a__ : bool = True
def __call__(self : List[Any] , __a : Tensor ):
UpperCAmelCase_ = Tracker(self.dest )(__a ).parametrized
UpperCAmelCase_ = Tracker(self.src )(__a ).parametrized
UpperCAmelCase_ = list(filter(lambda __a : type(__a ) not in self.src_skip , __a ) )
UpperCAmelCase_ = list(filter(lambda __a : type(__a ) not in self.dest_skip , __a ) )
if len(__a ) != len(__a ) and self.raise_if_mismatch:
raise Exception(
f"""Numbers of operations are different. Source module has {len(__a )} operations while"""
f""" destination module has {len(__a )}.""" )
for dest_m, src_m in zip(__a , __a ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
print(f"""Transfered from={src_m} to={dest_m}""" )
class __A ( nn.Module ):
def __init__(self : str , __a : nn.Module ):
super().__init__()
UpperCAmelCase_ = []
# - get the stem
feature_blocks.append(("conv1", model.stem) )
# - get all the feature blocks
for k, v in model.trunk_output.named_children():
assert k.startswith("block" ), f"""Unexpected layer name {k}"""
UpperCAmelCase_ = len(__a ) + 1
feature_blocks.append((f"""res{block_index}""", v) )
UpperCAmelCase_ = nn.ModuleDict(__a )
def _lowercase (self : Any , __a : Tensor ):
return get_trunk_forward_outputs(
__a , out_feat_keys=__a , feature_blocks=self._feature_blocks , )
class __A ( UpperCamelCase__ ):
def _lowercase (self : str , __a : str ):
UpperCAmelCase_ = x.split("-" )
return x_split[0] + x_split[1] + "_" + "".join(x_split[2:] )
def __getitem__(self : Any , __a : str ):
# default to timm!
if x not in self:
UpperCAmelCase_ = self.convert_name_to_timm(__a )
UpperCAmelCase_ = partial(lambda: (timm.create_model(__a , pretrained=__a ).eval(), None) )
else:
UpperCAmelCase_ = super().__getitem__(__a )
return val
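# For reference, convert_name_to_timm maps checkpoint names onto timm ids,
# e.g. "regnet-y-040" -> "regnety_040", which is exactly what the
# split("-")/join logic above produces.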
class __A ( UpperCamelCase__ ):
def __getitem__(self : List[Any] , __a : str ):
if "seer" in x and "in1k" not in x:
UpperCAmelCase_ = RegNetModel
else:
UpperCAmelCase_ = RegNetForImageClassification
return val
def lowerCAmelCase_ ( snake_case_ : str , snake_case_ : int , snake_case_ : List[Tuple[str, str]] ) -> Union[str, Any]:
'''simple docstring'''
for from_key, to_key in keys:
UpperCAmelCase_ = from_state_dict[from_key].clone()
print(f"""Copied key={from_key} to={to_key}""" )
return to_state_dict
def lowerCAmelCase_ ( snake_case_ : str , snake_case_ : Callable[[], nn.Module] , snake_case_ : Callable[[], nn.Module] , snake_case_ : RegNetConfig , snake_case_ : Path , snake_case_ : bool = True , ) -> int:
'''simple docstring'''
print(f"""Converting {name}...""" )
with torch.no_grad():
UpperCAmelCase_ , UpperCAmelCase_ = from_model_func()
UpperCAmelCase_ = our_model_func(snake_case_ ).eval()
UpperCAmelCase_ = ModuleTransfer(src=snake_case_ , dest=snake_case_ , raise_if_mismatch=snake_case_ )
UpperCAmelCase_ = torch.randn((1, 3, 2_24, 2_24) )
module_transfer(snake_case_ )
if from_state_dict is not None:
UpperCAmelCase_ = []
# for seer - in1k finetuned we have to manually copy the head
if "seer" in name and "in1k" in name:
UpperCAmelCase_ = [("0.clf.0.weight", "classifier.1.weight"), ("0.clf.0.bias", "classifier.1.bias")]
UpperCAmelCase_ = manually_copy_vissl_head(snake_case_ , our_model.state_dict() , snake_case_ )
our_model.load_state_dict(snake_case_ )
UpperCAmelCase_ = our_model(snake_case_ , output_hidden_states=snake_case_ )
UpperCAmelCase_ = (
our_outputs.logits if isinstance(snake_case_ , snake_case_ ) else our_outputs.last_hidden_state
)
UpperCAmelCase_ = from_model(snake_case_ )
UpperCAmelCase_ = from_output[-1] if type(snake_case_ ) is list else from_output
    # now, since I don't want to use any config files, the vissl seer model doesn't actually have a head, so let's just check the last hidden state
if "seer" in name and "in1k" in name:
UpperCAmelCase_ = our_outputs.hidden_states[-1]
assert torch.allclose(snake_case_ , snake_case_ ), "The model logits don't match the original one."
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / name , commit_message="Add model" , use_temp_dir=snake_case_ , )
UpperCAmelCase_ = 2_24 if "seer" not in name else 3_84
# we can use the convnext one
UpperCAmelCase_ = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k" , size=snake_case_ )
image_processor.push_to_hub(
repo_path_or_name=save_directory / name , commit_message="Add image processor" , use_temp_dir=snake_case_ , )
print(f"""Pushed {name}""" )
def lowerCAmelCase_ ( snake_case_ : Path , snake_case_ : str = None , snake_case_ : bool = True ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = "imagenet-1k-id2label.json"
UpperCAmelCase_ = 10_00
UpperCAmelCase_ = (1, num_labels)
UpperCAmelCase_ = "huggingface/label-files"
UpperCAmelCase_ = num_labels
UpperCAmelCase_ = json.load(open(cached_download(hf_hub_url(snake_case_ , snake_case_ , repo_type="dataset" ) ) , "r" ) )
UpperCAmelCase_ = {int(snake_case_ ): v for k, v in idalabel.items()}
UpperCAmelCase_ = idalabel
UpperCAmelCase_ = {v: k for k, v in idalabel.items()}
UpperCAmelCase_ = partial(snake_case_ , num_labels=snake_case_ , idalabel=snake_case_ , labelaid=snake_case_ )
UpperCAmelCase_ = {
"regnet-x-002": ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 1_52, 3_68] , groups_width=8 , layer_type="x" ),
"regnet-x-004": ImageNetPreTrainedConfig(
depths=[1, 2, 7, 12] , hidden_sizes=[32, 64, 1_60, 3_84] , groups_width=16 , layer_type="x" ),
"regnet-x-006": ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7] , hidden_sizes=[48, 96, 2_40, 5_28] , groups_width=24 , layer_type="x" ),
"regnet-x-008": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5] , hidden_sizes=[64, 1_28, 2_88, 6_72] , groups_width=16 , layer_type="x" ),
"regnet-x-016": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 2] , hidden_sizes=[72, 1_68, 4_08, 9_12] , groups_width=24 , layer_type="x" ),
"regnet-x-032": ImageNetPreTrainedConfig(
depths=[2, 6, 15, 2] , hidden_sizes=[96, 1_92, 4_32, 10_08] , groups_width=48 , layer_type="x" ),
"regnet-x-040": ImageNetPreTrainedConfig(
depths=[2, 5, 14, 2] , hidden_sizes=[80, 2_40, 5_60, 13_60] , groups_width=40 , layer_type="x" ),
"regnet-x-064": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[1_68, 3_92, 7_84, 16_24] , groups_width=56 , layer_type="x" ),
"regnet-x-080": ImageNetPreTrainedConfig(
depths=[2, 5, 15, 1] , hidden_sizes=[80, 2_40, 7_20, 19_20] , groups_width=1_20 , layer_type="x" ),
"regnet-x-120": ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[2_24, 4_48, 8_96, 22_40] , groups_width=1_12 , layer_type="x" ),
"regnet-x-160": ImageNetPreTrainedConfig(
depths=[2, 6, 13, 1] , hidden_sizes=[2_56, 5_12, 8_96, 20_48] , groups_width=1_28 , layer_type="x" ),
"regnet-x-320": ImageNetPreTrainedConfig(
depths=[2, 7, 13, 1] , hidden_sizes=[3_36, 6_72, 13_44, 25_20] , groups_width=1_68 , layer_type="x" ),
# y variant
"regnet-y-002": ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 1_52, 3_68] , groups_width=8 ),
"regnet-y-004": ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6] , hidden_sizes=[48, 1_04, 2_08, 4_40] , groups_width=8 ),
"regnet-y-006": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4] , hidden_sizes=[48, 1_12, 2_56, 6_08] , groups_width=16 ),
"regnet-y-008": ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2] , hidden_sizes=[64, 1_28, 3_20, 7_68] , groups_width=16 ),
"regnet-y-016": ImageNetPreTrainedConfig(
depths=[2, 6, 17, 2] , hidden_sizes=[48, 1_20, 3_36, 8_88] , groups_width=24 ),
"regnet-y-032": ImageNetPreTrainedConfig(
depths=[2, 5, 13, 1] , hidden_sizes=[72, 2_16, 5_76, 15_12] , groups_width=24 ),
"regnet-y-040": ImageNetPreTrainedConfig(
depths=[2, 6, 12, 2] , hidden_sizes=[1_28, 1_92, 5_12, 10_88] , groups_width=64 ),
"regnet-y-064": ImageNetPreTrainedConfig(
depths=[2, 7, 14, 2] , hidden_sizes=[1_44, 2_88, 5_76, 12_96] , groups_width=72 ),
"regnet-y-080": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[1_68, 4_48, 8_96, 20_16] , groups_width=56 ),
"regnet-y-120": ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[2_24, 4_48, 8_96, 22_40] , groups_width=1_12 ),
"regnet-y-160": ImageNetPreTrainedConfig(
depths=[2, 4, 11, 1] , hidden_sizes=[2_24, 4_48, 12_32, 30_24] , groups_width=1_12 ),
"regnet-y-320": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[2_32, 6_96, 13_92, 37_12] , groups_width=2_32 ),
# models created by SEER -> https://arxiv.org/abs/2202.08360
"regnet-y-320-seer": RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[2_32, 6_96, 13_92, 37_12] , groups_width=2_32 ),
"regnet-y-640-seer": RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[3_28, 9_84, 19_68, 49_20] , groups_width=3_28 ),
"regnet-y-1280-seer": RegNetConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[5_28, 10_56, 29_04, 73_92] , groups_width=2_64 ),
"regnet-y-2560-seer": RegNetConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[6_40, 16_96, 25_44, 50_88] , groups_width=6_40 ),
"regnet-y-10b-seer": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[20_20, 40_40, 1_11_10, 2_82_80] , groups_width=10_10 ),
# finetuned on imagenet
"regnet-y-320-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[2_32, 6_96, 13_92, 37_12] , groups_width=2_32 ),
"regnet-y-640-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[3_28, 9_84, 19_68, 49_20] , groups_width=3_28 ),
"regnet-y-1280-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[5_28, 10_56, 29_04, 73_92] , groups_width=2_64 ),
"regnet-y-2560-seer-in1k": ImageNetPreTrainedConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[6_40, 16_96, 25_44, 50_88] , groups_width=6_40 ),
"regnet-y-10b-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[20_20, 40_40, 1_11_10, 2_82_80] , groups_width=10_10 ),
}
UpperCAmelCase_ = NameToOurModelFuncMap()
UpperCAmelCase_ = NameToFromModelFuncMap()
# add seer weights logic
def load_using_classy_vision(snake_case_ : str , snake_case_ : Callable[[], nn.Module] ) -> Tuple[nn.Module, Dict]:
UpperCAmelCase_ = torch.hub.load_state_dict_from_url(snake_case_ , model_dir=str(snake_case_ ) , map_location="cpu" )
UpperCAmelCase_ = model_func()
# check if we have a head, if yes add it
UpperCAmelCase_ = files["classy_state_dict"]["base_model"]["model"]
UpperCAmelCase_ = model_state_dict["trunk"]
model.load_state_dict(snake_case_ )
return model.eval(), model_state_dict["heads"]
# pretrained
UpperCAmelCase_ = partial(
snake_case_ , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
UpperCAmelCase_ = partial(
snake_case_ , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
UpperCAmelCase_ = partial(
snake_case_ , "https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , )
UpperCAmelCase_ = partial(
snake_case_ , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch" , lambda: FakeRegNetVisslWrapper(
        RegNet(RegNetParams(depth=27 , group_width=10_10 , w_0=17_44 , w_a=620.83 , w_m=2.52 ) ) ) , )
# IN1K finetuned
UpperCAmelCase_ = partial(
snake_case_ , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
UpperCAmelCase_ = partial(
snake_case_ , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
UpperCAmelCase_ = partial(
snake_case_ , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , )
UpperCAmelCase_ = partial(
snake_case_ , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch" , lambda: FakeRegNetVisslWrapper(
        RegNet(RegNetParams(depth=27 , group_width=10_10 , w_0=17_44 , w_a=620.83 , w_m=2.52 ) ) ) , )
if model_name:
convert_weight_and_push(
snake_case_ , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , names_to_config[model_name] , snake_case_ , snake_case_ , )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(
snake_case_ , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , snake_case_ , snake_case_ , snake_case_ , )
return config, expected_shape
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_: Dict =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help=(
        'The name of the model you wish to convert, it must be one of the supported regnet* architectures,'
        ' currently: regnetx-*, regnety-*. If `None`, all of them will be converted.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=Path,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
default=True,
type=bool,
required=False,
help='If True, push model and image processor to the hub.',
)
SCREAMING_SNAKE_CASE_: List[str] =parser.parse_args()
SCREAMING_SNAKE_CASE_: Path =args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 1 | '''simple docstring'''
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE_: str =logging.getLogger(__name__)
@dataclass(frozen=UpperCamelCase__ )
class __A :
a__ : str
a__ : str
a__ : Optional[str] = None
a__ : Optional[str] = None
a__ : Optional[str] = None
@dataclass(frozen=UpperCamelCase__ )
class __A :
a__ : List[int]
a__ : Optional[List[int]] = None
a__ : Optional[List[int]] = None
a__ : Optional[Union[int, float]] = None
a__ : Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class __A ( UpperCamelCase__ ):
a__ : List[InputFeatures]
def __init__(self : Any , __a : str , __a : PreTrainedTokenizer , __a : str , __a : Optional[int] = None , __a : Dict=False , __a : bool = False , ):
UpperCAmelCase_ = hans_processors[task]()
UpperCAmelCase_ = os.path.join(
__a , "cached_{}_{}_{}_{}".format(
"dev" if evaluate else "train" , tokenizer.__class__.__name__ , str(__a ) , __a , ) , )
UpperCAmelCase_ = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
UpperCAmelCase_ , UpperCAmelCase_ = label_list[2], label_list[1]
UpperCAmelCase_ = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
UpperCAmelCase_ = cached_features_file + ".lock"
with FileLock(__a ):
if os.path.exists(__a ) and not overwrite_cache:
logger.info(f"""Loading features from cached file {cached_features_file}""" )
UpperCAmelCase_ = torch.load(__a )
else:
logger.info(f"""Creating features from dataset file at {data_dir}""" )
UpperCAmelCase_ = (
processor.get_dev_examples(__a ) if evaluate else processor.get_train_examples(__a )
)
logger.info("Training examples: %s" , len(__a ) )
UpperCAmelCase_ = hans_convert_examples_to_features(__a , __a , __a , __a )
logger.info("Saving features into cached file %s" , __a )
torch.save(self.features , __a )
def __len__(self : List[Any] ):
return len(self.features )
def __getitem__(self : Any , __a : Optional[Any] ):
return self.features[i]
def _lowercase (self : Union[str, Any] ):
return self.label_list
if is_tf_available():
import tensorflow as tf
class __A :
a__ : List[InputFeatures]
def __init__(self : Union[str, Any] , __a : str , __a : PreTrainedTokenizer , __a : str , __a : Optional[int] = 128 , __a : Any=False , __a : bool = False , ):
UpperCAmelCase_ = hans_processors[task]()
UpperCAmelCase_ = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
UpperCAmelCase_ , UpperCAmelCase_ = label_list[2], label_list[1]
UpperCAmelCase_ = label_list
UpperCAmelCase_ = processor.get_dev_examples(__a ) if evaluate else processor.get_train_examples(__a )
UpperCAmelCase_ = hans_convert_examples_to_features(__a , __a , __a , __a )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc="convert examples to features" ):
if ex_index % 10000 == 0:
logger.info("Writing example %d of %d" % (ex_index, len(__a )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
UpperCAmelCase_ = tf.data.Dataset.from_generator(
__a , (
{
"example_id": tf.intaa,
"input_ids": tf.intaa,
"attention_mask": tf.intaa,
"token_type_ids": tf.intaa,
},
tf.intaa,
) , (
{
"example_id": tf.TensorShape([] ),
"input_ids": tf.TensorShape([None, None] ),
"attention_mask": tf.TensorShape([None, None] ),
"token_type_ids": tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) , )
def _lowercase (self : int ):
return self.dataset
def __len__(self : Any ):
return len(self.features )
def __getitem__(self : int , __a : Union[str, Any] ):
return self.features[i]
def _lowercase (self : int ):
return self.label_list
class __A ( UpperCamelCase__ ):
def _lowercase (self : List[Any] , __a : Dict ):
return self._create_examples(self._read_tsv(os.path.join(__a , "heuristics_train_set.txt" ) ) , "train" )
def _lowercase (self : Any , __a : List[Any] ):
return self._create_examples(self._read_tsv(os.path.join(__a , "heuristics_evaluation_set.txt" ) ) , "dev" )
def _lowercase (self : Any ):
return ["contradiction", "entailment", "neutral"]
def _lowercase (self : Union[str, Any] , __a : Optional[int] , __a : Union[str, Any] ):
UpperCAmelCase_ = []
for i, line in enumerate(__a ):
if i == 0:
continue
UpperCAmelCase_ = "%s-%s" % (set_type, line[0])
UpperCAmelCase_ = line[5]
UpperCAmelCase_ = line[6]
UpperCAmelCase_ = line[7][2:] if line[7].startswith("ex" ) else line[7]
UpperCAmelCase_ = line[0]
examples.append(InputExample(guid=__a , text_a=__a , text_b=__a , label=__a , pairID=__a ) )
return examples
def lowerCAmelCase_ ( snake_case_ : List[InputExample] , snake_case_ : List[str] , snake_case_ : int , snake_case_ : PreTrainedTokenizer , ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = {label: i for i, label in enumerate(snake_case_ )}
UpperCAmelCase_ = []
for ex_index, example in tqdm.tqdm(enumerate(snake_case_ ) , desc="convert examples to features" ):
if ex_index % 1_00_00 == 0:
logger.info("Writing example %d" % (ex_index) )
UpperCAmelCase_ = tokenizer(
example.text_a , example.text_b , add_special_tokens=snake_case_ , max_length=snake_case_ , padding="max_length" , truncation=snake_case_ , return_overflowing_tokens=snake_case_ , )
UpperCAmelCase_ = label_map[example.label] if example.label in label_map else 0
UpperCAmelCase_ = int(example.pairID )
features.append(InputFeatures(**snake_case_ , label=snake_case_ , pairID=snake_case_ ) )
for i, example in enumerate(examples[:5] ):
logger.info("*** Example ***" )
logger.info(f"""guid: {example}""" )
logger.info(f"""features: {features[i]}""" )
return features
SCREAMING_SNAKE_CASE_: int ={
'hans': 3,
}
SCREAMING_SNAKE_CASE_: Any ={
'hans': HansProcessor,
}
| 1 | 1 |
'''simple docstring'''
def lowerCAmelCase_ ( snake_case_ : str ) -> bool:
'''simple docstring'''
return credit_card_number.startswith(("34", "35", "37", "4", "5", "6") )
def lowerCAmelCase_ ( snake_case_ : str ) -> bool:
'''simple docstring'''
UpperCAmelCase_ = credit_card_number
UpperCAmelCase_ = 0
UpperCAmelCase_ = len(snake_case_ ) - 2
for i in range(snake_case_ , -1 , -2 ):
# double the value of every second digit
UpperCAmelCase_ = int(cc_number[i] )
digit *= 2
        # If doubling a digit results in a two-digit number,
        # i.e. greater than 9 (e.g., 6 × 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6)
        # to get a single-digit number.
if digit > 9:
digit %= 10
digit += 1
UpperCAmelCase_ = cc_number[:i] + str(snake_case_ ) + cc_number[i + 1 :]
total += digit
# Sum up the remaining digits
for i in range(len(snake_case_ ) - 1 , -1 , -2 ):
total += int(cc_number[i] )
return total % 10 == 0
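# Worked example (added for illustration): for "59" the second-from-right digit
# doubles to 5 * 2 = 10, which the branch above reduces to 10 % 10 + 1 = 1;
# adding the remaining digit 9 gives 10, and 10 % 10 == 0, so "59" passes.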
def lowerCAmelCase_ ( snake_case_ : str ) -> bool:
'''simple docstring'''
UpperCAmelCase_ = f"""{credit_card_number} is an invalid credit card number because"""
if not credit_card_number.isdigit():
print(f"""{error_message} it has nonnumerical characters.""" )
return False
if not 13 <= len(snake_case_ ) <= 16:
print(f"""{error_message} of its length.""" )
return False
if not validate_initial_digits(snake_case_ ):
print(f"""{error_message} of its first two digits.""" )
return False
if not luhn_validation(snake_case_ ):
print(f"""{error_message} it fails the Luhn check.""" )
return False
print(f"""{credit_card_number} is a valid credit card number.""" )
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number('4111111111111111')
validate_credit_card_number('32323')
| 1 | '''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_: Dict =logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_: Tuple ={}
class __A ( UpperCamelCase__ ):
a__ : int = """llama"""
a__ : Any = ["""past_key_values"""]
def __init__(self : List[str] , __a : List[str]=32000 , __a : Tuple=4096 , __a : List[Any]=11008 , __a : Dict=32 , __a : Tuple=32 , __a : Any=None , __a : Any="silu" , __a : List[Any]=2048 , __a : List[Any]=0.02 , __a : str=1E-6 , __a : Optional[Any]=True , __a : Union[str, Any]=0 , __a : Any=1 , __a : Dict=2 , __a : Dict=1 , __a : str=False , __a : str=None , **__a : Optional[Any] , ):
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
# for backward compatibility
if num_key_value_heads is None:
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = num_key_value_heads
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = rms_norm_eps
UpperCAmelCase_ = pretraining_tp
UpperCAmelCase_ = use_cache
UpperCAmelCase_ = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , tie_word_embeddings=__a , **__a , )
def _lowercase (self : List[str] ):
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , __a ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"""got {self.rope_scaling}""" )
UpperCAmelCase_ = self.rope_scaling.get("type" , __a )
UpperCAmelCase_ = self.rope_scaling.get("factor" , __a )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"""`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" )
if rope_scaling_factor is None or not isinstance(__a , __a ) or rope_scaling_factor <= 1.0:
raise ValueError(f"""`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}""" )
| 1 | 1 |
'''simple docstring'''
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def lowerCAmelCase_ ( snake_case_ : str = "isbn/0140328726" ) -> dict:
'''simple docstring'''
UpperCAmelCase_ = olid.strip().strip("/" ) # Remove leading/trailing whitespace & slashes
if new_olid.count("/" ) != 1:
UpperCAmelCase_ = f"""{olid} is not a valid Open Library olid"""
raise ValueError(snake_case_ )
return requests.get(f"""https://openlibrary.org/{new_olid}.json""" ).json()
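# Example (illustrative): get_openlibrary_data("isbn/0140328726") requests
# https://openlibrary.org/isbn/0140328726.json and returns the decoded dict.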
def lowerCAmelCase_ ( snake_case_ : dict ) -> dict:
'''simple docstring'''
UpperCAmelCase_ = {
"title": "Title",
"publish_date": "Publish date",
"authors": "Authors",
"number_of_pages": "Number of pages:",
"first_sentence": "First sentence",
"isbn_10": "ISBN (10)",
"isbn_13": "ISBN (13)",
}
UpperCAmelCase_ = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
UpperCAmelCase_ = [
get_openlibrary_data(author["key"] )["name"] for author in data["Authors"]
]
UpperCAmelCase_ = data["First sentence"]["value"]
for key, value in data.items():
if isinstance(snake_case_ , snake_case_ ):
UpperCAmelCase_ = ", ".join(snake_case_ )
return data
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
SCREAMING_SNAKE_CASE_: Tuple =input('\nEnter the ISBN code to search (or \'quit\' to stop): ').strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (10, 13) or not isbn.isdigit():
print(f"Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.")
continue
print(f"\nSearching Open Library for ISBN: {isbn}...\n")
try:
SCREAMING_SNAKE_CASE_: Union[str, Any] =summarize_book(get_openlibrary_data(f"isbn/{isbn}"))
print('\n'.join(f"{key}: {value}" for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(f"Sorry, there are no results for ISBN: {isbn}.")
| 1 | '''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class __A ( unittest.TestCase ):
def _lowercase (self : Tuple ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _lowercase (self : str ):
UpperCAmelCase_ = 1
UpperCAmelCase_ = 3
UpperCAmelCase_ = (32, 32)
UpperCAmelCase_ = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__a )
return image
@property
def _lowercase (self : int ):
torch.manual_seed(0 )
UpperCAmelCase_ = UNetaDConditionModel(
block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=__a , only_cross_attention=(True, True, False) , num_class_embeds=100 , )
return model
@property
def _lowercase (self : Any ):
torch.manual_seed(0 )
UpperCAmelCase_ = AutoencoderKL(
block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
return model
@property
def _lowercase (self : Optional[Any] ):
torch.manual_seed(0 )
UpperCAmelCase_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=512 , )
return CLIPTextModel(__a )
def _lowercase (self : Any ):
UpperCAmelCase_ = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ = self.dummy_cond_unet_upscale
UpperCAmelCase_ = DDPMScheduler()
UpperCAmelCase_ = DDIMScheduler(prediction_type="v_prediction" )
UpperCAmelCase_ = self.dummy_vae
UpperCAmelCase_ = self.dummy_text_encoder
UpperCAmelCase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
UpperCAmelCase_ = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCAmelCase_ = Image.fromarray(np.uinta(__a ) ).convert("RGB" ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
UpperCAmelCase_ = StableDiffusionUpscalePipeline(
unet=__a , low_res_scheduler=__a , scheduler=__a , vae=__a , text_encoder=__a , tokenizer=__a , max_noise_level=350 , )
UpperCAmelCase_ = sd_pipe.to(__a )
sd_pipe.set_progress_bar_config(disable=__a )
UpperCAmelCase_ = "A painting of a squirrel eating a burger"
UpperCAmelCase_ = torch.Generator(device=__a ).manual_seed(0 )
UpperCAmelCase_ = sd_pipe(
[prompt] , image=__a , generator=__a , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , )
UpperCAmelCase_ = output.images
UpperCAmelCase_ = torch.Generator(device=__a ).manual_seed(0 )
UpperCAmelCase_ = sd_pipe(
[prompt] , image=__a , generator=__a , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , return_dict=__a , )[0]
UpperCAmelCase_ = image[0, -3:, -3:, -1]
UpperCAmelCase_ = image_from_tuple[0, -3:, -3:, -1]
UpperCAmelCase_ = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
UpperCAmelCase_ = np.array([0.31_13, 0.39_10, 0.42_72, 0.48_59, 0.50_61, 0.46_52, 0.53_62, 0.57_15, 0.56_61] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _lowercase (self : Optional[int] ):
UpperCAmelCase_ = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ = self.dummy_cond_unet_upscale
UpperCAmelCase_ = DDPMScheduler()
UpperCAmelCase_ = DDIMScheduler(prediction_type="v_prediction" )
UpperCAmelCase_ = self.dummy_vae
UpperCAmelCase_ = self.dummy_text_encoder
UpperCAmelCase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
UpperCAmelCase_ = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCAmelCase_ = Image.fromarray(np.uinta(__a ) ).convert("RGB" ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
UpperCAmelCase_ = StableDiffusionUpscalePipeline(
unet=__a , low_res_scheduler=__a , scheduler=__a , vae=__a , text_encoder=__a , tokenizer=__a , max_noise_level=350 , )
UpperCAmelCase_ = sd_pipe.to(__a )
sd_pipe.set_progress_bar_config(disable=__a )
UpperCAmelCase_ = "A painting of a squirrel eating a burger"
UpperCAmelCase_ = sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , )
UpperCAmelCase_ = output.images
assert image.shape[0] == 2
UpperCAmelCase_ = torch.Generator(device=__a ).manual_seed(0 )
UpperCAmelCase_ = sd_pipe(
[prompt] , image=__a , generator=__a , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , )
UpperCAmelCase_ = output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def _lowercase (self : str ):
UpperCAmelCase_ = self.dummy_cond_unet_upscale
UpperCAmelCase_ = DDPMScheduler()
UpperCAmelCase_ = DDIMScheduler(prediction_type="v_prediction" )
UpperCAmelCase_ = self.dummy_vae
UpperCAmelCase_ = self.dummy_text_encoder
UpperCAmelCase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
UpperCAmelCase_ = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCAmelCase_ = Image.fromarray(np.uinta(__a ) ).convert("RGB" ).resize((64, 64) )
# put models in fp16, except vae as it overflows in fp16
UpperCAmelCase_ = unet.half()
UpperCAmelCase_ = text_encoder.half()
# make sure here that pndm scheduler skips prk
UpperCAmelCase_ = StableDiffusionUpscalePipeline(
unet=__a , low_res_scheduler=__a , scheduler=__a , vae=__a , text_encoder=__a , tokenizer=__a , max_noise_level=350 , )
UpperCAmelCase_ = sd_pipe.to(__a )
sd_pipe.set_progress_bar_config(disable=__a )
UpperCAmelCase_ = "A painting of a squirrel eating a burger"
UpperCAmelCase_ = torch.manual_seed(0 )
UpperCAmelCase_ = sd_pipe(
[prompt] , image=__a , generator=__a , num_inference_steps=2 , output_type="np" , ).images
UpperCAmelCase_ = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class __A ( unittest.TestCase ):
def _lowercase (self : List[str] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowercase (self : List[Any] ):
UpperCAmelCase_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-upscale/low_res_cat.png" )
UpperCAmelCase_ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
"/upsampled_cat.npy" )
UpperCAmelCase_ = "stabilityai/stable-diffusion-x4-upscaler"
UpperCAmelCase_ = StableDiffusionUpscalePipeline.from_pretrained(__a )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
pipe.enable_attention_slicing()
UpperCAmelCase_ = "a cat sitting on a park bench"
UpperCAmelCase_ = torch.manual_seed(0 )
UpperCAmelCase_ = pipe(
prompt=__a , image=__a , generator=__a , output_type="np" , )
UpperCAmelCase_ = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 1E-3
def _lowercase (self : Tuple ):
UpperCAmelCase_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-upscale/low_res_cat.png" )
UpperCAmelCase_ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
"/upsampled_cat_fp16.npy" )
UpperCAmelCase_ = "stabilityai/stable-diffusion-x4-upscaler"
UpperCAmelCase_ = StableDiffusionUpscalePipeline.from_pretrained(
__a , torch_dtype=torch.floataa , )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
pipe.enable_attention_slicing()
UpperCAmelCase_ = "a cat sitting on a park bench"
UpperCAmelCase_ = torch.manual_seed(0 )
UpperCAmelCase_ = pipe(
prompt=__a , image=__a , generator=__a , output_type="np" , )
UpperCAmelCase_ = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def _lowercase (self : List[Any] ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
UpperCAmelCase_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-upscale/low_res_cat.png" )
UpperCAmelCase_ = "stabilityai/stable-diffusion-x4-upscaler"
UpperCAmelCase_ = StableDiffusionUpscalePipeline.from_pretrained(
__a , torch_dtype=torch.floataa , )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
UpperCAmelCase_ = "a cat sitting on a park bench"
UpperCAmelCase_ = torch.manual_seed(0 )
UpperCAmelCase_ = pipe(
prompt=__a , image=__a , generator=__a , num_inference_steps=5 , output_type="np" , )
UpperCAmelCase_ = torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 10**9
| 1 | 1 |
'''simple docstring'''
def lowerCAmelCase_ ( snake_case_ : str , snake_case_ : bool = False ) -> str:
'''simple docstring'''
if not isinstance(snake_case_ , snake_case_ ):
UpperCAmelCase_ = f"""Expected string as input, found {type(snake_case_ )}"""
raise ValueError(snake_case_ )
if not isinstance(snake_case_ , snake_case_ ):
UpperCAmelCase_ = f"""Expected boolean as use_pascal parameter, found {type(snake_case_ )}"""
raise ValueError(snake_case_ )
UpperCAmelCase_ = input_str.split("_" )
UpperCAmelCase_ = 0 if use_pascal else 1
UpperCAmelCase_ = words[start_index:]
UpperCAmelCase_ = [word[0].upper() + word[1:] for word in words_to_capitalize]
UpperCAmelCase_ = "" if use_pascal else words[0]
return "".join([initial_word, *capitalized_words] )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 1 | '''simple docstring'''
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class __A ( UpperCamelCase__ ):
def __init__(self : int , __a : Distribution , __a : Dict=None , __a : int=None , __a : Any=0 ):
UpperCAmelCase_ = 1.0 if scale is None else scale
UpperCAmelCase_ = 0.0 if loc is None else loc
super().__init__(__a , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=__a )] )
@property
def _lowercase (self : Union[str, Any] ):
return self.base_dist.mean * self.scale + self.loc
@property
def _lowercase (self : List[Any] ):
return self.base_dist.variance * self.scale**2
@property
def _lowercase (self : List[Any] ):
return self.variance.sqrt()
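# Sanity check (added): wrapping Normal(0, 1) with loc=2.0 and scale=3.0 yields
# mean 2.0, variance 9.0 and stddev 3.0 via the three properties above.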
class __A ( nn.Module ):
def __init__(self : Optional[int] , __a : int , __a : Dict[str, int] , __a : Callable[..., Tuple[torch.Tensor]] , **__a : List[str] ):
super().__init__(**__a )
UpperCAmelCase_ = args_dim
UpperCAmelCase_ = nn.ModuleList([nn.Linear(__a , __a ) for dim in args_dim.values()] )
UpperCAmelCase_ = domain_map
def _lowercase (self : List[str] , __a : torch.Tensor ):
UpperCAmelCase_ = [proj(__a ) for proj in self.proj]
return self.domain_map(*__a )
class __A ( nn.Module ):
def __init__(self : Union[str, Any] , __a : List[str] ):
super().__init__()
UpperCAmelCase_ = function
def _lowercase (self : Optional[int] , __a : List[str] , *__a : Optional[int] ):
return self.function(__a , *__a )
class __A :
a__ : type
a__ : int
a__ : Dict[str, int]
def __init__(self : List[Any] , __a : int = 1 ):
UpperCAmelCase_ = dim
UpperCAmelCase_ = {k: dim * self.args_dim[k] for k in self.args_dim}
def _lowercase (self : Any , __a : Any ):
if self.dim == 1:
return self.distribution_class(*__a )
else:
return Independent(self.distribution_class(*__a ) , 1 )
def _lowercase (self : List[str] , __a : Union[str, Any] , __a : Optional[torch.Tensor] = None , __a : Optional[torch.Tensor] = None , ):
UpperCAmelCase_ = self._base_distribution(__a )
if loc is None and scale is None:
return distr
else:
return AffineTransformed(__a , loc=__a , scale=__a , event_dim=self.event_dim )
@property
def _lowercase (self : Any ):
return () if self.dim == 1 else (self.dim,)
@property
def _lowercase (self : Dict ):
return len(self.event_shape )
@property
def _lowercase (self : Tuple ):
return 0.0
def _lowercase (self : List[str] , __a : int ):
return ParameterProjection(
in_features=__a , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , )
def _lowercase (self : Optional[int] , *__a : torch.Tensor ):
raise NotImplementedError()
@staticmethod
def _lowercase (__a : torch.Tensor ):
return (x + torch.sqrt(torch.square(__a ) + 4.0 )) / 2.0
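# Note (added): squareplus(x) = (x + sqrt(x^2 + 4)) / 2 is a smooth, strictly
# positive softplus alternative; squareplus(0) = 1 and it tends to x for large
# positive x.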
class __A ( UpperCamelCase__ ):
a__ : Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
a__ : type = StudentT
@classmethod
def _lowercase (cls : Union[str, Any] , __a : torch.Tensor , __a : torch.Tensor , __a : torch.Tensor ):
UpperCAmelCase_ = cls.squareplus(__a ).clamp_min(torch.finfo(scale.dtype ).eps )
UpperCAmelCase_ = 2.0 + cls.squareplus(__a )
return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 )
class __A ( UpperCamelCase__ ):
a__ : Dict[str, int] = {"loc": 1, "scale": 1}
a__ : type = Normal
@classmethod
def _lowercase (cls : Tuple , __a : torch.Tensor , __a : torch.Tensor ):
UpperCAmelCase_ = cls.squareplus(__a ).clamp_min(torch.finfo(scale.dtype ).eps )
return loc.squeeze(-1 ), scale.squeeze(-1 )
class __A ( UpperCamelCase__ ):
a__ : Dict[str, int] = {"total_count": 1, "logits": 1}
a__ : type = NegativeBinomial
@classmethod
def _lowercase (cls : Optional[Any] , __a : torch.Tensor , __a : torch.Tensor ):
UpperCAmelCase_ = cls.squareplus(__a )
return total_count.squeeze(-1 ), logits.squeeze(-1 )
def _lowercase (self : List[str] , __a : str ):
UpperCAmelCase_ , UpperCAmelCase_ = distr_args
if self.dim == 1:
return self.distribution_class(total_count=__a , logits=__a )
else:
return Independent(self.distribution_class(total_count=__a , logits=__a ) , 1 )
def _lowercase (self : Optional[Any] , __a : int , __a : Optional[torch.Tensor] = None , __a : Optional[torch.Tensor] = None ):
UpperCAmelCase_ , UpperCAmelCase_ = distr_args
if scale is not None:
# See scaling property of Gamma.
logits += scale.log()
return self._base_distribution((total_count, logits) )
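# Note (added): torch's NegativeBinomial has mean total_count * exp(logits),
# so adding scale.log() to the logits above multiplies the mean by `scale`,
# which is the scaling property of the Gamma mixture the comment refers to.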
| 1 | 1 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class __A ( unittest.TestCase ):
@slow
def _lowercase (self : Union[str, Any] ):
UpperCAmelCase_ = AutoModelForSeqaSeqLM.from_pretrained("google/mt5-small" , return_dict=__a ).to(__a )
UpperCAmelCase_ = AutoTokenizer.from_pretrained("google/mt5-small" )
UpperCAmelCase_ = tokenizer("Hello there" , return_tensors="pt" ).input_ids
UpperCAmelCase_ = tokenizer("Hi I am" , return_tensors="pt" ).input_ids
UpperCAmelCase_ = model(input_ids.to(__a ) , labels=labels.to(__a ) ).loss
UpperCAmelCase_ = -(labels.shape[-1] * loss.item())
UpperCAmelCase_ = -84.91_27
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
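# Note (added): `loss` is the mean per-token negative log-likelihood, so
# multiplying by the label length and negating yields the total sequence
# log-likelihood that is compared with the reference EXPECTED_SCORE.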
| 1 | '''simple docstring'''
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
SCREAMING_SNAKE_CASE_: Optional[Any] ='\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n'
SCREAMING_SNAKE_CASE_: Union[str, Any] ='\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.\n'
SCREAMING_SNAKE_CASE_: List[Any] =r'\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting "1/2" to "\\frac{1}{2}")\n\nExamples:\n >>> metric = datasets.load_metric("competition_math")\n >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])\n >>> print(results)\n {\'accuracy\': 1.0}\n'
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __A ( datasets.Metric ):
def _lowercase (self : Optional[Any] ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" ),
"references": datasets.Value("string" ),
} ) , homepage="https://github.com/hendrycks/math" , codebase_urls=["https://github.com/hendrycks/math"] , )
def _lowercase (self : Tuple , __a : Optional[int] , __a : List[Any] ):
UpperCAmelCase_ = 0.0
for i, j in zip(__a , __a ):
n_correct += 1.0 if math_equivalence.is_equiv(__a , __a ) else 0.0
UpperCAmelCase_ = n_correct / len(__a )
return {
"accuracy": accuracy,
}
| 1 | 1 |
'''simple docstring'''
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
SCREAMING_SNAKE_CASE_: str =logging.get_logger(__name__)
# General docstring
SCREAMING_SNAKE_CASE_: Optional[Any] ='PoolFormerConfig'
# Base docstring
SCREAMING_SNAKE_CASE_: Optional[int] ='sail/poolformer_s12'
SCREAMING_SNAKE_CASE_: Union[str, Any] =[1, 5_12, 7, 7]
# Image classification docstring
SCREAMING_SNAKE_CASE_: Dict ='sail/poolformer_s12'
SCREAMING_SNAKE_CASE_: Optional[int] ='tabby, tabby cat'
SCREAMING_SNAKE_CASE_: Union[str, Any] =[
'sail/poolformer_s12',
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def lowerCAmelCase_ ( snake_case_ : List[str] , snake_case_ : float = 0.0 , snake_case_ : bool = False ) -> Optional[int]:
'''simple docstring'''
if drop_prob == 0.0 or not training:
return input
UpperCAmelCase_ = 1 - drop_prob
UpperCAmelCase_ = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
UpperCAmelCase_ = keep_prob + torch.rand(snake_case_ , dtype=input.dtype , device=input.device )
random_tensor.floor_() # binarize
UpperCAmelCase_ = input.div(snake_case_ ) * random_tensor
return output
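# Note (added): dividing the surviving samples by keep_prob keeps the expected
# value of the output equal to the input during training, e.g. with
# drop_prob=0.2 kept paths are scaled by 1 / 0.8.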
class __A ( nn.Module ):
def __init__(self : List[str] , __a : Optional[float] = None ):
super().__init__()
UpperCAmelCase_ = drop_prob
def _lowercase (self : List[str] , __a : torch.Tensor ):
return drop_path(__a , self.drop_prob , self.training )
def _lowercase (self : Tuple ):
return "p={}".format(self.drop_prob )
class __A ( nn.Module ):
def __init__(self : Union[str, Any] , __a : Union[str, Any] , __a : str , __a : List[Any] , __a : int , __a : List[str] , __a : Optional[Any]=None ):
super().__init__()
UpperCAmelCase_ = patch_size if isinstance(__a , collections.abc.Iterable ) else (patch_size, patch_size)
UpperCAmelCase_ = stride if isinstance(__a , collections.abc.Iterable ) else (stride, stride)
UpperCAmelCase_ = padding if isinstance(__a , collections.abc.Iterable ) else (padding, padding)
UpperCAmelCase_ = nn.Convad(__a , __a , kernel_size=__a , stride=__a , padding=__a )
UpperCAmelCase_ = norm_layer(__a ) if norm_layer else nn.Identity()
def _lowercase (self : List[str] , __a : Any ):
UpperCAmelCase_ = self.projection(__a )
UpperCAmelCase_ = self.norm(__a )
return embeddings
class __A ( nn.GroupNorm ):
def __init__(self : Optional[Any] , __a : Any , **__a : Tuple ):
super().__init__(1 , __a , **__a )
class __A ( nn.Module ):
def __init__(self : List[str] , __a : Any ):
super().__init__()
UpperCAmelCase_ = nn.AvgPoolad(__a , stride=1 , padding=pool_size // 2 , count_include_pad=__a )
def _lowercase (self : str , __a : int ):
return self.pool(__a ) - hidden_states
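# Note (added): the stride-1 average pool preserves the spatial size, and the
# block's residual connection adds the input back, so pool(x) - x makes the
# token mixer behave like plain average pooling, as in the PoolFormer paper.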
class __A ( nn.Module ):
def __init__(self : str , __a : Union[str, Any] , __a : Dict , __a : int , __a : Optional[Any] ):
super().__init__()
UpperCAmelCase_ = nn.Convad(__a , __a , 1 )
UpperCAmelCase_ = nn.Convad(__a , __a , 1 )
UpperCAmelCase_ = PoolFormerDropPath(__a )
if isinstance(config.hidden_act , __a ):
UpperCAmelCase_ = ACTaFN[config.hidden_act]
else:
UpperCAmelCase_ = config.hidden_act
def _lowercase (self : Dict , __a : Any ):
UpperCAmelCase_ = self.conva(__a )
UpperCAmelCase_ = self.act_fn(__a )
UpperCAmelCase_ = self.drop(__a )
UpperCAmelCase_ = self.conva(__a )
UpperCAmelCase_ = self.drop(__a )
return hidden_states
class __A ( nn.Module ):
def __init__(self : Dict , __a : Tuple , __a : Optional[Any] , __a : int , __a : str , __a : Union[str, Any] , __a : int ):
super().__init__()
UpperCAmelCase_ = PoolFormerPooling(__a )
UpperCAmelCase_ = PoolFormerOutput(__a , __a , __a , __a )
UpperCAmelCase_ = PoolFormerGroupNorm(__a )
UpperCAmelCase_ = PoolFormerGroupNorm(__a )
# Useful for training neural nets
UpperCAmelCase_ = PoolFormerDropPath(__a ) if drop_path > 0.0 else nn.Identity()
UpperCAmelCase_ = config.use_layer_scale
if config.use_layer_scale:
UpperCAmelCase_ = nn.Parameter(
config.layer_scale_init_value * torch.ones((__a) ) , requires_grad=__a )
UpperCAmelCase_ = nn.Parameter(
config.layer_scale_init_value * torch.ones((__a) ) , requires_grad=__a )
def _lowercase (self : Any , __a : Union[str, Any] ):
if self.use_layer_scale:
UpperCAmelCase_ = self.pooling(self.before_norm(__a ) )
UpperCAmelCase_ = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output
# First residual connection
UpperCAmelCase_ = hidden_states + self.drop_path(__a )
UpperCAmelCase_ = ()
UpperCAmelCase_ = self.output(self.after_norm(__a ) )
UpperCAmelCase_ = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output
# Second residual connection
UpperCAmelCase_ = hidden_states + self.drop_path(__a )
UpperCAmelCase_ = (output,) + outputs
return outputs
else:
UpperCAmelCase_ = self.drop_path(self.pooling(self.before_norm(__a ) ) )
# First residual connection
UpperCAmelCase_ = pooling_output + hidden_states
UpperCAmelCase_ = ()
# Second residual connection inside the PoolFormerOutput block
UpperCAmelCase_ = self.drop_path(self.output(self.after_norm(__a ) ) )
UpperCAmelCase_ = hidden_states + layer_output
UpperCAmelCase_ = (output,) + outputs
return outputs
class __A ( nn.Module ):
def __init__(self : Union[str, Any] , __a : Tuple ):
super().__init__()
UpperCAmelCase_ = config
# stochastic depth decay rule
UpperCAmelCase_ = [x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )]
# patch embeddings
UpperCAmelCase_ = []
for i in range(config.num_encoder_blocks ):
embeddings.append(
PoolFormerEmbeddings(
patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) )
UpperCAmelCase_ = nn.ModuleList(__a )
# Transformer blocks
UpperCAmelCase_ = []
UpperCAmelCase_ = 0
for i in range(config.num_encoder_blocks ):
# each block consists of layers
UpperCAmelCase_ = []
if i != 0:
cur += config.depths[i - 1]
for j in range(config.depths[i] ):
layers.append(
PoolFormerLayer(
__a , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) )
blocks.append(nn.ModuleList(__a ) )
UpperCAmelCase_ = nn.ModuleList(__a )
def _lowercase (self : Any , __a : int , __a : str=False , __a : Optional[Any]=True ):
UpperCAmelCase_ = () if output_hidden_states else None
UpperCAmelCase_ = pixel_values
for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ):
UpperCAmelCase_ , UpperCAmelCase_ = layers
# Get patch embeddings from hidden_states
UpperCAmelCase_ = embedding_layer(__a )
# Send the embeddings through the blocks
for _, blk in enumerate(__a ):
UpperCAmelCase_ = blk(__a )
UpperCAmelCase_ = layer_outputs[0]
if output_hidden_states:
UpperCAmelCase_ = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=__a , hidden_states=__a )
class __A ( UpperCamelCase__ ):
a__ : List[Any] = PoolFormerConfig
a__ : Tuple = """poolformer"""
a__ : List[str] = """pixel_values"""
a__ : Dict = True
def _lowercase (self : Tuple , __a : Optional[Any] ):
if isinstance(__a , (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(__a , nn.LayerNorm ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
def _lowercase (self : str , __a : Dict , __a : Dict=False ):
if isinstance(__a , __a ):
UpperCAmelCase_ = value
SCREAMING_SNAKE_CASE_: List[str] =r'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
SCREAMING_SNAKE_CASE_: List[Any] =r'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`PoolFormerImageProcessor.__call__`] for details.\n'
@add_start_docstrings(
"""The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.""" , UpperCamelCase__ , )
class __A ( UpperCamelCase__ ):
def __init__(self : List[str] , __a : str ):
super().__init__(__a )
UpperCAmelCase_ = config
UpperCAmelCase_ = PoolFormerEncoder(__a )
# Initialize weights and apply final processing
self.post_init()
def _lowercase (self : List[str] ):
return self.embeddings.patch_embeddings
@add_start_docstrings_to_model_forward(__a )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=__a , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def _lowercase (self : Optional[Any] , __a : Optional[torch.FloatTensor] = None , __a : Optional[bool] = None , __a : Optional[bool] = None , ):
UpperCAmelCase_ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
UpperCAmelCase_ = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError("You have to specify pixel_values" )
UpperCAmelCase_ = self.encoder(
__a , output_hidden_states=__a , return_dict=__a , )
UpperCAmelCase_ = encoder_outputs[0]
if not return_dict:
return (sequence_output, None) + encoder_outputs[1:]
return BaseModelOutputWithNoAttention(
last_hidden_state=__a , hidden_states=encoder_outputs.hidden_states , )
class __A ( nn.Module ):
def __init__(self : Any , __a : str ):
super().__init__()
UpperCAmelCase_ = nn.Linear(config.hidden_size , config.hidden_size )
def _lowercase (self : List[str] , __a : Tuple ):
UpperCAmelCase_ = self.dense(__a )
return output
@add_start_docstrings(
"""
PoolFormer Model transformer with an image classification head on top
""" , UpperCamelCase__ , )
class __A ( UpperCamelCase__ ):
def __init__(self : Union[str, Any] , __a : List[str] ):
super().__init__(__a )
UpperCAmelCase_ = config.num_labels
UpperCAmelCase_ = PoolFormerModel(__a )
# Final norm
UpperCAmelCase_ = PoolFormerGroupNorm(config.hidden_sizes[-1] )
# Classifier head
UpperCAmelCase_ = (
nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity()
)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(__a )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__a , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def _lowercase (self : List[Any] , __a : Optional[torch.FloatTensor] = None , __a : Optional[torch.LongTensor] = None , __a : Optional[bool] = None , __a : Optional[bool] = None , ):
UpperCAmelCase_ = return_dict if return_dict is not None else self.config.use_return_dict
UpperCAmelCase_ = self.poolformer(
__a , output_hidden_states=__a , return_dict=__a , )
UpperCAmelCase_ = outputs[0]
UpperCAmelCase_ = self.classifier(self.norm(__a ).mean([-2, -1] ) )
UpperCAmelCase_ = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
UpperCAmelCase_ = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
UpperCAmelCase_ = "single_label_classification"
else:
UpperCAmelCase_ = "multi_label_classification"
if self.config.problem_type == "regression":
UpperCAmelCase_ = MSELoss()
if self.num_labels == 1:
UpperCAmelCase_ = loss_fct(logits.squeeze() , labels.squeeze() )
else:
UpperCAmelCase_ = loss_fct(__a , __a )
elif self.config.problem_type == "single_label_classification":
UpperCAmelCase_ = CrossEntropyLoss()
UpperCAmelCase_ = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
UpperCAmelCase_ = BCEWithLogitsLoss()
UpperCAmelCase_ = loss_fct(__a , __a )
if not return_dict:
UpperCAmelCase_ = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=__a , logits=__a , hidden_states=outputs.hidden_states )
| 1 | '''simple docstring'''
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def lowerCAmelCase_ ( snake_case_ : Dict , snake_case_ : Union[str, Any] , snake_case_ : Optional[Any] , snake_case_ : List[Any] ) -> List[Any]:
'''simple docstring'''
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), f"""Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"""
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), f"""Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"""
def lowerCAmelCase_ ( snake_case_ : Any , snake_case_ : Tuple , snake_case_ : Any , snake_case_ : List[Any] , snake_case_ : str=True ) -> Optional[Any]:
'''simple docstring'''
model.train()
UpperCAmelCase_ = model(snake_case_ )
UpperCAmelCase_ = F.mse_loss(snake_case_ , target.to(output.device ) )
if not do_backward:
loss /= accelerator.gradient_accumulation_steps
loss.backward()
else:
accelerator.backward(snake_case_ )
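# --- Tiny self-contained check (assumes 2 accumulation steps) of why the
# --- `loss /= accelerator.gradient_accumulation_steps` branch above keeps
# --- gradients at full-batch scale:
import torch

w = torch.ones(1, requires_grad=True)
for micro_loss in (w * 2.0, w * 4.0):  # two micro-batch losses
    (micro_loss.sum() / 2).backward()  # divide by the accumulation steps
assert torch.allclose(w.grad, torch.tensor([3.0]))  # mean of per-batch grads 2 and 4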
def lowerCAmelCase_ ( snake_case_ : Optional[Any] , snake_case_ : Any=False ) -> Dict:
'''simple docstring'''
set_seed(42 )
UpperCAmelCase_ = RegressionModel()
UpperCAmelCase_ = deepcopy(snake_case_ )
UpperCAmelCase_ = RegressionDataset(length=80 )
UpperCAmelCase_ = DataLoader(snake_case_ , batch_size=16 )
model.to(accelerator.device )
if sched:
UpperCAmelCase_ = AdamW(params=model.parameters() , lr=1E-3 )
UpperCAmelCase_ = AdamW(params=ddp_model.parameters() , lr=1E-3 )
UpperCAmelCase_ = LambdaLR(snake_case_ , lr_lambda=lambda snake_case_ : snake_case_**0.65 )
UpperCAmelCase_ = LambdaLR(snake_case_ , lr_lambda=lambda snake_case_ : snake_case_**0.65 )
# Make a copy of `model`
if sched:
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = accelerator.prepare(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
else:
UpperCAmelCase_ , UpperCAmelCase_ = accelerator.prepare(snake_case_ , snake_case_ )
if sched:
return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
return model, ddp_model, dataloader
def lowerCAmelCase_ ( snake_case_ : Any ) -> int:
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = get_training_setup(snake_case_ )
# Use a single batch
UpperCAmelCase_ , UpperCAmelCase_ = next(iter(snake_case_ ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
UpperCAmelCase_ , UpperCAmelCase_ = accelerator.gather((ddp_input, ddp_target) )
UpperCAmelCase_ , UpperCAmelCase_ = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(snake_case_ ):
step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
else:
# Sync grads
step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad , ddp_param.grad ), f"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
# Shuffle ddp_input on each iteration
torch.manual_seed(13_37 + iteration )
UpperCAmelCase_ = ddp_input[torch.randperm(len(snake_case_ ) )]
def lowerCAmelCase_ ( snake_case_ : Tuple ) -> str:
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = get_training_setup(snake_case_ )
# Use a single batch
UpperCAmelCase_ , UpperCAmelCase_ = next(iter(snake_case_ ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
UpperCAmelCase_ , UpperCAmelCase_ = accelerator.gather((ddp_input, ddp_target) )
UpperCAmelCase_ , UpperCAmelCase_ = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(snake_case_ ):
step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
else:
# Sync grads
step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), f"""Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"""
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), f"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
# Shuffle ddp_input on each iteration
torch.manual_seed(13_37 + iteration )
UpperCAmelCase_ = ddp_input[torch.randperm(len(snake_case_ ) )]
def lowerCAmelCase_ ( snake_case_ : Optional[int]=False , snake_case_ : str=False ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = Accelerator(
split_batches=snake_case_ , dispatch_batches=snake_case_ , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = get_training_setup(snake_case_ )
for iteration, batch in enumerate(snake_case_ ):
UpperCAmelCase_ , UpperCAmelCase_ = batch.values()
# Gather the distributed inputs and targs for the base model
UpperCAmelCase_ , UpperCAmelCase_ = accelerator.gather((ddp_input, ddp_target) )
UpperCAmelCase_ , UpperCAmelCase_ = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# Do "gradient accumulation" (noop)
with accelerator.accumulate(snake_case_ ):
step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(snake_case_ ) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), f"""Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), f"""Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"""
# Shuffle ddp_input on each iteration
torch.manual_seed(13_37 + iteration )
UpperCAmelCase_ = ddp_input[torch.randperm(len(snake_case_ ) )]
GradientState._reset_state()
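# --- Hedged usage sketch of the `accumulate` wrapper exercised above. Single
# --- process assumed; the "x"/"y" batch keys follow accelerate's RegressionDataset.
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel

accelerator = Accelerator(gradient_accumulation_steps=2)
model = RegressionModel()
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
dataloader = DataLoader(RegressionDataset(length=16), batch_size=4)
model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
for batch in dataloader:
    with accelerator.accumulate(model):  # defers the gradient sync on odd steps
        loss = F.mse_loss(model(batch["x"]), batch["y"])
        accelerator.backward(loss)
        optimizer.step()
        optimizer.zero_grad()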
def lowerCAmelCase_ ( snake_case_ : Optional[Any]=False , snake_case_ : Tuple=False ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = Accelerator(
split_batches=snake_case_ , dispatch_batches=snake_case_ , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = get_training_setup(snake_case_ , snake_case_ )
for iteration, batch in enumerate(snake_case_ ):
UpperCAmelCase_ , UpperCAmelCase_ = batch.values()
# Gather the distributed inputs and targs for the base model
UpperCAmelCase_ , UpperCAmelCase_ = accelerator.gather((ddp_input, ddp_target) )
UpperCAmelCase_ , UpperCAmelCase_ = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
model.train()
ddp_model.train()
step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(snake_case_ )):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes ):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(snake_case_ ):
step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), f"""Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n"""
UpperCAmelCase_ = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(snake_case_ ))
if accelerator.num_processes > 1:
check_model_parameters(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# Shuffle ddp_input on each iteration
torch.manual_seed(13_37 + iteration )
GradientState._reset_state()
def lowerCAmelCase_ ( ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ = Accelerator()
UpperCAmelCase_ = RegressionDataset(length=80 )
UpperCAmelCase_ = DataLoader(snake_case_ , batch_size=16 )
UpperCAmelCase_ = RegressionDataset(length=96 )
UpperCAmelCase_ = DataLoader(snake_case_ , batch_size=16 )
UpperCAmelCase_ , UpperCAmelCase_ = accelerator.prepare(snake_case_ , snake_case_ )
assert accelerator.gradient_state.active_dataloader is None
for iteration, _ in enumerate(snake_case_ ):
assert id(accelerator.gradient_state.active_dataloader ) == id(snake_case_ )
if iteration < len(snake_case_ ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
for batch_num, _ in enumerate(snake_case_ ):
assert id(accelerator.gradient_state.active_dataloader ) == id(snake_case_ )
if batch_num < len(snake_case_ ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def lowerCAmelCase_ ( ) -> str:
'''simple docstring'''
UpperCAmelCase_ = Accelerator()
UpperCAmelCase_ = accelerator.state
if state.local_process_index == 0:
print("**Test `accumulate` gradient accumulation with dataloader break**" )
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print("**Test NOOP `no_sync` context manager**" )
test_noop_sync(snake_case_ )
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print("**Test Distributed `no_sync` context manager**" )
test_distributed_sync(snake_case_ )
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation, " , f"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""" , )
test_gradient_accumulation(snake_case_ , snake_case_ )
# Currently will break on torch 2.0 +, need to investigate why
if is_torch_version("<" , "2.0" ) or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation with optimizer and scheduler, " , "`split_batches=False`, `dispatch_batches=False`**" , )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation with optimizer and scheduler, " , f"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""" , )
test_gradient_accumulation_with_opt_and_scheduler(snake_case_ , snake_case_ )
def lowerCAmelCase_ ( snake_case_ : Dict ) -> int:
'''simple docstring'''
main()
if __name__ == "__main__":
main()
| 1 | 1 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
SCREAMING_SNAKE_CASE_: str =logging.get_logger(__name__)
class __A ( UpperCamelCase__ ):
def __init__(self : List[Any] , *__a : List[str] , **__a : List[Any] ):
warnings.warn(
"The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use FlavaImageProcessor instead." , __a , )
super().__init__(*__a , **__a )
| 1 | '''simple docstring'''
def lowerCAmelCase_ ( snake_case_ : int , snake_case_ : int ) -> int:
'''simple docstring'''
return x if y == 0 else greatest_common_divisor(snake_case_ , x % y )
def lowerCAmelCase_ ( snake_case_ : int , snake_case_ : int ) -> int:
'''simple docstring'''
return (x * y) // greatest_common_divisor(snake_case_ , snake_case_ )
def lowerCAmelCase_ ( snake_case_ : int = 20 ) -> int:
'''simple docstring'''
UpperCAmelCase_ = 1
for i in range(1 , n + 1 ):
UpperCAmelCase_ = lcm(snake_case_ , snake_case_ )
return g
if __name__ == "__main__":
print(f"{solution() = }")
| 1 | 1 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
SCREAMING_SNAKE_CASE_: Tuple =logging.get_logger(__name__)
class __A ( UpperCamelCase__ ):
a__ : Optional[Any] = ["""pixel_values"""]
def __init__(self : int , __a : bool = True , __a : Dict[str, int] = None , __a : float = None , __a : PILImageResampling = PILImageResampling.BILINEAR , __a : bool = True , __a : Union[int, float] = 1 / 255 , __a : bool = True , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[float, List[float]]] = None , **__a : Any , ):
super().__init__(**__a )
UpperCAmelCase_ = size if size is not None else {"shortest_edge": 384}
UpperCAmelCase_ = get_size_dict(__a , default_to_square=__a )
UpperCAmelCase_ = do_resize
UpperCAmelCase_ = size
# Default value set here for backwards compatibility where the value in config is None
UpperCAmelCase_ = crop_pct if crop_pct is not None else 224 / 256
UpperCAmelCase_ = resample
UpperCAmelCase_ = do_rescale
UpperCAmelCase_ = rescale_factor
UpperCAmelCase_ = do_normalize
UpperCAmelCase_ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCAmelCase_ = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _lowercase (self : str , __a : np.ndarray , __a : Dict[str, int] , __a : float , __a : PILImageResampling = PILImageResampling.BICUBIC , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Tuple , ):
UpperCAmelCase_ = get_size_dict(__a , default_to_square=__a )
if "shortest_edge" not in size:
raise ValueError(f"""Size dictionary must contain 'shortest_edge' key. Got {size.keys()}""" )
UpperCAmelCase_ = size["shortest_edge"]
if shortest_edge < 384:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
UpperCAmelCase_ = int(shortest_edge / crop_pct )
UpperCAmelCase_ = get_resize_output_image_size(__a , size=__a , default_to_square=__a )
UpperCAmelCase_ = resize(image=__a , size=__a , resample=__a , data_format=__a , **__a )
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=__a , size=(shortest_edge, shortest_edge) , data_format=__a , **__a )
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
__a , size=(shortest_edge, shortest_edge) , resample=__a , data_format=__a , **__a )
def _lowercase (self : Optional[int] , __a : np.ndarray , __a : Union[int, float] , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Optional[int] , ):
return rescale(__a , scale=__a , data_format=__a , **__a )
def _lowercase (self : Optional[int] , __a : np.ndarray , __a : Union[float, List[float]] , __a : Union[float, List[float]] , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Any , ):
return normalize(__a , mean=__a , std=__a , data_format=__a , **__a )
def _lowercase (self : Optional[int] , __a : ImageInput , __a : bool = None , __a : Dict[str, int] = None , __a : float = None , __a : PILImageResampling = None , __a : bool = None , __a : float = None , __a : bool = None , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[str, TensorType]] = None , __a : ChannelDimension = ChannelDimension.FIRST , **__a : Tuple , ):
UpperCAmelCase_ = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase_ = crop_pct if crop_pct is not None else self.crop_pct
UpperCAmelCase_ = resample if resample is not None else self.resample
UpperCAmelCase_ = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase_ = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase_ = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase_ = image_std if image_std is not None else self.image_std
UpperCAmelCase_ = size if size is not None else self.size
UpperCAmelCase_ = get_size_dict(__a , default_to_square=__a )
UpperCAmelCase_ = make_list_of_images(__a )
if not valid_images(__a ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and (size is None or resample is None):
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
raise ValueError("crop_pct must be specified if size < 384." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
UpperCAmelCase_ = [to_numpy_array(__a ) for image in images]
if do_resize:
UpperCAmelCase_ = [self.resize(image=__a , size=__a , crop_pct=__a , resample=__a ) for image in images]
if do_rescale:
UpperCAmelCase_ = [self.rescale(image=__a , scale=__a ) for image in images]
if do_normalize:
UpperCAmelCase_ = [self.normalize(image=__a , mean=__a , std=__a ) for image in images]
UpperCAmelCase_ = [to_channel_dimension_format(__a , __a ) for image in images]
UpperCAmelCase_ = {"pixel_values": images}
return BatchFeature(data=__a , tensor_type=__a )
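# --- Hedged usage sketch for the processor above; assumes `UpperCamelCase__`
# --- resolves to `BaseImageProcessor` as in the un-renamed source, and that
# --- Pillow is installed:
import PIL.Image

processor = __A(size={"shortest_edge": 384})
image = PIL.Image.new("RGB", (500, 400))
batch = processor(images=image, return_tensors="np")
assert batch["pixel_values"].shape == (1, 3, 384, 384)  # resized, channels-first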
| 1 | '''simple docstring'''
import os
from math import logaa
def lowerCAmelCase_ ( snake_case_ : str = "base_exp.txt" ) -> int:
'''simple docstring'''
UpperCAmelCase_ = 0
UpperCAmelCase_ = 0
for i, line in enumerate(open(os.path.join(os.path.dirname(snake_case_ ) , snake_case_ ) ) ):
UpperCAmelCase_ , UpperCAmelCase_ = list(map(snake_case_ , line.split("," ) ) )
if x * logaa(snake_case_ ) > largest:
UpperCAmelCase_ = x * logaa(snake_case_ )
UpperCAmelCase_ = i + 1
return result
if __name__ == "__main__":
print(solution())
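# --- Why the log10 comparison above is valid (tiny check, no big integers needed):
from math import log10
assert (2**10 > 3**6) == (10 * log10(2) > 6 * log10(3))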
| 1 | 1 |
'''simple docstring'''
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class __A ( unittest.TestCase ):
def _lowercase (self : Tuple ):
UpperCAmelCase_ = logging.get_logger()
# the current default level is logging.WARNING
UpperCAmelCase_ = logging.get_verbosity()
logging.set_verbosity_error()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_warning()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_info()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_debug()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
# restore to the original level
logging.set_verbosity(__a )
def _lowercase (self : Tuple ):
UpperCAmelCase_ = logging.get_verbosity()
UpperCAmelCase_ = logging.get_logger("transformers.models.bart.tokenization_bart" )
UpperCAmelCase_ = "Testing 1, 2, 3"
# should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
if level_origin <= logging.WARNING:
with CaptureLogger(__a ) as cl:
logger.warning(__a )
self.assertEqual(cl.out , msg + "\n" )
# this is setting the level for all of `transformers.*` loggers
logging.set_verbosity_error()
# should not be able to log warnings
with CaptureLogger(__a ) as cl:
logger.warning(__a )
self.assertEqual(cl.out , "" )
# should be able to log warnings again
logging.set_verbosity_warning()
with CaptureLogger(__a ) as cl:
logger.warning(__a )
self.assertEqual(cl.out , msg + "\n" )
# restore to the original level
logging.set_verbosity(__a )
@mockenv(TRANSFORMERS_VERBOSITY="error" )
def _lowercase (self : Optional[Any] ):
# reset for the env var to take effect, next time some logger call is made
transformers.utils.logging._reset_library_root_logger()
# this action activates the env var
UpperCAmelCase_ = logging.get_logger("transformers.models.bart.tokenization_bart" )
UpperCAmelCase_ = os.getenv("TRANSFORMERS_VERBOSITY" , __a )
UpperCAmelCase_ = logging.log_levels[env_level_str]
UpperCAmelCase_ = logging.get_verbosity()
self.assertEqual(
__a , __a , f"""TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}""" , )
# restore to the original level
UpperCAmelCase_ = ""
transformers.utils.logging._reset_library_root_logger()
@mockenv(TRANSFORMERS_VERBOSITY="super-error" )
def _lowercase (self : Optional[int] ):
# reset for the env var to take effect, next time some logger call is made
transformers.utils.logging._reset_library_root_logger()
UpperCAmelCase_ = logging.logging.getLogger()
with CaptureLogger(__a ) as cl:
# this action activates the env var
logging.get_logger("transformers.models.bart.tokenization_bart" )
self.assertIn("Unknown option TRANSFORMERS_VERBOSITY=super-error" , cl.out )
# no need to restore as nothing was changed
def _lowercase (self : Dict ):
# testing `logger.warning_advice()`
transformers.utils.logging._reset_library_root_logger()
UpperCAmelCase_ = logging.get_logger("transformers.models.bart.tokenization_bart" )
UpperCAmelCase_ = "Testing 1, 2, 3"
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="1" ):
# nothing should be logged as env var disables this method
with CaptureLogger(__a ) as cl:
logger.warning_advice(__a )
self.assertEqual(cl.out , "" )
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="" ):
# should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
with CaptureLogger(__a ) as cl:
logger.warning_advice(__a )
self.assertEqual(cl.out , msg + "\n" )
def lowerCAmelCase_ ( ) -> Optional[Any]:
'''simple docstring'''
disable_progress_bar()
assert are_progress_bars_disabled()
enable_progress_bar()
assert not are_progress_bars_disabled()
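# --- Minimal sketch of the verbosity API the tests above exercise:
from transformers.utils import logging as hf_logging

hf_logging.set_verbosity_error()
assert hf_logging.get_verbosity() == hf_logging.ERROR
hf_logging.set_verbosity_warning()
assert hf_logging.get_verbosity() == hf_logging.WARNING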
| 1 | '''simple docstring'''
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def lowerCAmelCase_ ( snake_case_ : List[Any] , snake_case_ : int ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ = checkpoint
UpperCAmelCase_ = {}
UpperCAmelCase_ = vae_state_dict["encoder.conv_in.weight"]
UpperCAmelCase_ = vae_state_dict["encoder.conv_in.bias"]
UpperCAmelCase_ = vae_state_dict["encoder.conv_out.weight"]
UpperCAmelCase_ = vae_state_dict["encoder.conv_out.bias"]
UpperCAmelCase_ = vae_state_dict["encoder.norm_out.weight"]
UpperCAmelCase_ = vae_state_dict["encoder.norm_out.bias"]
UpperCAmelCase_ = vae_state_dict["decoder.conv_in.weight"]
UpperCAmelCase_ = vae_state_dict["decoder.conv_in.bias"]
UpperCAmelCase_ = vae_state_dict["decoder.conv_out.weight"]
UpperCAmelCase_ = vae_state_dict["decoder.conv_out.bias"]
UpperCAmelCase_ = vae_state_dict["decoder.norm_out.weight"]
UpperCAmelCase_ = vae_state_dict["decoder.norm_out.bias"]
UpperCAmelCase_ = vae_state_dict["quant_conv.weight"]
UpperCAmelCase_ = vae_state_dict["quant_conv.bias"]
UpperCAmelCase_ = vae_state_dict["post_quant_conv.weight"]
UpperCAmelCase_ = vae_state_dict["post_quant_conv.bias"]
# Retrieves the keys for the encoder down blocks only
UpperCAmelCase_ = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "encoder.down" in layer} )
UpperCAmelCase_ = {
layer_id: [key for key in vae_state_dict if f"""down.{layer_id}""" in key] for layer_id in range(snake_case_ )
}
# Retrieves the keys for the decoder up blocks only
UpperCAmelCase_ = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "decoder.up" in layer} )
UpperCAmelCase_ = {
layer_id: [key for key in vae_state_dict if f"""up.{layer_id}""" in key] for layer_id in range(snake_case_ )
}
for i in range(snake_case_ ):
UpperCAmelCase_ = [key for key in down_blocks[i] if f"""down.{i}""" in key and f"""down.{i}.downsample""" not in key]
if f"""encoder.down.{i}.downsample.conv.weight""" in vae_state_dict:
UpperCAmelCase_ = vae_state_dict.pop(
f"""encoder.down.{i}.downsample.conv.weight""" )
UpperCAmelCase_ = vae_state_dict.pop(
f"""encoder.down.{i}.downsample.conv.bias""" )
UpperCAmelCase_ = renew_vae_resnet_paths(snake_case_ )
UpperCAmelCase_ = {"old": f"""down.{i}.block""", "new": f"""down_blocks.{i}.resnets"""}
assign_to_checkpoint(snake_case_ , snake_case_ , snake_case_ , additional_replacements=[meta_path] , config=snake_case_ )
UpperCAmelCase_ = [key for key in vae_state_dict if "encoder.mid.block" in key]
UpperCAmelCase_ = 2
for i in range(1 , num_mid_res_blocks + 1 ):
UpperCAmelCase_ = [key for key in mid_resnets if f"""encoder.mid.block_{i}""" in key]
UpperCAmelCase_ = renew_vae_resnet_paths(snake_case_ )
UpperCAmelCase_ = {"old": f"""mid.block_{i}""", "new": f"""mid_block.resnets.{i - 1}"""}
assign_to_checkpoint(snake_case_ , snake_case_ , snake_case_ , additional_replacements=[meta_path] , config=snake_case_ )
UpperCAmelCase_ = [key for key in vae_state_dict if "encoder.mid.attn" in key]
UpperCAmelCase_ = renew_vae_attention_paths(snake_case_ )
UpperCAmelCase_ = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
assign_to_checkpoint(snake_case_ , snake_case_ , snake_case_ , additional_replacements=[meta_path] , config=snake_case_ )
conv_attn_to_linear(snake_case_ )
for i in range(snake_case_ ):
UpperCAmelCase_ = num_up_blocks - 1 - i
UpperCAmelCase_ = [
key for key in up_blocks[block_id] if f"""up.{block_id}""" in key and f"""up.{block_id}.upsample""" not in key
]
if f"""decoder.up.{block_id}.upsample.conv.weight""" in vae_state_dict:
UpperCAmelCase_ = vae_state_dict[
f"""decoder.up.{block_id}.upsample.conv.weight"""
]
UpperCAmelCase_ = vae_state_dict[
f"""decoder.up.{block_id}.upsample.conv.bias"""
]
UpperCAmelCase_ = renew_vae_resnet_paths(snake_case_ )
UpperCAmelCase_ = {"old": f"""up.{block_id}.block""", "new": f"""up_blocks.{i}.resnets"""}
assign_to_checkpoint(snake_case_ , snake_case_ , snake_case_ , additional_replacements=[meta_path] , config=snake_case_ )
UpperCAmelCase_ = [key for key in vae_state_dict if "decoder.mid.block" in key]
UpperCAmelCase_ = 2
for i in range(1 , num_mid_res_blocks + 1 ):
UpperCAmelCase_ = [key for key in mid_resnets if f"""decoder.mid.block_{i}""" in key]
UpperCAmelCase_ = renew_vae_resnet_paths(snake_case_ )
UpperCAmelCase_ = {"old": f"""mid.block_{i}""", "new": f"""mid_block.resnets.{i - 1}"""}
assign_to_checkpoint(snake_case_ , snake_case_ , snake_case_ , additional_replacements=[meta_path] , config=snake_case_ )
UpperCAmelCase_ = [key for key in vae_state_dict if "decoder.mid.attn" in key]
UpperCAmelCase_ = renew_vae_attention_paths(snake_case_ )
UpperCAmelCase_ = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
assign_to_checkpoint(snake_case_ , snake_case_ , snake_case_ , additional_replacements=[meta_path] , config=snake_case_ )
conv_attn_to_linear(snake_case_ )
return new_checkpoint
def lowerCAmelCase_ ( snake_case_ : str , snake_case_ : str , ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ = requests.get(
" https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml" )
UpperCAmelCase_ = io.BytesIO(r.content )
UpperCAmelCase_ = OmegaConf.load(snake_case_ )
UpperCAmelCase_ = 5_12
UpperCAmelCase_ = "cuda" if torch.cuda.is_available() else "cpu"
if checkpoint_path.endswith("safetensors" ):
from safetensors import safe_open
UpperCAmelCase_ = {}
with safe_open(snake_case_ , framework="pt" , device="cpu" ) as f:
for key in f.keys():
UpperCAmelCase_ = f.get_tensor(snake_case_ )
else:
UpperCAmelCase_ = torch.load(snake_case_ , map_location=snake_case_ )["state_dict"]
# Convert the VAE model.
UpperCAmelCase_ = create_vae_diffusers_config(snake_case_ , image_size=snake_case_ )
UpperCAmelCase_ = custom_convert_ldm_vae_checkpoint(snake_case_ , snake_case_ )
UpperCAmelCase_ = AutoencoderKL(**snake_case_ )
vae.load_state_dict(snake_case_ )
vae.save_pretrained(snake_case_ )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_: Optional[int] =argparse.ArgumentParser()
parser.add_argument('--vae_pt_path', default=None, type=str, required=True, help='Path to the VAE.pt to convert.')
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the VAE.pt to convert.')
SCREAMING_SNAKE_CASE_: str =parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
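# --- Example invocation for the converter above (script name and paths are
# --- placeholders, not taken from the source):
#   python convert_vae_pt_to_diffusers.py --vae_pt_path ./vae.ckpt --dump_path ./vae_diffusers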
| 1 | 1 |
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class __A :
def __init__(self : Dict , __a : Optional[Any] , __a : List[Any]=3 , __a : List[str]=7 , __a : int=True , __a : int=True , __a : Dict=False , __a : List[str]=True , __a : int=99 , __a : List[str]=32 , __a : Tuple=5 , __a : str=4 , __a : Optional[Any]=37 , __a : str="gelu" , __a : Tuple=0.1 , __a : Optional[int]=0.1 , __a : str=512 , __a : int=16 , __a : str=2 , __a : Tuple=0.02 , __a : str=3 , __a : Tuple=4 , __a : List[str]=None , ):
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = seq_length
UpperCAmelCase_ = is_training
UpperCAmelCase_ = use_input_mask
UpperCAmelCase_ = use_token_type_ids
UpperCAmelCase_ = use_labels
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = type_vocab_size
UpperCAmelCase_ = type_sequence_label_size
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = num_labels
UpperCAmelCase_ = num_choices
UpperCAmelCase_ = scope
def _lowercase (self : Union[str, Any] ):
UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase_ = None
if self.use_input_mask:
UpperCAmelCase_ = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase_ = None
UpperCAmelCase_ = None
UpperCAmelCase_ = None
UpperCAmelCase_ = None
if self.use_labels:
UpperCAmelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase_ = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase_ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowercase (self : Union[str, Any] ):
return FalconConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__a , initializer_range=self.initializer_range , pad_token_id=1 , new_decoder_architecture=__a , )
def _lowercase (self : Optional[int] , __a : Union[str, Any] , __a : int , __a : Any , __a : int , __a : Union[str, Any] , __a : str , __a : int ):
UpperCAmelCase_ = FalconModel(config=__a )
model.to(__a )
model.eval()
UpperCAmelCase_ = model(__a , attention_mask=__a )
UpperCAmelCase_ = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase (self : Union[str, Any] , __a : str , __a : Optional[int] , __a : List[Any] , __a : Optional[Any] , __a : str , __a : Any , __a : List[Any] , __a : int , __a : List[Any] , ):
UpperCAmelCase_ = True
UpperCAmelCase_ = FalconModel(__a )
model.to(__a )
model.eval()
UpperCAmelCase_ = model(
__a , attention_mask=__a , encoder_hidden_states=__a , encoder_attention_mask=__a , )
UpperCAmelCase_ = model(
__a , attention_mask=__a , encoder_hidden_states=__a , )
UpperCAmelCase_ = model(__a , attention_mask=__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase (self : Optional[Any] , __a : Dict , __a : Any , __a : str , __a : Dict , __a : List[Any] , __a : str , __a : Tuple , __a : int , __a : List[Any] , ):
UpperCAmelCase_ = FalconForCausalLM(config=__a )
model.to(__a )
model.eval()
UpperCAmelCase_ = model(__a , attention_mask=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowercase (self : Dict , __a : int , __a : Any , __a : Tuple , __a : Union[str, Any] , __a : Optional[Any] , __a : int , __a : Tuple , __a : Optional[int] , __a : Any , ):
UpperCAmelCase_ = True
UpperCAmelCase_ = True
UpperCAmelCase_ = FalconForCausalLM(config=__a )
model.to(__a )
model.eval()
# first forward pass
UpperCAmelCase_ = model(
__a , attention_mask=__a , encoder_hidden_states=__a , encoder_attention_mask=__a , use_cache=__a , )
UpperCAmelCase_ = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
UpperCAmelCase_ = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCAmelCase_ = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
UpperCAmelCase_ = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCAmelCase_ = torch.cat([input_mask, next_mask] , dim=-1 )
UpperCAmelCase_ = model(
__a , attention_mask=__a , encoder_hidden_states=__a , encoder_attention_mask=__a , output_hidden_states=__a , )["hidden_states"][0]
UpperCAmelCase_ = model(
__a , attention_mask=__a , encoder_hidden_states=__a , encoder_attention_mask=__a , past_key_values=__a , output_hidden_states=__a , )["hidden_states"][0]
# select random slice
UpperCAmelCase_ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCAmelCase_ = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCAmelCase_ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__a , __a , atol=1E-3 ) )
def _lowercase (self : Tuple ):
UpperCAmelCase_ = self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = config_and_inputs
UpperCAmelCase_ = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class __A ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
a__ : str = (
(
FalconModel,
FalconForCausalLM,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconForQuestionAnswering,
)
if is_torch_available()
else ()
)
a__ : int = (FalconForCausalLM,) if is_torch_available() else ()
a__ : Optional[int] = (
{
"""feature-extraction""": FalconModel,
"""text-classification""": FalconForSequenceClassification,
"""text-generation""": FalconForCausalLM,
"""question-answering""": FalconForQuestionAnswering,
"""token-classification""": FalconForTokenClassification,
"""zero-shot""": FalconForSequenceClassification,
}
if is_torch_available()
else {}
)
a__ : Any = False
a__ : Tuple = False
def _lowercase (self : int ):
UpperCAmelCase_ = FalconModelTester(self )
UpperCAmelCase_ = ConfigTester(self , config_class=__a , hidden_size=37 )
def _lowercase (self : Tuple ):
self.config_tester.run_common_tests()
def _lowercase (self : List[str] ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def _lowercase (self : Optional[int] ):
UpperCAmelCase_ , *UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
for alibi in [True, False]:
UpperCAmelCase_ = alibi
self.model_tester.create_and_check_model(__a , *__a )
def _lowercase (self : Optional[int] ):
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ = 3
UpperCAmelCase_ = input_dict["input_ids"]
UpperCAmelCase_ = input_ids.ne(1 ).to(__a )
UpperCAmelCase_ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
UpperCAmelCase_ = FalconForSequenceClassification(__a )
model.to(__a )
model.eval()
UpperCAmelCase_ = model(__a , attention_mask=__a , labels=__a )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _lowercase (self : Optional[Any] ):
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ = 3
UpperCAmelCase_ = "single_label_classification"
UpperCAmelCase_ = input_dict["input_ids"]
UpperCAmelCase_ = input_ids.ne(1 ).to(__a )
UpperCAmelCase_ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
UpperCAmelCase_ = FalconForSequenceClassification(__a )
model.to(__a )
model.eval()
UpperCAmelCase_ = model(__a , attention_mask=__a , labels=__a )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _lowercase (self : Any ):
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ = input_dict["input_ids"]
UpperCAmelCase_ = FalconForCausalLM(__a )
model.to(__a )
model.eval()
UpperCAmelCase_ = model(__a , use_cache=__a )
UpperCAmelCase_ = input_ids.shape[0]
UpperCAmelCase_ = model._convert_to_rw_cache(result.past_key_values )
UpperCAmelCase_ = model._convert_cache_to_standard_format(__a , __a )
for layer in range(len(__a ) ):
for tensor_idx in range(2 ):
self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3 )
self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4 )
self.assertTrue(
torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx] ) )
def _lowercase (self : Optional[int] ):
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ = 3
UpperCAmelCase_ = "multi_label_classification"
UpperCAmelCase_ = input_dict["input_ids"]
UpperCAmelCase_ = input_ids.ne(1 ).to(__a )
UpperCAmelCase_ = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
UpperCAmelCase_ = FalconForSequenceClassification(__a )
model.to(__a )
model.eval()
UpperCAmelCase_ = model(__a , attention_mask=__a , labels=__a )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _lowercase (self : Tuple ):
# Falcon can have different numbers of KV-heads than the number of query heads, so we need
# to override this test to use the right head counts.
for model_class in self.all_generative_model_classes:
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
# If it doesn't support cache, pass the test
if not hasattr(__a , "use_cache" ):
return
UpperCAmelCase_ = model_class(__a ).to(__a )
if "use_cache" not in inputs:
UpperCAmelCase_ = True
UpperCAmelCase_ = model(**__a )
# If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
if "past_key_values" not in outputs:
return
UpperCAmelCase_ = (
getattr(__a , "decoder_layers" , __a )
or getattr(__a , "num_decoder_layers" , __a )
or config.num_hidden_layers
)
UpperCAmelCase_ = getattr(__a , "num_kv_heads" , config.num_attention_heads )
UpperCAmelCase_ = getattr(__a , "d_model" , config.hidden_size )
UpperCAmelCase_ = embed_dim // num_attention_heads
UpperCAmelCase_ = outputs["past_key_values"]
self.assertEqual(len(__a ) , __a )
UpperCAmelCase_ , UpperCAmelCase_ = inputs["input_ids"].shape
for i in range(__a ):
if config.new_decoder_architecture:
UpperCAmelCase_ = config.num_attention_heads
elif config.multi_query:
UpperCAmelCase_ = 1
self.assertEqual(len(past_kv[0] ) , 2 ) # K V for the decoder = 2
self.assertEqual(
past_kv[i][0].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
self.assertEqual(
past_kv[i][1].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
@require_torch
class __A ( unittest.TestCase ):
@slow
def _lowercase (self : Union[str, Any] ):
UpperCAmelCase_ = AutoTokenizer.from_pretrained("Rocketknight1/falcon-rw-1b" )
UpperCAmelCase_ = FalconForCausalLM.from_pretrained("Rocketknight1/falcon-rw-1b" )
model.eval()
model.to(__a )
UpperCAmelCase_ = tokenizer("My favorite food is" , return_tensors="pt" ).to(__a )
UpperCAmelCase_ = (
"My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday."
)
UpperCAmelCase_ = model.generate(**__a , do_sample=__a , max_new_tokens=19 )
UpperCAmelCase_ = tokenizer.batch_decode(__a )[0]
self.assertEqual(__a , __a )
@slow
def _lowercase (self : Dict ):
# The big models are way too big for the CI, so we use tiny random models that resemble their
# architectures but with much smaller and fewer layers
for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
UpperCAmelCase_ = AutoTokenizer.from_pretrained(__a )
UpperCAmelCase_ = FalconForCausalLM.from_pretrained(__a )
model.eval()
model.to(__a )
UpperCAmelCase_ = tokenizer("My favorite food is" , return_tensors="pt" ).to(__a )
# We just test that these run without errors - the models are randomly initialized
# and so the actual text outputs will be garbage
model.generate(**__a , do_sample=__a , max_new_tokens=4 )
model.generate(**__a , do_sample=__a , max_new_tokens=4 )
model.generate(**__a , num_beams=2 , max_new_tokens=4 )
@slow
def _lowercase (self : Union[str, Any] ):
# The big models are way too big for the CI, so we use tiny random models that resemble their
# architectures but with much smaller and fewer layers
with torch.no_grad():
for repo in [
"Rocketknight1/falcon-rw-1b",
"Rocketknight1/tiny-random-falcon-7b",
"Rocketknight1/tiny-random-falcon-40b",
]:
UpperCAmelCase_ = AutoTokenizer.from_pretrained(__a )
UpperCAmelCase_ = FalconForCausalLM.from_pretrained(__a )
model.eval()
model.to(device=__a )
UpperCAmelCase_ = tokenizer("My favorite food is" , return_tensors="pt" ).to(__a )
# Test results are the same with and without cache
UpperCAmelCase_ = model.generate(**__a , do_sample=__a , max_new_tokens=20 , use_cache=__a )
UpperCAmelCase_ = model.generate(**__a , do_sample=__a , max_new_tokens=20 , use_cache=__a )
self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0 )
| 1 | '''simple docstring'''
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class __A ( unittest.TestCase ):
def __init__(self : str , __a : Optional[Any] , __a : Optional[Any]=13 , __a : int=30 , __a : Union[str, Any]=2 , __a : Dict=3 , __a : List[Any]=True , __a : Optional[Any]=True , __a : List[Any]=32 , __a : Any=5 , __a : str=4 , __a : Optional[int]=37 , __a : Optional[int]="gelu" , __a : List[str]=0.1 , __a : Tuple=0.1 , __a : List[str]=10 , __a : Optional[int]=0.02 , ):
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = image_size
UpperCAmelCase_ = patch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = is_training
UpperCAmelCase_ = use_labels
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = type_sequence_label_size
UpperCAmelCase_ = initializer_range
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
UpperCAmelCase_ = (image_size // patch_size) ** 2
UpperCAmelCase_ = num_patches + 1
def _lowercase (self : Any ):
UpperCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ = ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__a , initializer_range=self.initializer_range , )
return config, pixel_values
def _lowercase (self : Dict , __a : Any , __a : List[Any] ):
UpperCAmelCase_ = FlaxViTModel(config=__a )
UpperCAmelCase_ = model(__a )
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
UpperCAmelCase_ = (self.image_size, self.image_size)
UpperCAmelCase_ = (self.patch_size, self.patch_size)
UpperCAmelCase_ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size) )
def _lowercase (self : Tuple , __a : str , __a : Any ):
UpperCAmelCase_ = self.type_sequence_label_size
UpperCAmelCase_ = FlaxViTForImageClassification(config=__a )
UpperCAmelCase_ = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCAmelCase_ = 1
UpperCAmelCase_ = FlaxViTForImageClassification(__a )
UpperCAmelCase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase_ = model(__a )
def _lowercase (self : Optional[Any] ):
UpperCAmelCase_ = self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ = config_and_inputs
UpperCAmelCase_ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_flax
class __A ( UpperCamelCase__ , unittest.TestCase ):
a__ : Tuple = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()
def _lowercase (self : Any ):
UpperCAmelCase_ = FlaxViTModelTester(self )
UpperCAmelCase_ = ConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37 )
def _lowercase (self : Tuple ):
self.config_tester.run_common_tests()
def _lowercase (self : str ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def _lowercase (self : str ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a )
def _lowercase (self : Tuple ):
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(__a )
UpperCAmelCase_ = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ = [*signature.parameters.keys()]
UpperCAmelCase_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __a )
def _lowercase (self : Optional[Any] ):
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCAmelCase_ = self._prepare_for_class(__a , __a )
UpperCAmelCase_ = model_class(__a )
@jax.jit
def model_jitted(__a : Tuple , **__a : List[Any] ):
return model(pixel_values=__a , **__a )
with self.subTest("JIT Enabled" ):
UpperCAmelCase_ = model_jitted(**__a ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
UpperCAmelCase_ = model_jitted(**__a ).to_tuple()
self.assertEqual(len(__a ) , len(__a ) )
for jitted_output, output in zip(__a , __a ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def _lowercase (self : Tuple ):
for model_class_name in self.all_model_classes:
UpperCAmelCase_ = model_class_name.from_pretrained("google/vit-base-patch16-224" )
UpperCAmelCase_ = model(np.ones((1, 3, 224, 224) ) )
self.assertIsNotNone(__a )
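# --- Worked check of the ViT patch-count arithmetic used in the tester above:
image_size, patch_size = 30, 2
num_patches = (image_size // patch_size) ** 2
assert num_patches == 225 and num_patches + 1 == 226  # +1 for the [CLS] token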
| 1 | 1 |
'''simple docstring'''
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
SCREAMING_SNAKE_CASE_: Optional[int] =logging.get_logger(__name__)
@add_end_docstrings(
UpperCamelCase__ , r"""
top_k (`int`, defaults to 5):
The number of predictions to return.
targets (`str` or `List[str]`, *optional*):
When passed, the model will limit the scores to the passed targets instead of looking up in the whole
vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
token will be used (with a warning, and that might be slower).
""" , )
class __A ( UpperCamelCase__ ):
def _lowercase (self : str , __a : GenericTensor ):
if self.framework == "tf":
UpperCAmelCase_ = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
elif self.framework == "pt":
UpperCAmelCase_ = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=__a )
else:
raise ValueError("Unsupported framework" )
return masked_index
def _lowercase (self : Tuple , __a : GenericTensor ):
UpperCAmelCase_ = self.get_masked_index(__a )
UpperCAmelCase_ = np.prod(masked_index.shape )
if numel < 1:
raise PipelineException(
"fill-mask" , self.model.base_model_prefix , f"""No mask_token ({self.tokenizer.mask_token}) found on the input""" , )
def _lowercase (self : List[Any] , __a : GenericTensor ):
if isinstance(__a , __a ):
for model_input in model_inputs:
self._ensure_exactly_one_mask_token(model_input["input_ids"][0] )
else:
for input_ids in model_inputs["input_ids"]:
self._ensure_exactly_one_mask_token(__a )
def _lowercase (self : Tuple , __a : Dict , __a : List[str]=None , **__a : Any ):
if return_tensors is None:
UpperCAmelCase_ = self.framework
UpperCAmelCase_ = self.tokenizer(__a , return_tensors=__a )
self.ensure_exactly_one_mask_token(__a )
return model_inputs
def _lowercase (self : str , __a : Optional[int] ):
UpperCAmelCase_ = self.model(**__a )
UpperCAmelCase_ = model_inputs["input_ids"]
return model_outputs
def _lowercase (self : List[str] , __a : Tuple , __a : int=5 , __a : Dict=None ):
# Cap top_k if there are targets
if target_ids is not None and target_ids.shape[0] < top_k:
UpperCAmelCase_ = target_ids.shape[0]
UpperCAmelCase_ = model_outputs["input_ids"][0]
UpperCAmelCase_ = model_outputs["logits"]
if self.framework == "tf":
UpperCAmelCase_ = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
UpperCAmelCase_ = outputs.numpy()
UpperCAmelCase_ = outputs[0, masked_index, :]
UpperCAmelCase_ = stable_softmax(__a , axis=-1 )
if target_ids is not None:
UpperCAmelCase_ = tf.gather_nd(tf.squeeze(__a , 0 ) , target_ids.reshape(-1 , 1 ) )
UpperCAmelCase_ = tf.expand_dims(__a , 0 )
UpperCAmelCase_ = tf.math.top_k(__a , k=__a )
UpperCAmelCase_ , UpperCAmelCase_ = topk.values.numpy(), topk.indices.numpy()
else:
UpperCAmelCase_ = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=__a ).squeeze(-1 )
# Fill mask pipeline supports only one ${mask_token} per sample
UpperCAmelCase_ = outputs[0, masked_index, :]
UpperCAmelCase_ = logits.softmax(dim=-1 )
if target_ids is not None:
UpperCAmelCase_ = probs[..., target_ids]
UpperCAmelCase_ , UpperCAmelCase_ = probs.topk(__a )
UpperCAmelCase_ = []
UpperCAmelCase_ = values.shape[0] == 1
for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ):
UpperCAmelCase_ = []
for v, p in zip(_values , _predictions ):
# Copy is important since we're going to modify this array in place
UpperCAmelCase_ = input_ids.numpy().copy()
if target_ids is not None:
UpperCAmelCase_ = target_ids[p].tolist()
UpperCAmelCase_ = p
# Filter padding out:
UpperCAmelCase_ = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
# Originally we skip special tokens to give readable output.
# For multi masks though, the other [MASK] would be removed otherwise
# making the output look odd, so we add them back
UpperCAmelCase_ = self.tokenizer.decode(__a , skip_special_tokens=__a )
UpperCAmelCase_ = {"score": v, "token": p, "token_str": self.tokenizer.decode([p] ), "sequence": sequence}
row.append(__a )
result.append(__a )
if single_mask:
return result[0]
return result
def _lowercase (self : Dict , __a : List[Any] , __a : List[str]=None ):
if isinstance(__a , __a ):
UpperCAmelCase_ = [targets]
try:
UpperCAmelCase_ = self.tokenizer.get_vocab()
except Exception:
UpperCAmelCase_ = {}
UpperCAmelCase_ = []
for target in targets:
UpperCAmelCase_ = vocab.get(__a , __a )
if id_ is None:
UpperCAmelCase_ = self.tokenizer(
__a , add_special_tokens=__a , return_attention_mask=__a , return_token_type_ids=__a , max_length=1 , truncation=__a , )["input_ids"]
if len(__a ) == 0:
logger.warning(
f"""The specified target token `{target}` does not exist in the model vocabulary. """
"We cannot replace it with anything meaningful, ignoring it" )
continue
UpperCAmelCase_ = input_ids[0]
# XXX: If users encounter this pass
# it becomes pretty slow, so let's make sure
# The warning enables them to fix the input to
# get faster performance.
logger.warning(
f"""The specified target token `{target}` does not exist in the model vocabulary. """
f"""Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.""" )
target_ids.append(id_ )
UpperCAmelCase_ = list(set(__a ) )
if len(__a ) == 0:
raise ValueError("At least one target must be provided when passed." )
UpperCAmelCase_ = np.array(__a )
return target_ids
def _lowercase (self : Tuple , __a : Dict=None , __a : List[str]=None ):
UpperCAmelCase_ = {}
if targets is not None:
UpperCAmelCase_ = self.get_target_ids(__a , __a )
UpperCAmelCase_ = target_ids
if top_k is not None:
UpperCAmelCase_ = top_k
if self.tokenizer.mask_token_id is None:
raise PipelineException(
"fill-mask" , self.model.base_model_prefix , "The tokenizer does not define a `mask_token`." )
return {}, {}, postprocess_params
def __call__(self : Union[str, Any] , __a : str , *__a : Any , **__a : Tuple ):
UpperCAmelCase_ = super().__call__(__a , **__a )
if isinstance(__a , __a ) and len(__a ) == 1:
return outputs[0]
return outputs
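# --- Hedged end-to-end usage of the fill-mask pipeline defined above (the
# --- checkpoint name is an illustrative choice and requires a download):
from transformers import pipeline

unmasker = pipeline("fill-mask", model="distilroberta-base")
print(unmasker("The capital of France is <mask>.", top_k=2))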
| 1 | '''simple docstring'''
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class __A ( UpperCamelCase__ ):
def _lowercase (self : Optional[Any] ):
UpperCAmelCase_ = tempfile.mkdtemp()
UpperCAmelCase_ = 5
# Realm tok
UpperCAmelCase_ = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"test",
"question",
"this",
"is",
"the",
"first",
"second",
"third",
"fourth",
"fifth",
"record",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
UpperCAmelCase_ = os.path.join(self.tmpdirname , "realm_tokenizer" )
os.makedirs(__a , exist_ok=__a )
UpperCAmelCase_ = os.path.join(__a , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
UpperCAmelCase_ = os.path.join(self.tmpdirname , "realm_block_records" )
os.makedirs(__a , exist_ok=__a )
def _lowercase (self : Optional[Any] ):
return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , "realm_tokenizer" ) )
def _lowercase (self : Any ):
shutil.rmtree(self.tmpdirname )
def _lowercase (self : List[Any] ):
UpperCAmelCase_ = RealmConfig(num_block_records=self.num_block_records )
return config
def _lowercase (self : List[str] ):
UpperCAmelCase_ = Dataset.from_dict(
{
"id": ["0", "1"],
"question": ["foo", "bar"],
"answers": [["Foo", "Bar"], ["Bar"]],
} )
return dataset
def _lowercase (self : Any ):
UpperCAmelCase_ = np.array(
[
B"This is the first record",
B"This is the second record",
B"This is the third record",
B"This is the fourth record",
B"This is the fifth record",
B"This is a longer longer longer record",
] , dtype=__a , )
return block_records
def _lowercase (self : Union[str, Any] ):
UpperCAmelCase_ = RealmRetriever(
block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
return retriever
def _lowercase (self : int ):
UpperCAmelCase_ = self.get_config()
UpperCAmelCase_ = self.get_dummy_retriever()
UpperCAmelCase_ = retriever.tokenizer
UpperCAmelCase_ = np.array([0, 3] , dtype="long" )
UpperCAmelCase_ = tokenizer(["Test question"] ).input_ids
UpperCAmelCase_ = tokenizer(
["the fourth"] , add_special_tokens=__a , return_token_type_ids=__a , return_attention_mask=__a , ).input_ids
UpperCAmelCase_ = config.reader_seq_len
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = retriever(
__a , __a , answer_ids=__a , max_length=__a , return_tensors="np" )
self.assertEqual(len(__a ) , 2 )
self.assertEqual(len(__a ) , 2 )
self.assertEqual(len(__a ) , 2 )
self.assertEqual(concat_inputs.input_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) )
self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "first", "record", "[SEP]"] , )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "fourth", "record", "[SEP]"] , )
def _lowercase (self : List[Any] ):
UpperCAmelCase_ = self.get_config()
UpperCAmelCase_ = self.get_dummy_retriever()
UpperCAmelCase_ = retriever.tokenizer
UpperCAmelCase_ = np.array([0, 3, 5] , dtype="long" )
UpperCAmelCase_ = tokenizer(["Test question"] ).input_ids
UpperCAmelCase_ = tokenizer(
["the fourth", "longer longer"] , add_special_tokens=__a , return_token_type_ids=__a , return_attention_mask=__a , ).input_ids
UpperCAmelCase_ = config.reader_seq_len
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = retriever(
__a , __a , answer_ids=__a , max_length=__a , return_tensors="np" )
self.assertEqual([False, True, True] , __a )
self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , __a )
self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , __a )
def _lowercase (self : Optional[Any] ):
UpperCAmelCase_ = self.get_dummy_retriever()
retriever.save_pretrained(os.path.join(self.tmpdirname , "realm_block_records" ) )
# Test local path
UpperCAmelCase_ = retriever.from_pretrained(os.path.join(self.tmpdirname , "realm_block_records" ) )
self.assertEqual(retriever.block_records[0] , B"This is the first record" )
# Test mocked remote path
with patch("transformers.models.realm.retrieval_realm.hf_hub_download" ) as mock_hf_hub_download:
UpperCAmelCase_ = os.path.join(
os.path.join(self.tmpdirname , "realm_block_records" ) , _REALM_BLOCK_RECORDS_FILENAME )
UpperCAmelCase_ = RealmRetriever.from_pretrained("google/realm-cc-news-pretrained-openqa" )
self.assertEqual(retriever.block_records[0] , B"This is the first record" )
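# Hedged note (added): block_records is a plain numpy array of byte strings, which is
# why the retrieval assertions above compare against b"..." literals rather than str.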
| 1 | 1 |
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def lowerCAmelCase_ ( snake_case_ : str ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ = image.size
UpperCAmelCase_ , UpperCAmelCase_ = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
UpperCAmelCase_ = image.resize((w, h) , resample=PIL_INTERPOLATION["lanczos"] )
UpperCAmelCase_ = np.array(snake_case_ ).astype(np.floataa ) / 255.0
UpperCAmelCase_ = image[None].transpose(0 , 3 , 1 , 2 )
UpperCAmelCase_ = torch.from_numpy(snake_case_ )
return 2.0 * image - 1.0
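# Hedged sanity check (added) for the rounding above: (x - x % 32) floors each side to
# the nearest multiple of 32, e.g. a 517x389 input is resized to 512x384 before encoding.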
class __A ( UpperCamelCase__ ):
def __init__(self : Any , __a : VQModel , __a : UNetaDModel , __a : Union[
DDIMScheduler,
PNDMScheduler,
LMSDiscreteScheduler,
EulerDiscreteScheduler,
EulerAncestralDiscreteScheduler,
DPMSolverMultistepScheduler,
] , ):
super().__init__()
self.register_modules(vqvae=__a , unet=__a , scheduler=__a )
@torch.no_grad()
def __call__(self : List[Any] , __a : Union[torch.Tensor, PIL.Image.Image] = None , __a : Optional[int] = 1 , __a : Optional[int] = 100 , __a : Optional[float] = 0.0 , __a : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __a : Optional[str] = "pil" , __a : bool = True , ):
if isinstance(__a , PIL.Image.Image ):
UpperCAmelCase_ = 1
elif isinstance(__a , torch.Tensor ):
UpperCAmelCase_ = image.shape[0]
else:
raise ValueError(f"""`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(__a )}""" )
if isinstance(__a , PIL.Image.Image ):
UpperCAmelCase_ = preprocess(__a )
UpperCAmelCase_ , UpperCAmelCase_ = image.shape[-2:]
# in_channels should be 6: 3 for latents, 3 for low resolution image
UpperCAmelCase_ = (batch_size, self.unet.config.in_channels // 2, height, width)
UpperCAmelCase_ = next(self.unet.parameters() ).dtype
UpperCAmelCase_ = randn_tensor(__a , generator=__a , device=self.device , dtype=__a )
UpperCAmelCase_ = image.to(device=self.device , dtype=__a )
# set timesteps and move to the correct device
self.scheduler.set_timesteps(__a , device=self.device )
UpperCAmelCase_ = self.scheduler.timesteps
# scale the initial noise by the standard deviation required by the scheduler
UpperCAmelCase_ = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
UpperCAmelCase_ = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
UpperCAmelCase_ = {}
if accepts_eta:
UpperCAmelCase_ = eta
for t in self.progress_bar(__a ):
# concat latents and low resolution image in the channel dimension.
UpperCAmelCase_ = torch.cat([latents, image] , dim=1 )
UpperCAmelCase_ = self.scheduler.scale_model_input(__a , __a )
# predict the noise residual
UpperCAmelCase_ = self.unet(__a , __a ).sample
# compute the previous noisy sample x_t -> x_t-1
UpperCAmelCase_ = self.scheduler.step(__a , __a , __a , **__a ).prev_sample
# decode the image latents with the VQVAE
UpperCAmelCase_ = self.vqvae.decode(__a ).sample
UpperCAmelCase_ = torch.clamp(__a , -1.0 , 1.0 )
UpperCAmelCase_ = image / 2 + 0.5
UpperCAmelCase_ = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
UpperCAmelCase_ = self.numpy_to_pil(__a )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__a )
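# Hedged usage sketch (added) for the pipeline above; the checkpoint name is an
# assumption (a public LDM 4x super-resolution release), not fixed by this file.
import PIL.Image
from diffusers import LDMSuperResolutionPipeline

pipe = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
low_res = PIL.Image.open("low_res.png").convert("RGB")
upscaled = pipe(low_res, num_inference_steps=100, eta=1.0).images[0]
upscaled.save("upscaled.png")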
| 1 |
'''simple docstring'''
from math import log
from scipy.constants import Boltzmann, physical_constants
SCREAMING_SNAKE_CASE_: Optional[int] =3_00 # TEMPERATURE (unit = K)
def lowerCAmelCase_ ( snake_case_ : float , snake_case_ : float , snake_case_ : float , ) -> float:
'''simple docstring'''
if donor_conc <= 0:
raise ValueError("Donor concentration should be positive" )
elif acceptor_conc <= 0:
raise ValueError("Acceptor concentration should be positive" )
elif intrinsic_conc <= 0:
raise ValueError("Intrinsic concentration should be positive" )
elif donor_conc <= intrinsic_conc:
raise ValueError(
"Donor concentration should be greater than intrinsic concentration" )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
"Acceptor concentration should be greater than intrinsic concentration" )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
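# Hedged worked example (added; typical silicon values, illustrative only):
# donor_conc = 1e17, acceptor_conc = 1e17, intrinsic_conc = 1.5e10 gives
#     V_bi = (kT/q) * ln(1e34 / 2.25e20) ≈ 0.0259 V * 31.4 ≈ 0.81 V
# a plausible built-in potential for a heavily doped Si p-n junction.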
| 1 | 1 |
'''simple docstring'''
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class __A ( nn.Module ):
def __init__(self : int , __a : int , __a : int , __a : int , __a : Dict=0.0 , __a : Optional[int] = None , __a : str = "geglu" , __a : Optional[int] = None , __a : bool = False , __a : bool = False , __a : bool = False , __a : bool = False , __a : bool = True , __a : str = "layer_norm" , __a : bool = False , ):
super().__init__()
UpperCAmelCase_ = only_cross_attention
UpperCAmelCase_ = (num_embeds_ada_norm is not None) and norm_type == "ada_norm_zero"
UpperCAmelCase_ = (num_embeds_ada_norm is not None) and norm_type == "ada_norm"
if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
raise ValueError(
f"""`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"""
f""" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.""" )
# Define 3 blocks. Each block has its own normalization layer.
# 1. Self-Attn
if self.use_ada_layer_norm:
UpperCAmelCase_ = AdaLayerNorm(__a , __a )
elif self.use_ada_layer_norm_zero:
UpperCAmelCase_ = AdaLayerNormZero(__a , __a )
else:
UpperCAmelCase_ = nn.LayerNorm(__a , elementwise_affine=__a )
UpperCAmelCase_ = Attention(
query_dim=__a , heads=__a , dim_head=__a , dropout=__a , bias=__a , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=__a , )
# 2. Cross-Attn
if cross_attention_dim is not None or double_self_attention:
# We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
# I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
# the second cross attention block.
UpperCAmelCase_ = (
AdaLayerNorm(__a , __a )
if self.use_ada_layer_norm
else nn.LayerNorm(__a , elementwise_affine=__a )
)
UpperCAmelCase_ = Attention(
query_dim=__a , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=__a , dim_head=__a , dropout=__a , bias=__a , upcast_attention=__a , ) # is self-attn if encoder_hidden_states is none
else:
UpperCAmelCase_ = None
UpperCAmelCase_ = None
# 3. Feed-forward
UpperCAmelCase_ = nn.LayerNorm(__a , elementwise_affine=__a )
UpperCAmelCase_ = FeedForward(__a , dropout=__a , activation_fn=__a , final_dropout=__a )
# let chunk size default to None
UpperCAmelCase_ = None
UpperCAmelCase_ = 0
def _lowercase (self : str , __a : Optional[int] , __a : int ):
# Sets chunk feed-forward
UpperCAmelCase_ = chunk_size
UpperCAmelCase_ = dim
def _lowercase (self : Tuple , __a : torch.FloatTensor , __a : Optional[torch.FloatTensor] = None , __a : Optional[torch.FloatTensor] = None , __a : Optional[torch.FloatTensor] = None , __a : Optional[torch.LongTensor] = None , __a : Dict[str, Any] = None , __a : Optional[torch.LongTensor] = None , ):
# Notice that normalization is always applied before the real computation in the following blocks.
# 1. Self-Attention
if self.use_ada_layer_norm:
UpperCAmelCase_ = self.norma(__a , __a )
elif self.use_ada_layer_norm_zero:
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = self.norma(
__a , __a , __a , hidden_dtype=hidden_states.dtype )
else:
UpperCAmelCase_ = self.norma(__a )
UpperCAmelCase_ = cross_attention_kwargs if cross_attention_kwargs is not None else {}
UpperCAmelCase_ = self.attna(
__a , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=__a , **__a , )
if self.use_ada_layer_norm_zero:
UpperCAmelCase_ = gate_msa.unsqueeze(1 ) * attn_output
UpperCAmelCase_ = attn_output + hidden_states
# 2. Cross-Attention
if self.attna is not None:
UpperCAmelCase_ = (
self.norma(__a , __a ) if self.use_ada_layer_norm else self.norma(__a )
)
UpperCAmelCase_ = self.attna(
__a , encoder_hidden_states=__a , attention_mask=__a , **__a , )
UpperCAmelCase_ = attn_output + hidden_states
# 3. Feed-forward
UpperCAmelCase_ = self.norma(__a )
if self.use_ada_layer_norm_zero:
UpperCAmelCase_ = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
if self._chunk_size is not None:
# "feed_forward_chunk_size" can be used to save memory
if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
raise ValueError(
f"""`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.""" )
UpperCAmelCase_ = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
UpperCAmelCase_ = torch.cat(
[self.ff(__a ) for hid_slice in norm_hidden_states.chunk(__a , dim=self._chunk_dim )] , dim=self._chunk_dim , )
else:
UpperCAmelCase_ = self.ff(__a )
if self.use_ada_layer_norm_zero:
UpperCAmelCase_ = gate_mlp.unsqueeze(1 ) * ff_output
UpperCAmelCase_ = ff_output + hidden_states
return hidden_states
class __A ( nn.Module ):
def __init__(self : int , __a : int , __a : Optional[int] = None , __a : int = 4 , __a : float = 0.0 , __a : str = "geglu" , __a : bool = False , ):
super().__init__()
UpperCAmelCase_ = int(dim * mult )
UpperCAmelCase_ = dim_out if dim_out is not None else dim
if activation_fn == "gelu":
UpperCAmelCase_ = GELU(__a , __a )
if activation_fn == "gelu-approximate":
UpperCAmelCase_ = GELU(__a , __a , approximate="tanh" )
elif activation_fn == "geglu":
UpperCAmelCase_ = GEGLU(__a , __a )
elif activation_fn == "geglu-approximate":
UpperCAmelCase_ = ApproximateGELU(__a , __a )
UpperCAmelCase_ = nn.ModuleList([] )
# project in
self.net.append(__a )
# project dropout
self.net.append(nn.Dropout(__a ) )
# project out
self.net.append(nn.Linear(__a , __a ) )
# FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
if final_dropout:
self.net.append(nn.Dropout(__a ) )
def _lowercase (self : Optional[int] , __a : Optional[int] ):
for module in self.net:
UpperCAmelCase_ = module(__a )
return hidden_states
class __A ( nn.Module ):
def __init__(self : Optional[int] , __a : int , __a : int , __a : str = "none" ):
super().__init__()
UpperCAmelCase_ = nn.Linear(__a , __a )
UpperCAmelCase_ = approximate
def _lowercase (self : Optional[int] , __a : Dict ):
if gate.device.type != "mps":
return F.gelu(__a , approximate=self.approximate )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ) , approximate=self.approximate ).to(dtype=gate.dtype )
def _lowercase (self : List[str] , __a : Union[str, Any] ):
UpperCAmelCase_ = self.proj(__a )
UpperCAmelCase_ = self.gelu(__a )
return hidden_states
class __A ( nn.Module ):
def __init__(self : List[str] , __a : int , __a : int ):
super().__init__()
UpperCAmelCase_ = nn.Linear(__a , dim_out * 2 )
def _lowercase (self : Union[str, Any] , __a : List[Any] ):
if gate.device.type != "mps":
return F.gelu(__a )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ) ).to(dtype=gate.dtype )
def _lowercase (self : List[str] , __a : Union[str, Any] ):
UpperCAmelCase_ , UpperCAmelCase_ = self.proj(__a ).chunk(2 , dim=-1 )
return hidden_states * self.gelu(__a )
class __A ( nn.Module ):
def __init__(self : Optional[int] , __a : int , __a : int ):
super().__init__()
UpperCAmelCase_ = nn.Linear(__a , __a )
def _lowercase (self : Union[str, Any] , __a : Optional[int] ):
UpperCAmelCase_ = self.proj(__a )
return x * torch.sigmoid(1.7_02 * x )
class __A ( nn.Module ):
def __init__(self : Any , __a : str , __a : Optional[Any] ):
super().__init__()
UpperCAmelCase_ = nn.Embedding(__a , __a )
UpperCAmelCase_ = nn.SiLU()
UpperCAmelCase_ = nn.Linear(__a , embedding_dim * 2 )
UpperCAmelCase_ = nn.LayerNorm(__a , elementwise_affine=__a )
def _lowercase (self : Tuple , __a : int , __a : List[Any] ):
UpperCAmelCase_ = self.linear(self.silu(self.emb(__a ) ) )
UpperCAmelCase_ , UpperCAmelCase_ = torch.chunk(__a , 2 )
UpperCAmelCase_ = self.norm(__a ) * (1 + scale) + shift
return x
class __A ( nn.Module ):
def __init__(self : List[str] , __a : Optional[Any] , __a : List[Any] ):
super().__init__()
UpperCAmelCase_ = CombinedTimestepLabelEmbeddings(__a , __a )
UpperCAmelCase_ = nn.SiLU()
UpperCAmelCase_ = nn.Linear(__a , 6 * embedding_dim , bias=__a )
UpperCAmelCase_ = nn.LayerNorm(__a , elementwise_affine=__a , eps=1E-6 )
def _lowercase (self : str , __a : Optional[int] , __a : Any , __a : Any , __a : str=None ):
UpperCAmelCase_ = self.linear(self.silu(self.emb(__a , __a , hidden_dtype=__a ) ) )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = emb.chunk(6 , dim=1 )
UpperCAmelCase_ = self.norm(__a ) * (1 + scale_msa[:, None]) + shift_msa[:, None]
return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class __A ( nn.Module ):
def __init__(self : Optional[Any] , __a : int , __a : int , __a : int , __a : Optional[str] = None , __a : float = 1E-5 ):
super().__init__()
UpperCAmelCase_ = num_groups
UpperCAmelCase_ = eps
if act_fn is None:
UpperCAmelCase_ = None
else:
UpperCAmelCase_ = get_activation(__a )
UpperCAmelCase_ = nn.Linear(__a , out_dim * 2 )
def _lowercase (self : Optional[int] , __a : List[str] , __a : str ):
if self.act:
UpperCAmelCase_ = self.act(__a )
UpperCAmelCase_ = self.linear(__a )
UpperCAmelCase_ = emb[:, :, None, None]
UpperCAmelCase_ , UpperCAmelCase_ = emb.chunk(2 , dim=1 )
UpperCAmelCase_ = F.group_norm(__a , self.num_groups , eps=self.eps )
UpperCAmelCase_ = x * (1 + scale) + shift
return x
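# Hedged sketch (added) of the chunked feed-forward trick used in the transformer
# block above: splitting one dimension into chunks bounds the MLP's peak memory.
import torch

def chunked_ff(ff, hidden, chunk_size, dim=1):
    # assumes hidden.shape[dim] is divisible by chunk_size, as the block above enforces
    num_chunks = hidden.shape[dim] // chunk_size
    return torch.cat([ff(piece) for piece in hidden.chunk(num_chunks, dim=dim)], dim=dim)

mlp = torch.nn.Linear(8, 8)
out = chunked_ff(mlp, torch.randn(2, 16, 8), chunk_size=4)  # same values as mlp(x), lower peak memory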
| 1 |
'''simple docstring'''
import math
def lowerCAmelCase_ ( ) -> None:
'''simple docstring'''
UpperCAmelCase_ = input("Enter message: " )
UpperCAmelCase_ = int(input(f"""Enter key [2-{len(snake_case_ ) - 1}]: """ ) )
UpperCAmelCase_ = input("Encryption/Decryption [e/d]: " )
if mode.lower().startswith("e" ):
UpperCAmelCase_ = encrypt_message(snake_case_ , snake_case_ )
elif mode.lower().startswith("d" ):
UpperCAmelCase_ = decrypt_message(snake_case_ , snake_case_ )
# Append pipe symbol (vertical bar) to identify spaces at the end.
print(f"""Output:\n{text + "|"}""" )
def lowerCAmelCase_ ( snake_case_ : int , snake_case_ : str ) -> str:
'''simple docstring'''
UpperCAmelCase_ = [""] * key
for col in range(snake_case_ ):
UpperCAmelCase_ = col
while pointer < len(snake_case_ ):
cipher_text[col] += message[pointer]
pointer += key
return "".join(snake_case_ )
def lowerCAmelCase_ ( snake_case_ : int , snake_case_ : str ) -> str:
'''simple docstring'''
UpperCAmelCase_ = math.ceil(len(snake_case_ ) / key )
UpperCAmelCase_ = key
UpperCAmelCase_ = (num_cols * num_rows) - len(snake_case_ )
UpperCAmelCase_ = [""] * num_cols
UpperCAmelCase_ = 0
UpperCAmelCase_ = 0
for symbol in message:
plain_text[col] += symbol
col += 1
if (
(col == num_cols)
or (col == num_cols - 1)
and (row >= num_rows - num_shaded_boxes)
):
UpperCAmelCase_ = 0
row += 1
return "".join(snake_case_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
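# Hedged worked example (added) of the columnar transposition implemented above
# (conceptual names; in this dump both routines are emitted as lowerCAmelCase_):
# encrypting "HELLO WORLD" with key 4 reads every 4th character into a column:
#     col 0 -> "HOR", col 1 -> "E L", col 2 -> "LWD", col 3 -> "LO"
# giving "HORE LLWDLO"; decryption rebuilds the grid and reads it back row-wise.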
| 1 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_: List[str] =logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_: Dict ={
'facebook/nllb-moe-54B': 'https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json',
}
class __A ( UpperCamelCase__ ):
a__ : Union[str, Any] = """nllb-moe"""
a__ : Optional[Any] = ["""past_key_values"""]
a__ : Dict = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__(self : Optional[Any] , __a : Tuple=128112 , __a : Optional[int]=1024 , __a : List[str]=12 , __a : int=4096 , __a : str=16 , __a : Any=12 , __a : List[str]=4096 , __a : str=16 , __a : Any=0.05 , __a : str=0.05 , __a : Dict=True , __a : Dict=True , __a : Optional[Any]="relu" , __a : List[str]=1024 , __a : Any=0.1 , __a : Union[str, Any]=0.1 , __a : List[str]=0.0 , __a : int=0.02 , __a : Optional[int]=2 , __a : List[Any]=True , __a : Any=False , __a : List[str]="float32" , __a : List[str]=False , __a : Optional[int]=128 , __a : Any=64 , __a : Any=4 , __a : List[str]=4 , __a : Union[str, Any]=0.0_01 , __a : Optional[Any]=0.0_01 , __a : List[Any]="all" , __a : Tuple=False , __a : Any=False , __a : Dict=1.0 , __a : int=0.2 , __a : Dict=1 , __a : Dict=0 , __a : Optional[int]=2 , __a : Any=False , **__a : Optional[int] , ):
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = d_model
UpperCAmelCase_ = encoder_ffn_dim
UpperCAmelCase_ = encoder_layers
UpperCAmelCase_ = encoder_attention_heads
UpperCAmelCase_ = decoder_ffn_dim
UpperCAmelCase_ = decoder_layers
UpperCAmelCase_ = decoder_attention_heads
UpperCAmelCase_ = dropout
UpperCAmelCase_ = attention_dropout
UpperCAmelCase_ = activation_dropout
UpperCAmelCase_ = activation_function
UpperCAmelCase_ = init_std
UpperCAmelCase_ = encoder_layerdrop
UpperCAmelCase_ = decoder_layerdrop
UpperCAmelCase_ = use_cache
UpperCAmelCase_ = encoder_layers
UpperCAmelCase_ = scale_embedding # scale factor will be sqrt(d_model) if True
UpperCAmelCase_ = router_z_loss_coef
UpperCAmelCase_ = router_aux_loss_coef
UpperCAmelCase_ = decoder_sparse_step
UpperCAmelCase_ = encoder_sparse_step
UpperCAmelCase_ = num_experts
UpperCAmelCase_ = expert_capacity
UpperCAmelCase_ = router_bias
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(f"""`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}""" )
UpperCAmelCase_ = router_dtype
UpperCAmelCase_ = router_ignore_padding_tokens
UpperCAmelCase_ = batch_prioritized_routing
UpperCAmelCase_ = second_expert_policy
UpperCAmelCase_ = normalize_router_prob_before_dropping
UpperCAmelCase_ = moe_eval_capacity_token_fraction
UpperCAmelCase_ = moe_token_dropout
UpperCAmelCase_ = output_router_logits
super().__init__(
pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , is_encoder_decoder=__a , decoder_start_token_id=__a , **__a , )
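# Hedged note (added, an assumption not stated in this config): with top-2 routing,
# `expert_capacity` caps the tokens an expert may process per batch; a common sizing
# rule is capacity ≈ 2 * ceil(tokens_per_batch / num_experts).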
| 1 |
'''simple docstring'''
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
SCREAMING_SNAKE_CASE_: Optional[int] =logging.getLogger()
SCREAMING_SNAKE_CASE_: Union[str, Any] =logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class __A ( UpperCamelCase__ ):
def _lowercase (self : Optional[Any] , __a : str ):
os.makedirs(__a , exist_ok=__a )
UpperCAmelCase_ = {"source": "What is love ?", "target": "life"}
UpperCAmelCase_ = {"train": 12, "val": 2, "test": 2}
for split in ["train", "test", "val"]:
for field in ["source", "target"]:
UpperCAmelCase_ = "\n".join([contents[field]] * n_lines[split] )
with open(os.path.join(__a , f"""{split}.{field}""" ) , "w" ) as f:
f.write(__a )
def _lowercase (self : Optional[int] , __a : int , __a : str = "pytorch" ):
UpperCAmelCase_ = self.get_auto_remove_tmp_dir()
UpperCAmelCase_ = os.path.join(__a , "output" )
UpperCAmelCase_ = os.path.join(__a , "data" )
self._create_dummy_data(data_dir=__a )
UpperCAmelCase_ = f"""
--data_dir {data_dir} \
--output_dir {output_dir} \
--model_name_or_path facebook/rag-sequence-base \
--model_type rag_sequence \
--do_train \
--do_predict \
--n_val -1 \
--val_check_interval 1.0 \
--train_batch_size 2 \
--eval_batch_size 1 \
--max_source_length 25 \
--max_target_length 25 \
--val_max_target_length 25 \
--test_max_target_length 25 \
--label_smoothing 0.1 \
--dropout 0.1 \
--attention_dropout 0.1 \
--weight_decay 0.001 \
--adam_epsilon 1e-08 \
--max_grad_norm 0.1 \
--lr_scheduler polynomial \
--learning_rate 3e-04 \
--num_train_epochs 1 \
--warmup_steps 4 \
--gradient_accumulation_steps 1 \
--distributed-port 8787 \
--use_dummy_dataset 1 \
--distributed_retriever {distributed_retriever} \
""".split()
if gpus > 0:
testargs.append(f"""--gpus={gpus}""" )
if is_apex_available():
testargs.append("--fp16" )
else:
testargs.append("--gpus=0" )
testargs.append("--distributed_backend=ddp_cpu" )
testargs.append("--num_processes=2" )
UpperCAmelCase_ = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs
execute_subprocess_async(__a , env=self.get_env() )
UpperCAmelCase_ = os.path.join(__a , "metrics.json" )
with open(__a ) as f:
UpperCAmelCase_ = json.load(__a )
return result
@require_torch_gpu
def _lowercase (self : Optional[int] ):
UpperCAmelCase_ = self._run_finetune(gpus=1 )
self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 )
@require_torch_multi_gpu
def _lowercase (self : Dict ):
UpperCAmelCase_ = self._run_finetune(gpus=2 )
self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 )
@require_torch_gpu
@require_ray
def _lowercase (self : Optional[int] ):
UpperCAmelCase_ = self._run_finetune(gpus=1 , distributed_retriever="ray" )
self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 )
@require_torch_multi_gpu
@require_ray
def _lowercase (self : Any ):
UpperCAmelCase_ = self._run_finetune(gpus=1 , distributed_retriever="ray" )
self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 )
| 1 | 1 |
'''simple docstring'''
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class __A ( UpperCamelCase__ ):
def __init__(self : int , __a : Distribution , __a : Dict=None , __a : int=None , __a : Any=0 ):
UpperCAmelCase_ = 1.0 if scale is None else scale
UpperCAmelCase_ = 0.0 if loc is None else loc
super().__init__(__a , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=__a )] )
@property
def _lowercase (self : Union[str, Any] ):
return self.base_dist.mean * self.scale + self.loc
@property
def _lowercase (self : List[Any] ):
return self.base_dist.variance * self.scale**2
@property
def _lowercase (self : List[Any] ):
return self.variance.sqrt()
class __A ( nn.Module ):
def __init__(self : Optional[int] , __a : int , __a : Dict[str, int] , __a : Callable[..., Tuple[torch.Tensor]] , **__a : List[str] ):
super().__init__(**__a )
UpperCAmelCase_ = args_dim
UpperCAmelCase_ = nn.ModuleList([nn.Linear(__a , __a ) for dim in args_dim.values()] )
UpperCAmelCase_ = domain_map
def _lowercase (self : List[str] , __a : torch.Tensor ):
UpperCAmelCase_ = [proj(__a ) for proj in self.proj]
return self.domain_map(*__a )
class __A ( nn.Module ):
def __init__(self : Union[str, Any] , __a : List[str] ):
super().__init__()
UpperCAmelCase_ = function
def _lowercase (self : Optional[int] , __a : List[str] , *__a : Optional[int] ):
return self.function(__a , *__a )
class __A :
a__ : type
a__ : int
a__ : Dict[str, int]
def __init__(self : List[Any] , __a : int = 1 ):
UpperCAmelCase_ = dim
UpperCAmelCase_ = {k: dim * self.args_dim[k] for k in self.args_dim}
def _lowercase (self : Any , __a : Any ):
if self.dim == 1:
return self.distribution_class(*__a )
else:
return Independent(self.distribution_class(*__a ) , 1 )
def _lowercase (self : List[str] , __a : Union[str, Any] , __a : Optional[torch.Tensor] = None , __a : Optional[torch.Tensor] = None , ):
UpperCAmelCase_ = self._base_distribution(__a )
if loc is None and scale is None:
return distr
else:
return AffineTransformed(__a , loc=__a , scale=__a , event_dim=self.event_dim )
@property
def _lowercase (self : Any ):
return () if self.dim == 1 else (self.dim,)
@property
def _lowercase (self : Dict ):
return len(self.event_shape )
@property
def _lowercase (self : Tuple ):
return 0.0
def _lowercase (self : List[str] , __a : int ):
return ParameterProjection(
in_features=__a , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , )
def _lowercase (self : Optional[int] , *__a : torch.Tensor ):
raise NotImplementedError()
@staticmethod
def _lowercase (__a : torch.Tensor ):
return (x + torch.sqrt(torch.square(__a ) + 4.0 )) / 2.0
class __A ( UpperCamelCase__ ):
a__ : Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
a__ : type = StudentT
@classmethod
def _lowercase (cls : Union[str, Any] , __a : torch.Tensor , __a : torch.Tensor , __a : torch.Tensor ):
UpperCAmelCase_ = cls.squareplus(__a ).clamp_min(torch.finfo(scale.dtype ).eps )
UpperCAmelCase_ = 2.0 + cls.squareplus(__a )
return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 )
class __A ( UpperCamelCase__ ):
a__ : Dict[str, int] = {"loc": 1, "scale": 1}
a__ : type = Normal
@classmethod
def _lowercase (cls : Tuple , __a : torch.Tensor , __a : torch.Tensor ):
UpperCAmelCase_ = cls.squareplus(__a ).clamp_min(torch.finfo(scale.dtype ).eps )
return loc.squeeze(-1 ), scale.squeeze(-1 )
class __A ( UpperCamelCase__ ):
a__ : Dict[str, int] = {"total_count": 1, "logits": 1}
a__ : type = NegativeBinomial
@classmethod
def _lowercase (cls : Optional[Any] , __a : torch.Tensor , __a : torch.Tensor ):
UpperCAmelCase_ = cls.squareplus(__a )
return total_count.squeeze(-1 ), logits.squeeze(-1 )
def _lowercase (self : List[str] , __a : str ):
UpperCAmelCase_ , UpperCAmelCase_ = distr_args
if self.dim == 1:
return self.distribution_class(total_count=__a , logits=__a )
else:
return Independent(self.distribution_class(total_count=__a , logits=__a ) , 1 )
def _lowercase (self : Optional[Any] , __a : int , __a : Optional[torch.Tensor] = None , __a : Optional[torch.Tensor] = None ):
UpperCAmelCase_ , UpperCAmelCase_ = distr_args
if scale is not None:
# See scaling property of Gamma.
logits += scale.log()
return self._base_distribution((total_count, logits) )
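# Hedged sketch (added): the affine wrapper defined at the top of this file exists
# because torch.distributions.TransformedDistribution does not expose mean/variance
# in general; sampling and log_prob, however, work out of the box:
import torch
from torch.distributions import Normal, TransformedDistribution
from torch.distributions.transforms import AffineTransform

base = Normal(torch.zeros(3), torch.ones(3))
scaled = TransformedDistribution(base, [AffineTransform(loc=2.0, scale=5.0)])
sample = scaled.sample()        # affine-transformed draw
logp = scaled.log_prob(sample)  # includes the log|det| correction of the transform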
| 1 |
'''simple docstring'''
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
SCREAMING_SNAKE_CASE_: Optional[int] =Lock()
def lowerCAmelCase_ ( snake_case_ : Dict , snake_case_ : Dict , snake_case_ : Optional[int] , snake_case_ : Tuple , snake_case_ : Any , snake_case_ : Optional[Any] , snake_case_ : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
global process_lock
# we perform n swaps since after n swaps we know we are sorted
# we *could* stop early if we are sorted already, but it takes as long to
# find out we are sorted as it does to sort the list with this algorithm
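    # NOTE (added): the swap count below is hardcoded to 10 to match the demo list
    # built in main(); for a general list of length n, n passes are required.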
for i in range(0 , 10 ):
if (i + position) % 2 == 0 and r_send is not None:
# send your value to your right neighbor
process_lock.acquire()
r_send[1].send(snake_case_ )
process_lock.release()
# receive your right neighbor's value
process_lock.acquire()
UpperCAmelCase_ = rr_cv[0].recv()
process_lock.release()
# take the lower value since you are on the left
UpperCAmelCase_ = min(snake_case_ , snake_case_ )
elif (i + position) % 2 != 0 and l_send is not None:
# send your value to your left neighbor
process_lock.acquire()
l_send[1].send(snake_case_ )
process_lock.release()
# receive your left neighbor's value
process_lock.acquire()
UpperCAmelCase_ = lr_cv[0].recv()
process_lock.release()
# take the higher value since you are on the right
UpperCAmelCase_ = max(snake_case_ , snake_case_ )
# after all swaps are performed, send the values back to main
result_pipe[1].send(snake_case_ )
def lowerCAmelCase_ ( snake_case_ : Optional[Any] ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ = []
UpperCAmelCase_ = []
# initialize the list of pipes where the values will be retrieved
for _ in arr:
result_pipe.append(Pipe() )
# creates the processes
# the first and last process only have one neighbor so they are made outside
# of the loop
UpperCAmelCase_ = Pipe()
UpperCAmelCase_ = Pipe()
process_array_.append(
Process(
target=snake_case_ , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
UpperCAmelCase_ = temp_rs
UpperCAmelCase_ = temp_rr
for i in range(1 , len(snake_case_ ) - 1 ):
UpperCAmelCase_ = Pipe()
UpperCAmelCase_ = Pipe()
process_array_.append(
Process(
target=snake_case_ , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
UpperCAmelCase_ = temp_rs
UpperCAmelCase_ = temp_rr
process_array_.append(
Process(
target=snake_case_ , args=(
len(snake_case_ ) - 1,
arr[len(snake_case_ ) - 1],
temp_ls,
None,
temp_lr,
None,
result_pipe[len(snake_case_ ) - 1],
) , ) )
# start the processes
for p in process_array_:
p.start()
# wait for the processes to end and write their values to the list
for p in range(0 , len(snake_case_ ) ):
UpperCAmelCase_ = result_pipe[p][0].recv()
process_array_[p].join()
return arr
def lowerCAmelCase_ ( ) -> str:
'''simple docstring'''
UpperCAmelCase_ = list(range(10 , 0 , -1 ) )
print("Initial List" )
print(*snake_case_ )
UpperCAmelCase_ = odd_even_transposition(snake_case_ )
print("Sorted List\n" )
print(*snake_case_ )
if __name__ == "__main__":
main()
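# Hedged single-process reference (added) of odd-even transposition sort, for
# comparison with the pipe-based parallel version above:
def odd_even_sort(values):
    arr = list(values)
    n = len(arr)
    for phase in range(n):
        start = phase % 2  # even phases compare (0,1),(2,3),...; odd phases (1,2),(3,4),...
        for i in range(start, n - 1, 2):
            if arr[i] > arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr

assert odd_even_sort(list(range(10, 0, -1))) == list(range(1, 11))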
| 1 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
SCREAMING_SNAKE_CASE_: Union[str, Any] ={'configuration_deit': ['DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DeiTConfig', 'DeiTOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_: Union[str, Any] =['DeiTFeatureExtractor']
SCREAMING_SNAKE_CASE_: str =['DeiTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_: Union[str, Any] =[
'DEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DeiTForImageClassification',
'DeiTForImageClassificationWithTeacher',
'DeiTForMaskedImageModeling',
'DeiTModel',
'DeiTPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_: Dict =[
'TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDeiTForImageClassification',
'TFDeiTForImageClassificationWithTeacher',
'TFDeiTForMaskedImageModeling',
'TFDeiTModel',
'TFDeiTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE_: Union[str, Any] =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
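# Hedged note (added): with the _LazyModule assignment above, importing e.g. DeiTModel
# from this package resolves the heavy submodule on first attribute access rather than
# at package-import time, keeping the top-level import cheap.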
| 1 |
'''simple docstring'''
def lowerCAmelCase_ ( snake_case_ : int , snake_case_ : int ) -> str:
'''simple docstring'''
if a < 0 or b < 0:
raise ValueError("the value of both inputs must be positive" )
UpperCAmelCase_ = str(bin(snake_case_ ) )[2:] # remove the leading "0b"
UpperCAmelCase_ = str(bin(snake_case_ ) )[2:]
UpperCAmelCase_ = max(len(snake_case_ ) , len(snake_case_ ) )
return "0b" + "".join(
str(int("1" in (char_a, char_b) ) )
for char_a, char_b in zip(a_binary.zfill(snake_case_ ) , b_binary.zfill(snake_case_ ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
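# Hedged worked example (added): a = 25 (0b11001), b = 32 (0b100000); zero-filled to
# equal width, 011001 OR 100000 -> 111001, so the function returns "0b111001" (i.e. 57).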
| 1 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
SCREAMING_SNAKE_CASE_: Union[str, Any] =logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_: List[str] ={
'microsoft/focalnet-tiny': 'https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json',
}
class __A ( UpperCamelCase__ , UpperCamelCase__ ):
a__ : int = """focalnet"""
def __init__(self : Optional[int] , __a : int=224 , __a : List[str]=4 , __a : int=3 , __a : List[Any]=96 , __a : str=False , __a : List[Any]=[192, 384, 768, 768] , __a : Any=[2, 2, 6, 2] , __a : int=[2, 2, 2, 2] , __a : List[str]=[3, 3, 3, 3] , __a : Dict="gelu" , __a : Tuple=4.0 , __a : Dict=0.0 , __a : List[str]=0.1 , __a : str=False , __a : Any=1E-4 , __a : Tuple=False , __a : Any=False , __a : Optional[int]=False , __a : Any=0.02 , __a : Dict=1E-5 , __a : str=32 , __a : Dict=None , __a : str=None , **__a : str , ):
super().__init__(**__a )
UpperCAmelCase_ = image_size
UpperCAmelCase_ = patch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = embed_dim
UpperCAmelCase_ = use_conv_embed
UpperCAmelCase_ = hidden_sizes
UpperCAmelCase_ = depths
UpperCAmelCase_ = focal_levels
UpperCAmelCase_ = focal_windows
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = mlp_ratio
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = drop_path_rate
UpperCAmelCase_ = use_layerscale
UpperCAmelCase_ = layerscale_value
UpperCAmelCase_ = use_post_layernorm
UpperCAmelCase_ = use_post_layernorm_in_modulation
UpperCAmelCase_ = normalize_modulator
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = layer_norm_eps
UpperCAmelCase_ = encoder_stride
UpperCAmelCase_ = ["stem"] + [f"""stage{idx}""" for idx in range(1 , len(self.depths ) + 1 )]
UpperCAmelCase_ , UpperCAmelCase_ = get_aligned_output_features_output_indices(
out_features=__a , out_indices=__a , stage_names=self.stage_names )
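# Hedged note (added): get_aligned_output_features_output_indices reconciles the
# `out_features` stage names with numeric `out_indices` so callers may pass either;
# in the stage_names list built above, "stem" is index 0 and "stage1" is index 1.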
| 1 |
'''simple docstring'''
from __future__ import annotations
def lowerCAmelCase_ ( snake_case_ : list , snake_case_ : int | None = None , snake_case_ : int | None = None ) -> None:
'''simple docstring'''
if start is None:
UpperCAmelCase_ = 0
if end is None:
UpperCAmelCase_ = len(snake_case_ ) - 1
if start >= end:
return
UpperCAmelCase_ = (start + end) // 2
slowsort(snake_case_ , snake_case_ , snake_case_ )
slowsort(snake_case_ , mid + 1 , snake_case_ )
if sequence[end] < sequence[mid]:
UpperCAmelCase_ , UpperCAmelCase_ = sequence[mid], sequence[end]
slowsort(snake_case_ , snake_case_ , end - 1 )
if __name__ == "__main__":
from doctest import testmod
testmod()
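# Hedged usage sketch (added): slowsort sorts the slice [start, end] in place, e.g.
# seq = [5, 2, 4, 1]; slowsort(seq)  ->  seq == [1, 2, 4, 5]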
| 1 | 1 |
'''simple docstring'''
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
SCREAMING_SNAKE_CASE_: Union[str, Any] =logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_: Any ={
'facebook/detr-resnet-50': 'https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json',
# See all DETR models at https://huggingface.co/models?filter=detr
}
class __A ( UpperCamelCase__ ):
a__ : int = """detr"""
a__ : List[Any] = ["""past_key_values"""]
a__ : Tuple = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__(self : Dict , __a : List[str]=True , __a : str=None , __a : Tuple=3 , __a : List[Any]=100 , __a : List[Any]=6 , __a : int=2048 , __a : Dict=8 , __a : List[Any]=6 , __a : Optional[int]=2048 , __a : Tuple=8 , __a : int=0.0 , __a : Union[str, Any]=0.0 , __a : str=True , __a : Optional[int]="relu" , __a : Optional[int]=256 , __a : int=0.1 , __a : Union[str, Any]=0.0 , __a : Optional[Any]=0.0 , __a : List[Any]=0.02 , __a : Union[str, Any]=1.0 , __a : Optional[int]=False , __a : str="sine" , __a : Any="resnet50" , __a : Optional[int]=True , __a : List[str]=False , __a : Dict=1 , __a : List[Any]=5 , __a : Union[str, Any]=2 , __a : int=1 , __a : Optional[int]=1 , __a : Optional[int]=5 , __a : Optional[int]=2 , __a : List[str]=0.1 , **__a : List[str] , ):
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
UpperCAmelCase_ = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
elif isinstance(__a , __a ):
UpperCAmelCase_ = backbone_config.get("model_type" )
UpperCAmelCase_ = CONFIG_MAPPING[backbone_model_type]
UpperCAmelCase_ = config_class.from_dict(__a )
# set timm attributes to None
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = None, None, None
UpperCAmelCase_ = use_timm_backbone
UpperCAmelCase_ = backbone_config
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = num_queries
UpperCAmelCase_ = d_model
UpperCAmelCase_ = encoder_ffn_dim
UpperCAmelCase_ = encoder_layers
UpperCAmelCase_ = encoder_attention_heads
UpperCAmelCase_ = decoder_ffn_dim
UpperCAmelCase_ = decoder_layers
UpperCAmelCase_ = decoder_attention_heads
UpperCAmelCase_ = dropout
UpperCAmelCase_ = attention_dropout
UpperCAmelCase_ = activation_dropout
UpperCAmelCase_ = activation_function
UpperCAmelCase_ = init_std
UpperCAmelCase_ = init_xavier_std
UpperCAmelCase_ = encoder_layerdrop
UpperCAmelCase_ = decoder_layerdrop
UpperCAmelCase_ = encoder_layers
UpperCAmelCase_ = auxiliary_loss
UpperCAmelCase_ = position_embedding_type
UpperCAmelCase_ = backbone
UpperCAmelCase_ = use_pretrained_backbone
UpperCAmelCase_ = dilation
# Hungarian matcher
UpperCAmelCase_ = class_cost
UpperCAmelCase_ = bbox_cost
UpperCAmelCase_ = giou_cost
# Loss coefficients
UpperCAmelCase_ = mask_loss_coefficient
UpperCAmelCase_ = dice_loss_coefficient
UpperCAmelCase_ = bbox_loss_coefficient
UpperCAmelCase_ = giou_loss_coefficient
UpperCAmelCase_ = eos_coefficient
super().__init__(is_encoder_decoder=__a , **__a )
@property
def _lowercase (self : Tuple ):
return self.encoder_attention_heads
@property
def _lowercase (self : Optional[int] ):
return self.d_model
@classmethod
def _lowercase (cls : Optional[int] , __a : PretrainedConfig , **__a : Tuple ):
return cls(backbone_config=__a , **__a )
def _lowercase (self : Optional[Any] ):
UpperCAmelCase_ = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
UpperCAmelCase_ = self.backbone_config.to_dict()
UpperCAmelCase_ = self.__class__.model_type
return output
class __A ( UpperCamelCase__ ):
a__ : Optional[Any] = version.parse("""1.11""" )
@property
def _lowercase (self : Dict ):
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
("pixel_mask", {0: "batch"}),
] )
@property
def _lowercase (self : List[str] ):
return 1E-5
@property
def _lowercase (self : Dict ):
return 12
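# Hedged usage sketch (added); the class names below are from the public transformers
# API, not defined in this file:
from transformers import DetrConfig, DetrForObjectDetection

config = DetrConfig(num_queries=50)     # override any of the defaults set above
model = DetrForObjectDetection(config)  # randomly initialized model from the config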
| 1 |
'''simple docstring'''
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class __A ( UpperCamelCase__ ):
a__ : Optional[Any] = DistilBertTokenizer
a__ : Any = DistilBertTokenizerFast
a__ : str = True
@slow
def _lowercase (self : int ):
UpperCAmelCase_ = DistilBertTokenizer.from_pretrained("distilbert-base-uncased" )
UpperCAmelCase_ = tokenizer.encode("sequence builders" , add_special_tokens=__a )
UpperCAmelCase_ = tokenizer.encode("multi-sequence build" , add_special_tokens=__a )
UpperCAmelCase_ = tokenizer.build_inputs_with_special_tokens(__a )
UpperCAmelCase_ = tokenizer.build_inputs_with_special_tokens(__a , __a )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
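# Hedged layout (added) of the assertions above, i.e. BERT-style special tokens:
#     single sequence: [CLS] tokens [SEP]
#     sequence pair:   [CLS] tokens_a [SEP] tokens_b [SEP]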
| 1 | 1 |
'''simple docstring'''
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def sigmoid ( _outputs : np.ndarray ) -> np.ndarray:
    '''simple docstring'''
    return 1.0 / (1.0 + np.exp(-_outputs ))
def softmax ( _outputs : np.ndarray ) -> np.ndarray:
    '''simple docstring'''
    maxes = np.max(_outputs , axis=-1 , keepdims=True )
    shifted_exp = np.exp(_outputs - maxes )
    return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=True )
class __A ( UpperCamelCase__ ):
a__ : List[str] = """sigmoid"""
a__ : Tuple = """softmax"""
a__ : Union[str, Any] = """none"""
@add_end_docstrings(
UpperCamelCase__ , r"""
return_all_scores (`bool`, *optional*, defaults to `False`):
Whether to return all prediction scores or just the one of the predicted class.
function_to_apply (`str`, *optional*, defaults to `\"default\"`):
The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:
- `\"default\"`: if the model has a single label, will apply the sigmoid function on the output. If the model
has several labels, will apply the softmax function on the output.
- `\"sigmoid\"`: Applies the sigmoid function on the output.
- `\"softmax\"`: Applies the softmax function on the output.
- `\"none\"`: Does not apply any function on the output.
""" , )
class __A ( UpperCamelCase__ ):
a__ : Optional[Any] = False
a__ : List[str] = ClassificationFunction.NONE
def __init__(self : Dict , **__a : Any ):
super().__init__(**__a )
self.check_model_type(
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if self.framework == "tf"
else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING )
def _lowercase (self : Dict , __a : Dict=None , __a : Any=None , __a : int="" , **__a : Dict ):
# Using "" as default argument because we're going to use `top_k=None` in user code to declare
# "No top_k"
UpperCAmelCase_ = tokenizer_kwargs
UpperCAmelCase_ = {}
if hasattr(self.model.config , "return_all_scores" ) and return_all_scores is None:
UpperCAmelCase_ = self.model.config.return_all_scores
if isinstance(__a , __a ) or top_k is None:
UpperCAmelCase_ = top_k
UpperCAmelCase_ = False
elif return_all_scores is not None:
warnings.warn(
"`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of"
" `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`." , __a , )
if return_all_scores:
UpperCAmelCase_ = None
else:
UpperCAmelCase_ = 1
if isinstance(__a , __a ):
UpperCAmelCase_ = ClassificationFunction[function_to_apply.upper()]
if function_to_apply is not None:
UpperCAmelCase_ = function_to_apply
return preprocess_params, {}, postprocess_params
def __call__(self : Optional[int] , *__a : List[Any] , **__a : Tuple ):
UpperCAmelCase_ = super().__call__(*__a , **__a )
# TODO try and retrieve it in a nicer way from _sanitize_parameters.
UpperCAmelCase_ = "top_k" not in kwargs
if isinstance(args[0] , __a ) and _legacy:
# This pipeline is odd, and return a list when single item is run
return [result]
else:
return result
def _lowercase (self : Optional[Any] , __a : Dict , **__a : List[str] ):
UpperCAmelCase_ = self.framework
if isinstance(__a , __a ):
return self.tokenizer(**__a , return_tensors=__a , **__a )
elif isinstance(__a , __a ) and len(__a ) == 1 and isinstance(inputs[0] , __a ) and len(inputs[0] ) == 2:
# It used to be valid to use a list of list of list for text pairs, keeping this path for BC
return self.tokenizer(
text=inputs[0][0] , text_pair=inputs[0][1] , return_tensors=__a , **__a )
elif isinstance(__a , __a ):
# This is likely an invalid usage of the pipeline attempting to pass text pairs.
raise ValueError(
"The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a"
" dictionary `{\"text\": \"My text\", \"text_pair\": \"My pair\"}` in order to send a text pair." )
return self.tokenizer(__a , return_tensors=__a , **__a )
def _lowercase (self : Dict , __a : Optional[Any] ):
return self.model(**__a )
def _lowercase (self : Dict , __a : int , __a : int=None , __a : List[str]=1 , __a : List[str]=True ):
# `_legacy` is used to determine if we're running the naked pipeline and in backward
# compatibility mode, or if running the pipeline with `pipeline(..., top_k=1)` we're running
# the more natural result containing the list.
# Default value before `set_parameters`
if function_to_apply is None:
if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
UpperCAmelCase_ = ClassificationFunction.SIGMOID
elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
UpperCAmelCase_ = ClassificationFunction.SOFTMAX
elif hasattr(self.model.config , "function_to_apply" ) and function_to_apply is None:
UpperCAmelCase_ = self.model.config.function_to_apply
else:
UpperCAmelCase_ = ClassificationFunction.NONE
UpperCAmelCase_ = model_outputs["logits"][0]
UpperCAmelCase_ = outputs.numpy()
if function_to_apply == ClassificationFunction.SIGMOID:
UpperCAmelCase_ = sigmoid(__a )
elif function_to_apply == ClassificationFunction.SOFTMAX:
UpperCAmelCase_ = softmax(__a )
elif function_to_apply == ClassificationFunction.NONE:
UpperCAmelCase_ = outputs
else:
raise ValueError(f"""Unrecognized `function_to_apply` argument: {function_to_apply}""" )
if top_k == 1 and _legacy:
return {"label": self.model.config.idalabel[scores.argmax().item()], "score": scores.max().item()}
UpperCAmelCase_ = [
{"label": self.model.config.idalabel[i], "score": score.item()} for i, score in enumerate(__a )
]
if not _legacy:
            dict_scores.sort(key=lambda x : x["score"] , reverse=True )
if top_k is not None:
UpperCAmelCase_ = dict_scores[:top_k]
return dict_scores
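# Hedged usage sketch (added) of the public text-classification task this pipeline
# backs; the checkpoint name is illustrative:
from transformers import pipeline

clf = pipeline("text-classification", model="distilbert-base-uncased-finetuned-sst-2-english")
print(clf("I love this movie!"))              # [{'label': 'POSITIVE', 'score': ...}]
print(clf("I love this movie!", top_k=None))  # scores for every label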
| 1 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_: Optional[int] =logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
SCREAMING_SNAKE_CASE_: Tuple =[]
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f"transformer.encoder.layers.{i}.self_attn.out_proj.weight", f"encoder.layers.{i}.self_attn.out_proj.weight")
)
rename_keys.append(
(f"transformer.encoder.layers.{i}.self_attn.out_proj.bias", f"encoder.layers.{i}.self_attn.out_proj.bias")
)
rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"encoder.layers.{i}.fc1.weight"))
rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"encoder.layers.{i}.fc1.bias"))
rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"encoder.layers.{i}.fc2.weight"))
rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"encoder.layers.{i}.fc2.bias"))
rename_keys.append(
(f"transformer.encoder.layers.{i}.norm1.weight", f"encoder.layers.{i}.self_attn_layer_norm.weight")
)
rename_keys.append((f"transformer.encoder.layers.{i}.norm1.bias", f"encoder.layers.{i}.self_attn_layer_norm.bias"))
rename_keys.append((f"transformer.encoder.layers.{i}.norm2.weight", f"encoder.layers.{i}.final_layer_norm.weight"))
rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"encoder.layers.{i}.final_layer_norm.bias"))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", f"decoder.layers.{i}.self_attn.out_proj.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"decoder.layers.{i}.self_attn.out_proj.bias")
)
rename_keys.append(
(
f"transformer.decoder.layers.{i}.cross_attn.out_proj.weight",
f"decoder.layers.{i}.encoder_attn.out_proj.weight",
)
)
rename_keys.append(
(
f"transformer.decoder.layers.{i}.cross_attn.out_proj.bias",
f"decoder.layers.{i}.encoder_attn.out_proj.bias",
)
)
rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"decoder.layers.{i}.fc1.weight"))
rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"decoder.layers.{i}.fc1.bias"))
rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"decoder.layers.{i}.fc2.weight"))
rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"decoder.layers.{i}.fc2.bias"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.norm1.weight", f"decoder.layers.{i}.self_attn_layer_norm.weight")
)
rename_keys.append((f"transformer.decoder.layers.{i}.norm1.bias", f"decoder.layers.{i}.self_attn_layer_norm.bias"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.norm2.weight", f"decoder.layers.{i}.encoder_attn_layer_norm.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.norm2.bias", f"decoder.layers.{i}.encoder_attn_layer_norm.bias")
)
rename_keys.append((f"transformer.decoder.layers.{i}.norm3.weight", f"decoder.layers.{i}.final_layer_norm.weight"))
rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"decoder.layers.{i}.final_layer_norm.bias"))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f"transformer.decoder.layers.{i}.sa_qcontent_proj.weight", f"decoder.layers.{i}.sa_qcontent_proj.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.sa_kcontent_proj.weight", f"decoder.layers.{i}.sa_kcontent_proj.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.sa_qpos_proj.weight", f"decoder.layers.{i}.sa_qpos_proj.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.sa_kpos_proj.weight", f"decoder.layers.{i}.sa_kpos_proj.weight")
)
rename_keys.append((f"transformer.decoder.layers.{i}.sa_v_proj.weight", f"decoder.layers.{i}.sa_v_proj.weight"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.ca_qcontent_proj.weight", f"decoder.layers.{i}.ca_qcontent_proj.weight")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.ca_kcontent_proj.weight", f"decoder.layers.{i}.ca_kcontent_proj.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.ca_kpos_proj.weight", f"decoder.layers.{i}.ca_kpos_proj.weight")
)
rename_keys.append((f"transformer.decoder.layers.{i}.ca_v_proj.weight", f"decoder.layers.{i}.ca_v_proj.weight"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight", f"decoder.layers.{i}.ca_qpos_sine_proj.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.sa_qcontent_proj.bias", f"decoder.layers.{i}.sa_qcontent_proj.bias")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.sa_kcontent_proj.bias", f"decoder.layers.{i}.sa_kcontent_proj.bias")
)
rename_keys.append((f"transformer.decoder.layers.{i}.sa_qpos_proj.bias", f"decoder.layers.{i}.sa_qpos_proj.bias"))
rename_keys.append((f"transformer.decoder.layers.{i}.sa_kpos_proj.bias", f"decoder.layers.{i}.sa_kpos_proj.bias"))
rename_keys.append((f"transformer.decoder.layers.{i}.sa_v_proj.bias", f"decoder.layers.{i}.sa_v_proj.bias"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.ca_qcontent_proj.bias", f"decoder.layers.{i}.ca_qcontent_proj.bias")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.ca_kcontent_proj.bias", f"decoder.layers.{i}.ca_kcontent_proj.bias")
)
rename_keys.append((f"transformer.decoder.layers.{i}.ca_kpos_proj.bias", f"decoder.layers.{i}.ca_kpos_proj.bias"))
rename_keys.append((f"transformer.decoder.layers.{i}.ca_v_proj.bias", f"decoder.layers.{i}.ca_v_proj.bias"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias", f"decoder.layers.{i}.ca_qpos_sine_proj.bias")
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
('transformer.decoder.ref_point_head.layers.0.weight', 'decoder.ref_point_head.layers.0.weight'),
('transformer.decoder.ref_point_head.layers.0.bias', 'decoder.ref_point_head.layers.0.bias'),
('transformer.decoder.ref_point_head.layers.1.weight', 'decoder.ref_point_head.layers.1.weight'),
('transformer.decoder.ref_point_head.layers.1.bias', 'decoder.ref_point_head.layers.1.bias'),
('transformer.decoder.query_scale.layers.0.weight', 'decoder.query_scale.layers.0.weight'),
('transformer.decoder.query_scale.layers.0.bias', 'decoder.query_scale.layers.0.bias'),
('transformer.decoder.query_scale.layers.1.weight', 'decoder.query_scale.layers.1.weight'),
('transformer.decoder.query_scale.layers.1.bias', 'decoder.query_scale.layers.1.bias'),
('transformer.decoder.layers.0.ca_qpos_proj.weight', 'decoder.layers.0.ca_qpos_proj.weight'),
('transformer.decoder.layers.0.ca_qpos_proj.bias', 'decoder.layers.0.ca_qpos_proj.bias'),
]
)
def lowerCAmelCase_ ( snake_case_ : Dict , snake_case_ : Any , snake_case_ : Optional[int] ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ = state_dict.pop(snake_case_ )
UpperCAmelCase_ = val
def lowerCAmelCase_ ( snake_case_ : int ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
UpperCAmelCase_ = key.replace("backbone.0.body" , "backbone.conv_encoder.model" )
UpperCAmelCase_ = value
else:
UpperCAmelCase_ = value
return new_state_dict
def lowerCAmelCase_ ( snake_case_ : List[Any] , snake_case_ : Dict=False ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = ""
if is_panoptic:
UpperCAmelCase_ = "conditional_detr."
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
UpperCAmelCase_ = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" )
UpperCAmelCase_ = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
        UpperCAmelCase_ = in_proj_weight[:256, :]
        UpperCAmelCase_ = in_proj_bias[:256]
        UpperCAmelCase_ = in_proj_weight[256:512, :]
        UpperCAmelCase_ = in_proj_bias[256:512]
        UpperCAmelCase_ = in_proj_weight[-256:, :]
        UpperCAmelCase_ = in_proj_bias[-256:]
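# Note on the slicing above: PyTorch's nn.MultiheadAttention keeps one packed
# in_proj matrix of shape (3 * d_model, d_model), with the query, key and value
# projections stacked in that order. Conditional DETR uses d_model = 256, so rows
# 0-255 are q, rows 256-511 are k, and the last 256 rows are v. A minimal,
# self-contained sketch of the same split (random tensor, for illustration only):
#
#   import torch
#   d_model = 256
#   packed = torch.randn(3 * d_model, d_model)
#   q_w = packed[:d_model]
#   k_w = packed[d_model : 2 * d_model]
#   v_w = packed[-d_model:]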
def lowerCAmelCase_ ( ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ = "http://images.cocodataset.org/val2017/000000039769.jpg"
UpperCAmelCase_ = Image.open(requests.get(snake_case_ , stream=snake_case_ ).raw )
return im
@torch.no_grad()
def lowerCAmelCase_ ( snake_case_ : Optional[int] , snake_case_ : Dict ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
UpperCAmelCase_ = "resnet101"
if "dc5" in model_name:
UpperCAmelCase_ = True
UpperCAmelCase_ = "panoptic" in model_name
if is_panoptic:
        UpperCAmelCase_ = 250
else:
UpperCAmelCase_ = 91
UpperCAmelCase_ = "huggingface/label-files"
UpperCAmelCase_ = "coco-detection-id2label.json"
UpperCAmelCase_ = json.load(open(hf_hub_download(snake_case_ , snake_case_ , repo_type="dataset" ) , "r" ) )
    UpperCAmelCase_ = {int(k): v for k, v in idalabel.items()}
UpperCAmelCase_ = idalabel
UpperCAmelCase_ = {v: k for k, v in idalabel.items()}
# load image processor
UpperCAmelCase_ = "coco_panoptic" if is_panoptic else "coco_detection"
UpperCAmelCase_ = ConditionalDetrImageProcessor(format=snake_case_ )
# prepare image
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = image_processor(images=snake_case_ , return_tensors="pt" )
UpperCAmelCase_ = encoding["pixel_values"]
logger.info(f"""Converting model {model_name}...""" )
# load original model from torch hub
UpperCAmelCase_ = torch.hub.load("DeppMeng/ConditionalDETR" , snake_case_ , pretrained=snake_case_ ).eval()
UpperCAmelCase_ = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
UpperCAmelCase_ = "conditional_detr." + src
rename_key(snake_case_ , snake_case_ , snake_case_ )
UpperCAmelCase_ = rename_backbone_keys(snake_case_ )
# query, key and value matrices need special treatment
read_in_q_k_v(snake_case_ , is_panoptic=snake_case_ )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
UpperCAmelCase_ = "conditional_detr.model." if is_panoptic else "model."
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("conditional_detr" )
and not key.startswith("class_labels_classifier" )
and not key.startswith("bbox_predictor" )
):
UpperCAmelCase_ = state_dict.pop(snake_case_ )
UpperCAmelCase_ = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
UpperCAmelCase_ = state_dict.pop(snake_case_ )
UpperCAmelCase_ = val
elif key.startswith("bbox_attention" ) or key.startswith("mask_head" ):
continue
else:
UpperCAmelCase_ = state_dict.pop(snake_case_ )
UpperCAmelCase_ = val
else:
if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ):
UpperCAmelCase_ = state_dict.pop(snake_case_ )
UpperCAmelCase_ = val
# finally, create HuggingFace model and load state dict
UpperCAmelCase_ = ConditionalDetrForSegmentation(snake_case_ ) if is_panoptic else ConditionalDetrForObjectDetection(snake_case_ )
model.load_state_dict(snake_case_ )
model.eval()
model.push_to_hub(repo_id=snake_case_ , organization="DepuMeng" , commit_message="Add model" )
# verify our conversion
UpperCAmelCase_ = conditional_detr(snake_case_ )
UpperCAmelCase_ = model(snake_case_ )
assert torch.allclose(outputs.logits , original_outputs["pred_logits"] , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes , original_outputs["pred_boxes"] , atol=1E-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs["pred_masks"] , atol=1E-4 )
# Save model and image processor
logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
Path(snake_case_ ).mkdir(exist_ok=snake_case_ )
model.save_pretrained(snake_case_ )
image_processor.save_pretrained(snake_case_ )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_: List[str] =argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='conditional_detr_resnet50',
type=str,
help='Name of the CONDITIONAL_DETR model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
SCREAMING_SNAKE_CASE_: int =parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
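# Example invocation (script filename and output path are illustrative only):
#   python convert_conditional_detr_checkpoint.py \
#       --model_name conditional_detr_resnet50 \
#       --pytorch_dump_folder_path ./conditional_detr_resnet50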
| 1 | 1 |
'''simple docstring'''
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SCREAMING_SNAKE_CASE_: Tuple =get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
SCREAMING_SNAKE_CASE_: int =50003
SCREAMING_SNAKE_CASE_: Any =50002
@require_sentencepiece
@require_tokenizers
class __A ( UpperCamelCase__ , unittest.TestCase ):
a__ : Union[str, Any] = PLBartTokenizer
a__ : Union[str, Any] = None
a__ : Tuple = False
def _lowercase (self : int ):
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase_ = PLBartTokenizer(__a , language_codes="base" , keep_accents=__a )
tokenizer.save_pretrained(self.tmpdirname )
def _lowercase (self : List[str] ):
UpperCAmelCase_ = PLBartTokenizer(__a , language_codes="base" , keep_accents=__a )
UpperCAmelCase_ = tokenizer.tokenize("This is a test" )
self.assertListEqual(__a , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__a ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
UpperCAmelCase_ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
__a , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
UpperCAmelCase_ = tokenizer.convert_tokens_to_ids(__a )
self.assertListEqual(
__a , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
UpperCAmelCase_ = tokenizer.convert_ids_to_tokens(__a )
self.assertListEqual(
__a , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
UpperCAmelCase_ = tokenizer.vocab_size
        UpperCAmelCase_ = [tokenizer.convert_ids_to_tokens(x ) for x in range(end - 4 , end )]
self.assertListEqual(__a , ["__java__", "__python__", "__en_XX__", "<mask>"] )
UpperCAmelCase_ = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
UpperCAmelCase_ = tokenizer(__a ).input_ids
self.assertEqual(
tokenizer.decode(__a , skip_special_tokens=__a , clean_up_tokenization_spaces=__a ) , __a , )
def _lowercase (self : List[str] ):
UpperCAmelCase_ = PLBartTokenizer(__a , language_codes="multi" , keep_accents=__a )
UpperCAmelCase_ = tokenizer.tokenize("This is a test" )
self.assertListEqual(__a , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__a ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
UpperCAmelCase_ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
__a , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
UpperCAmelCase_ = tokenizer.convert_tokens_to_ids(__a )
self.assertListEqual(
__a , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
UpperCAmelCase_ = tokenizer.convert_ids_to_tokens(__a )
self.assertListEqual(
__a , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
UpperCAmelCase_ = tokenizer.vocab_size
        UpperCAmelCase_ = [tokenizer.convert_ids_to_tokens(x ) for x in range(end - 7 , end )]
self.assertListEqual(
__a , ["__java__", "__python__", "__en_XX__", "__javascript__", "__php__", "__ruby__", "__go__"] )
UpperCAmelCase_ = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
UpperCAmelCase_ = tokenizer(__a ).input_ids
self.assertEqual(
tokenizer.decode(__a , skip_special_tokens=__a , clean_up_tokenization_spaces=__a ) , __a , )
@require_torch
@require_sentencepiece
@require_tokenizers
class __A ( unittest.TestCase ):
a__ : str = """uclanlp/plbart-python-en_XX"""
a__ : List[str] = [
"""def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])""",
"""def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])""",
]
a__ : Tuple = [
"""Returns the maximum value of a b c.""",
"""Sums the values of a b c.""",
]
a__ : Optional[int] = [
134,
5_452,
33_460,
33_441,
33_463,
33_465,
33_463,
33_449,
988,
20,
33_456,
19,
33_456,
771,
39,
4_258,
889,
3_318,
33_441,
33_463,
33_465,
33_463,
33_449,
2_471,
2,
PYTHON_CODE,
]
@classmethod
def _lowercase (cls : Optional[int] ):
UpperCAmelCase_ = PLBartTokenizer.from_pretrained(
cls.checkpoint_name , language_codes="base" , src_lang="python" , tgt_lang="en_XX" )
UpperCAmelCase_ = 1
return cls
def _lowercase (self : Union[str, Any] ):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__java__"] , 50001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__python__"] , 50002 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__en_XX__"] , 50003 )
def _lowercase (self : Tuple ):
UpperCAmelCase_ = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , __a )
def _lowercase (self : Dict ):
self.assertIn(__a , self.tokenizer.all_special_ids )
UpperCAmelCase_ = [EN_CODE, 9037, 33442, 57, 752, 153, 14, 56, 18, 9, 2]
UpperCAmelCase_ = self.tokenizer.decode(__a , skip_special_tokens=__a )
UpperCAmelCase_ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__a )
self.assertEqual(__a , __a )
self.assertNotIn(self.tokenizer.eos_token , __a )
def _lowercase (self : int ):
UpperCAmelCase_ = ["def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])" * 20]
self.assertIsInstance(src_text[0] , __a )
UpperCAmelCase_ = 10
UpperCAmelCase_ = self.tokenizer(__a , max_length=__a , truncation=__a ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , __a )
self.assertEqual(len(__a ) , __a )
def _lowercase (self : str ):
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "__java__"] ) , [50004, 50001] )
def _lowercase (self : int ):
UpperCAmelCase_ = tempfile.mkdtemp()
UpperCAmelCase_ = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(__a )
UpperCAmelCase_ = PLBartTokenizer.from_pretrained(__a )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __a )
@require_torch
def _lowercase (self : str ):
UpperCAmelCase_ = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=__a , return_tensors="pt" )
UpperCAmelCase_ = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] )
self.assertEqual(batch.decoder_input_ids[1][0] , __a )
self.assertEqual(batch.decoder_input_ids[1][-1] , 2 )
self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] )
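    # The assertions above rely on PLBart's mBART-style shift_tokens_right: the last
    # non-pad token of each row (the language-code token) is rotated to position 0,
    # so labels ending in [..., 2, EN_CODE] become decoder_input_ids [EN_CODE, ..., 2].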
@require_torch
def _lowercase (self : Optional[Any] ):
UpperCAmelCase_ = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=__a , truncation=__a , max_length=len(self.expected_src_tokens ) , return_tensors="pt" , )
UpperCAmelCase_ = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
self.assertIsInstance(__a , __a )
self.assertEqual((2, 26) , batch.input_ids.shape )
self.assertEqual((2, 26) , batch.attention_mask.shape )
UpperCAmelCase_ = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , __a )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] )
def _lowercase (self : Optional[int] ):
UpperCAmelCase_ = self.tokenizer(self.src_text , padding=__a , truncation=__a , max_length=3 , return_tensors="pt" )
UpperCAmelCase_ = self.tokenizer(
text_target=self.tgt_text , padding=__a , truncation=__a , max_length=10 , return_tensors="pt" )
UpperCAmelCase_ = targets["input_ids"]
UpperCAmelCase_ = shift_tokens_right(__a , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def _lowercase (self : List[Any] ):
UpperCAmelCase_ = self.tokenizer._build_translation_inputs(
"A test" , return_tensors="pt" , src_lang="en_XX" , tgt_lang="java" )
self.assertEqual(
nested_simplify(__a ) , {
# A, test, EOS, en_XX
"input_ids": [[150, 242, 2, 50003]],
"attention_mask": [[1, 1, 1, 1]],
# java
"forced_bos_token_id": 50001,
} , )
| 1 | '''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
SCREAMING_SNAKE_CASE_: Union[str, Any] =logging.get_logger(__name__)
class __A ( UpperCamelCase__ ):
def __init__(self : int , *__a : Dict , **__a : str ):
warnings.warn(
"The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use CLIPImageProcessor instead." , __a , )
super().__init__(*__a , **__a )
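# Apart from the deprecation warning, usage is unchanged; a minimal sketch of the
# recommended replacement (checkpoint name illustrative):
#   from transformers import CLIPImageProcessor
#   image_processor = CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch32")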
| 1 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class __A ( unittest.TestCase ):
def __init__(self : str , __a : str , __a : Optional[int]=7 , __a : List[str]=3 , __a : Any=18 , __a : List[str]=30 , __a : Dict=400 , __a : Optional[Any]=True , __a : Optional[Any]=None , __a : int=True , __a : int=None , __a : List[str]=True , __a : Union[str, Any]=[0.5, 0.5, 0.5] , __a : Any=[0.5, 0.5, 0.5] , ):
UpperCAmelCase_ = size if size is not None else {"shortest_edge": 18}
UpperCAmelCase_ = crop_size if crop_size is not None else {"height": 18, "width": 18}
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = image_size
UpperCAmelCase_ = min_resolution
UpperCAmelCase_ = max_resolution
UpperCAmelCase_ = do_resize
UpperCAmelCase_ = size
UpperCAmelCase_ = do_center_crop
UpperCAmelCase_ = crop_size
UpperCAmelCase_ = do_normalize
UpperCAmelCase_ = image_mean
UpperCAmelCase_ = image_std
def _lowercase (self : int ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class __A ( UpperCamelCase__ , unittest.TestCase ):
a__ : Any = LevitImageProcessor if is_vision_available() else None
def _lowercase (self : int ):
UpperCAmelCase_ = LevitImageProcessingTester(self )
@property
def _lowercase (self : Any ):
return self.image_processor_tester.prepare_image_processor_dict()
def _lowercase (self : Union[str, Any] ):
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__a , "image_mean" ) )
self.assertTrue(hasattr(__a , "image_std" ) )
self.assertTrue(hasattr(__a , "do_normalize" ) )
self.assertTrue(hasattr(__a , "do_resize" ) )
self.assertTrue(hasattr(__a , "do_center_crop" ) )
self.assertTrue(hasattr(__a , "size" ) )
def _lowercase (self : Dict ):
UpperCAmelCase_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18} )
self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} )
UpperCAmelCase_ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"shortest_edge": 42} )
self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} )
def _lowercase (self : int ):
pass
def _lowercase (self : Optional[Any] ):
# Initialize image_processing
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a )
for image in image_inputs:
self.assertIsInstance(__a , Image.Image )
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
UpperCAmelCase_ = image_processing(__a , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def _lowercase (self : Dict ):
# Initialize image_processing
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a , numpify=__a )
for image in image_inputs:
self.assertIsInstance(__a , np.ndarray )
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
UpperCAmelCase_ = image_processing(__a , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def _lowercase (self : int ):
# Initialize image_processing
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a , torchify=__a )
for image in image_inputs:
self.assertIsInstance(__a , torch.Tensor )
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
UpperCAmelCase_ = image_processing(__a , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
| 1 | '''simple docstring'''
from __future__ import annotations
import queue
class __A :
def __init__(self : Optional[Any] , __a : str ):
UpperCAmelCase_ = data
UpperCAmelCase_ = None
UpperCAmelCase_ = None
def lowerCAmelCase_ ( ) -> TreeNode:
'''simple docstring'''
print("\n********Press N to stop entering at any point of time********\n" )
UpperCAmelCase_ = input("Enter the value of the root node: " ).strip().lower()
UpperCAmelCase_ = queue.Queue()
    UpperCAmelCase_ = TreeNode(int(check ) )
    q.put(tree_node )
while not q.empty():
UpperCAmelCase_ = q.get()
UpperCAmelCase_ = f"""Enter the left node of {node_found.data}: """
        UpperCAmelCase_ = input(msg ).strip().lower() or "n"
if check == "n":
return tree_node
        UpperCAmelCase_ = TreeNode(int(check ) )
        UpperCAmelCase_ = left_node
        q.put(left_node )
UpperCAmelCase_ = f"""Enter the right node of {node_found.data}: """
        UpperCAmelCase_ = input(msg ).strip().lower() or "n"
if check == "n":
return tree_node
        UpperCAmelCase_ = TreeNode(int(check ) )
        UpperCAmelCase_ = right_node
        q.put(right_node )
raise
def lowerCAmelCase_ ( snake_case_ : TreeNode ) -> None:
'''simple docstring'''
    if not isinstance(node , TreeNode ) or not node:
return
print(node.data , end="," )
pre_order(node.left )
pre_order(node.right )
def lowerCAmelCase_ ( snake_case_ : TreeNode ) -> None:
'''simple docstring'''
    if not isinstance(node , TreeNode ) or not node:
return
in_order(node.left )
print(node.data , end="," )
in_order(node.right )
def lowerCAmelCase_ ( snake_case_ : TreeNode ) -> None:
'''simple docstring'''
    if not isinstance(node , TreeNode ) or not node:
return
post_order(node.left )
post_order(node.right )
print(node.data , end="," )
def lowerCAmelCase_ ( snake_case_ : TreeNode ) -> None:
'''simple docstring'''
    if not isinstance(node , TreeNode ) or not node:
return
UpperCAmelCase_ = queue.Queue()
q.put(snake_case_ )
while not q.empty():
UpperCAmelCase_ = q.get()
print(node_dequeued.data , end="," )
if node_dequeued.left:
q.put(node_dequeued.left )
if node_dequeued.right:
q.put(node_dequeued.right )
def lowerCAmelCase_ ( snake_case_ : TreeNode ) -> None:
'''simple docstring'''
    if not isinstance(node , TreeNode ) or not node:
return
UpperCAmelCase_ = queue.Queue()
q.put(snake_case_ )
while not q.empty():
UpperCAmelCase_ = []
while not q.empty():
UpperCAmelCase_ = q.get()
print(node_dequeued.data , end="," )
if node_dequeued.left:
list_.append(node_dequeued.left )
if node_dequeued.right:
list_.append(node_dequeued.right )
print()
for node in list_:
            q.put(node )
def lowerCAmelCase_ ( snake_case_ : TreeNode ) -> None:
'''simple docstring'''
    if not isinstance(node , TreeNode ) or not node:
return
UpperCAmelCase_ = []
UpperCAmelCase_ = node
while n or stack:
while n: # start from root node, find its left child
print(n.data , end="," )
            stack.append(n )
UpperCAmelCase_ = n.left
# end of while means current node doesn't have left child
UpperCAmelCase_ = stack.pop()
# start to traverse its right child
UpperCAmelCase_ = n.right
def lowerCAmelCase_ ( snake_case_ : TreeNode ) -> None:
'''simple docstring'''
    if not isinstance(node , TreeNode ) or not node:
return
UpperCAmelCase_ = []
UpperCAmelCase_ = node
while n or stack:
while n:
            stack.append(n )
UpperCAmelCase_ = n.left
UpperCAmelCase_ = stack.pop()
print(n.data , end="," )
UpperCAmelCase_ = n.right
def lowerCAmelCase_ ( snake_case_ : TreeNode ) -> None:
'''simple docstring'''
    if not isinstance(node , TreeNode ) or not node:
return
UpperCAmelCase_ , UpperCAmelCase_ = [], []
UpperCAmelCase_ = node
stacka.append(snake_case_ )
while stacka: # to find the reversed order of post order, store it in stack2
UpperCAmelCase_ = stacka.pop()
if n.left:
stacka.append(n.left )
if n.right:
stacka.append(n.right )
        stacka.append(n )
while stacka: # pop up from stack2 will be the post order
print(stacka.pop().data , end="," )
def lowerCAmelCase_ ( snake_case_ : str = "" , snake_case_ : Any=50 , snake_case_ : Union[str, Any]="*" ) -> str:
'''simple docstring'''
if not s:
return "\n" + width * char
    UpperCAmelCase_ , UpperCAmelCase_ = divmod(width - len(s ) - 2 , 2 )
return f"""{left * char} {s} {(left + extra) * char}"""
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt('Binary Tree Traversals'))
SCREAMING_SNAKE_CASE_: TreeNode =build_tree()
print(prompt('Pre Order Traversal'))
pre_order(node)
print(prompt() + '\n')
print(prompt('In Order Traversal'))
in_order(node)
print(prompt() + '\n')
print(prompt('Post Order Traversal'))
post_order(node)
print(prompt() + '\n')
print(prompt('Level Order Traversal'))
level_order(node)
print(prompt() + '\n')
print(prompt('Actual Level Order Traversal'))
level_order_actual(node)
print('*' * 50 + '\n')
print(prompt('Pre Order Traversal - Iteration Version'))
pre_order_iter(node)
print(prompt() + '\n')
print(prompt('In Order Traversal - Iteration Version'))
in_order_iter(node)
print(prompt() + '\n')
print(prompt('Post Order Traversal - Iteration Version'))
post_order_iter(node)
print(prompt())
| 1 | 1 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class __A ( UpperCamelCase__ , unittest.TestCase ):
a__ : str = RoCBertTokenizer
a__ : Union[str, Any] = None
a__ : List[Any] = False
a__ : str = True
a__ : Optional[int] = filter_non_english
def _lowercase (self : Union[str, Any] ):
super().setUp()
UpperCAmelCase_ = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"]
UpperCAmelCase_ = {}
UpperCAmelCase_ = {}
for i, value in enumerate(__a ):
UpperCAmelCase_ = i
UpperCAmelCase_ = i
UpperCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
UpperCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["word_shape_file"] )
UpperCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["word_pronunciation_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
with open(self.word_shape_file , "w" , encoding="utf-8" ) as word_shape_writer:
json.dump(__a , __a , ensure_ascii=__a )
with open(self.word_pronunciation_file , "w" , encoding="utf-8" ) as word_pronunciation_writer:
json.dump(__a , __a , ensure_ascii=__a )
def _lowercase (self : Optional[Any] ):
UpperCAmelCase_ = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
UpperCAmelCase_ = tokenizer.tokenize("你好[SEP]你是谁" )
self.assertListEqual(__a , ["你", "好", "[SEP]", "你", "是", "谁"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(__a ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(__a ) , [5, 6, 2, 5, 7, 8] )
def _lowercase (self : Any ):
UpperCAmelCase_ = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] )
def _lowercase (self : Dict ):
UpperCAmelCase_ = RoCBertBasicTokenizer(do_lower_case=__a )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def _lowercase (self : Optional[int] ):
UpperCAmelCase_ = RoCBertBasicTokenizer(do_lower_case=__a , strip_accents=__a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] )
def _lowercase (self : Dict ):
UpperCAmelCase_ = RoCBertBasicTokenizer(do_lower_case=__a , strip_accents=__a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def _lowercase (self : Optional[Any] ):
UpperCAmelCase_ = RoCBertBasicTokenizer(do_lower_case=__a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def _lowercase (self : Any ):
UpperCAmelCase_ = RoCBertBasicTokenizer(do_lower_case=__a )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] )
def _lowercase (self : Union[str, Any] ):
UpperCAmelCase_ = RoCBertBasicTokenizer(do_lower_case=__a , strip_accents=__a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] )
def _lowercase (self : Union[str, Any] ):
UpperCAmelCase_ = RoCBertBasicTokenizer(do_lower_case=__a , strip_accents=__a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] )
def _lowercase (self : Any ):
UpperCAmelCase_ = RoCBertBasicTokenizer(do_lower_case=__a , never_split=["[UNK]"] )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] )
def _lowercase (self : Tuple ):
UpperCAmelCase_ = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
UpperCAmelCase_ = {}
for i, token in enumerate(__a ):
UpperCAmelCase_ = i
UpperCAmelCase_ = RoCBertWordpieceTokenizer(vocab=__a , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] )
self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] )
def _lowercase (self : Any ):
self.assertTrue(_is_whitespace(" " ) )
self.assertTrue(_is_whitespace("\t" ) )
self.assertTrue(_is_whitespace("\r" ) )
self.assertTrue(_is_whitespace("\n" ) )
self.assertTrue(_is_whitespace("\u00A0" ) )
self.assertFalse(_is_whitespace("A" ) )
self.assertFalse(_is_whitespace("-" ) )
def _lowercase (self : Optional[int] ):
self.assertTrue(_is_control("\u0005" ) )
self.assertFalse(_is_control("A" ) )
self.assertFalse(_is_control(" " ) )
self.assertFalse(_is_control("\t" ) )
self.assertFalse(_is_control("\r" ) )
def _lowercase (self : Union[str, Any] ):
self.assertTrue(_is_punctuation("-" ) )
self.assertTrue(_is_punctuation("$" ) )
self.assertTrue(_is_punctuation("`" ) )
self.assertTrue(_is_punctuation("." ) )
self.assertFalse(_is_punctuation("A" ) )
self.assertFalse(_is_punctuation(" " ) )
def _lowercase (self : List[Any] ):
UpperCAmelCase_ = self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
if self.test_rust_tokenizer:
UpperCAmelCase_ = self.get_rust_tokenizer()
self.assertListEqual(
                [rust_tokenizer.tokenize(t ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
def _lowercase (self : List[str] ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
UpperCAmelCase_ = self.rust_tokenizer_class.from_pretrained(__a , **__a )
UpperCAmelCase_ = f"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence."""
UpperCAmelCase_ = tokenizer_r.encode_plus(
__a , return_attention_mask=__a , return_token_type_ids=__a , return_offsets_mapping=__a , add_special_tokens=__a , )
UpperCAmelCase_ = tokenizer_r.do_lower_case if hasattr(__a , "do_lower_case" ) else False
UpperCAmelCase_ = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), "A"),
((1, 2), ","),
((3, 5), "na"),
((5, 6), "##ï"),
((6, 8), "##ve"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "Allen"),
((21, 23), "##NL"),
((23, 24), "##P"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), "a"),
((1, 2), ","),
((3, 8), "naive"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "allen"),
((21, 23), "##nl"),
((23, 24), "##p"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"] )
def _lowercase (self : Dict ):
UpperCAmelCase_ = ["的", "人", "有"]
UpperCAmelCase_ = "".join(__a )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
UpperCAmelCase_ = True
UpperCAmelCase_ = self.tokenizer_class.from_pretrained(__a , **__a )
UpperCAmelCase_ = self.rust_tokenizer_class.from_pretrained(__a , **__a )
UpperCAmelCase_ = tokenizer_p.encode(__a , add_special_tokens=__a )
UpperCAmelCase_ = tokenizer_r.encode(__a , add_special_tokens=__a )
UpperCAmelCase_ = tokenizer_r.convert_ids_to_tokens(__a )
UpperCAmelCase_ = tokenizer_p.convert_ids_to_tokens(__a )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(__a , __a )
self.assertListEqual(__a , __a )
UpperCAmelCase_ = False
UpperCAmelCase_ = self.rust_tokenizer_class.from_pretrained(__a , **__a )
UpperCAmelCase_ = self.tokenizer_class.from_pretrained(__a , **__a )
UpperCAmelCase_ = tokenizer_r.encode(__a , add_special_tokens=__a )
UpperCAmelCase_ = tokenizer_p.encode(__a , add_special_tokens=__a )
UpperCAmelCase_ = tokenizer_r.convert_ids_to_tokens(__a )
UpperCAmelCase_ = tokenizer_p.convert_ids_to_tokens(__a )
# it is expected that only the first Chinese character is not preceded by "##".
UpperCAmelCase_ = [
f"""##{token}""" if idx != 0 else token for idx, token in enumerate(__a )
]
self.assertListEqual(__a , __a )
self.assertListEqual(__a , __a )
@slow
def _lowercase (self : Tuple ):
UpperCAmelCase_ = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
UpperCAmelCase_ = tokenizer.encode("你好" , add_special_tokens=__a )
UpperCAmelCase_ = tokenizer.encode("你是谁" , add_special_tokens=__a )
UpperCAmelCase_ = tokenizer.build_inputs_with_special_tokens(__a )
UpperCAmelCase_ = tokenizer.build_inputs_with_special_tokens(__a , __a )
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def _lowercase (self : List[str] ):
UpperCAmelCase_ = self.get_tokenizers(do_lower_case=__a )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
UpperCAmelCase_ = "你好,你是谁"
UpperCAmelCase_ = tokenizer.tokenize(__a )
UpperCAmelCase_ = tokenizer.convert_tokens_to_ids(__a )
UpperCAmelCase_ = tokenizer.convert_tokens_to_shape_ids(__a )
UpperCAmelCase_ = tokenizer.convert_tokens_to_pronunciation_ids(__a )
UpperCAmelCase_ = tokenizer.prepare_for_model(
__a , __a , __a , add_special_tokens=__a )
UpperCAmelCase_ = tokenizer.encode_plus(__a , add_special_tokens=__a )
self.assertEqual(__a , __a )
| 1 | '''simple docstring'''
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
SCREAMING_SNAKE_CASE_: Optional[int] =logging.get_logger(__name__)
@add_end_docstrings(
UpperCamelCase__ , r"""
top_k (`int`, defaults to 5):
The number of predictions to return.
targets (`str` or `List[str]`, *optional*):
When passed, the model will limit the scores to the passed targets instead of looking up in the whole
vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
token will be used (with a warning, and that might be slower).
""" , )
class __A ( UpperCamelCase__ ):
def _lowercase (self : str , __a : GenericTensor ):
if self.framework == "tf":
UpperCAmelCase_ = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
elif self.framework == "pt":
UpperCAmelCase_ = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=__a )
else:
raise ValueError("Unsupported framework" )
return masked_index
def _lowercase (self : Tuple , __a : GenericTensor ):
UpperCAmelCase_ = self.get_masked_index(__a )
UpperCAmelCase_ = np.prod(masked_index.shape )
if numel < 1:
raise PipelineException(
"fill-mask" , self.model.base_model_prefix , f"""No mask_token ({self.tokenizer.mask_token}) found on the input""" , )
def _lowercase (self : List[Any] , __a : GenericTensor ):
if isinstance(__a , __a ):
for model_input in model_inputs:
self._ensure_exactly_one_mask_token(model_input["input_ids"][0] )
else:
for input_ids in model_inputs["input_ids"]:
self._ensure_exactly_one_mask_token(__a )
def _lowercase (self : Tuple , __a : Dict , __a : List[str]=None , **__a : Any ):
if return_tensors is None:
UpperCAmelCase_ = self.framework
UpperCAmelCase_ = self.tokenizer(__a , return_tensors=__a )
self.ensure_exactly_one_mask_token(__a )
return model_inputs
def _lowercase (self : str , __a : Optional[int] ):
UpperCAmelCase_ = self.model(**__a )
UpperCAmelCase_ = model_inputs["input_ids"]
return model_outputs
def _lowercase (self : List[str] , __a : Tuple , __a : int=5 , __a : Dict=None ):
# Cap top_k if there are targets
if target_ids is not None and target_ids.shape[0] < top_k:
UpperCAmelCase_ = target_ids.shape[0]
UpperCAmelCase_ = model_outputs["input_ids"][0]
UpperCAmelCase_ = model_outputs["logits"]
if self.framework == "tf":
UpperCAmelCase_ = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
UpperCAmelCase_ = outputs.numpy()
UpperCAmelCase_ = outputs[0, masked_index, :]
UpperCAmelCase_ = stable_softmax(__a , axis=-1 )
if target_ids is not None:
UpperCAmelCase_ = tf.gather_nd(tf.squeeze(__a , 0 ) , target_ids.reshape(-1 , 1 ) )
UpperCAmelCase_ = tf.expand_dims(__a , 0 )
UpperCAmelCase_ = tf.math.top_k(__a , k=__a )
UpperCAmelCase_ , UpperCAmelCase_ = topk.values.numpy(), topk.indices.numpy()
else:
UpperCAmelCase_ = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=__a ).squeeze(-1 )
# Fill mask pipeline supports only one ${mask_token} per sample
UpperCAmelCase_ = outputs[0, masked_index, :]
UpperCAmelCase_ = logits.softmax(dim=-1 )
if target_ids is not None:
UpperCAmelCase_ = probs[..., target_ids]
UpperCAmelCase_ , UpperCAmelCase_ = probs.topk(__a )
UpperCAmelCase_ = []
UpperCAmelCase_ = values.shape[0] == 1
for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ):
UpperCAmelCase_ = []
for v, p in zip(_values , _predictions ):
# Copy is important since we're going to modify this array in place
UpperCAmelCase_ = input_ids.numpy().copy()
if target_ids is not None:
UpperCAmelCase_ = target_ids[p].tolist()
UpperCAmelCase_ = p
# Filter padding out:
UpperCAmelCase_ = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
# Originally we skip special tokens to give readable output.
# For multi masks though, the other [MASK] would be removed otherwise
# making the output look odd, so we add them back
UpperCAmelCase_ = self.tokenizer.decode(__a , skip_special_tokens=__a )
UpperCAmelCase_ = {"score": v, "token": p, "token_str": self.tokenizer.decode([p] ), "sequence": sequence}
row.append(__a )
result.append(__a )
if single_mask:
return result[0]
return result
def _lowercase (self : Dict , __a : List[Any] , __a : List[str]=None ):
if isinstance(__a , __a ):
UpperCAmelCase_ = [targets]
try:
UpperCAmelCase_ = self.tokenizer.get_vocab()
except Exception:
UpperCAmelCase_ = {}
UpperCAmelCase_ = []
for target in targets:
UpperCAmelCase_ = vocab.get(__a , __a )
if id_ is None:
UpperCAmelCase_ = self.tokenizer(
__a , add_special_tokens=__a , return_attention_mask=__a , return_token_type_ids=__a , max_length=1 , truncation=__a , )["input_ids"]
if len(__a ) == 0:
logger.warning(
f"""The specified target token `{target}` does not exist in the model vocabulary. """
"We cannot replace it with anything meaningful, ignoring it" )
continue
UpperCAmelCase_ = input_ids[0]
# XXX: If users encounter this pass
# it becomes pretty slow, so let's make sure
# The warning enables them to fix the input to
# get faster performance.
logger.warning(
f"""The specified target token `{target}` does not exist in the model vocabulary. """
f"""Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.""" )
target_ids.append(id_ )
UpperCAmelCase_ = list(set(__a ) )
if len(__a ) == 0:
raise ValueError("At least one target must be provided when passed." )
UpperCAmelCase_ = np.array(__a )
return target_ids
def _lowercase (self : Tuple , __a : Dict=None , __a : List[str]=None ):
UpperCAmelCase_ = {}
if targets is not None:
UpperCAmelCase_ = self.get_target_ids(__a , __a )
UpperCAmelCase_ = target_ids
if top_k is not None:
UpperCAmelCase_ = top_k
if self.tokenizer.mask_token_id is None:
raise PipelineException(
"fill-mask" , self.model.base_model_prefix , "The tokenizer does not define a `mask_token`." )
return {}, {}, postprocess_params
def __call__(self : Union[str, Any] , __a : str , *__a : Any , **__a : Tuple ):
UpperCAmelCase_ = super().__call__(__a , **__a )
if isinstance(__a , __a ) and len(__a ) == 1:
return outputs[0]
return outputs
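# Minimal end-to-end sketch of this pipeline (checkpoint name illustrative):
#   from transformers import pipeline
#   fill_mask = pipeline("fill-mask", model="bert-base-uncased")
#   fill_mask("Paris is the [MASK] of France.", top_k=2)
#   # -> a list of dicts with "score", "token", "token_str" and "sequence" keys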
| 1 | 1 |
'''simple docstring'''
from string import ascii_uppercase
SCREAMING_SNAKE_CASE_: Any ={str(ord(c) - 55): c for c in ascii_uppercase}
def lowerCAmelCase_ ( snake_case_ : int , snake_case_ : int ) -> str:
'''simple docstring'''
    if isinstance(num , float ):
raise TypeError("int() can't convert non-string with explicit base" )
if num < 0:
raise ValueError("parameter must be positive int" )
    if isinstance(base , str ):
raise TypeError("'str' object cannot be interpreted as an integer" )
    if isinstance(base , float ):
raise TypeError("'float' object cannot be interpreted as an integer" )
if base in (0, 1):
raise ValueError("base must be >= 2" )
if base > 36:
raise ValueError("base must be <= 36" )
UpperCAmelCase_ = ""
UpperCAmelCase_ = 0
UpperCAmelCase_ = 0
while div != 1:
UpperCAmelCase_ , UpperCAmelCase_ = divmod(snake_case_ , snake_case_ )
if base >= 11 and 9 < mod < 36:
            UpperCAmelCase_ = ALPHABET_VALUES[str(mod )]
else:
            UpperCAmelCase_ = str(mod )
new_value += actual_value
UpperCAmelCase_ = num // base
UpperCAmelCase_ = div
if div == 0:
return str(new_value[::-1] )
elif div == 1:
new_value += str(snake_case_ )
return str(new_value[::-1] )
return new_value[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for base in range(2, 37):
for num in range(10_00):
assert int(decimal_to_any(num, base), base) == num, (
num,
base,
decimal_to_any(num, base),
int(decimal_to_any(num, base), base),
)
| 1 | '''simple docstring'''
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE_: str =logging.getLogger(__name__)
@dataclass(frozen=UpperCamelCase__ )
class __A :
a__ : str
a__ : str
a__ : Optional[str] = None
a__ : Optional[str] = None
a__ : Optional[str] = None
@dataclass(frozen=UpperCamelCase__ )
class __A :
a__ : List[int]
a__ : Optional[List[int]] = None
a__ : Optional[List[int]] = None
a__ : Optional[Union[int, float]] = None
a__ : Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class __A ( UpperCamelCase__ ):
a__ : List[InputFeatures]
def __init__(self : Any , __a : str , __a : PreTrainedTokenizer , __a : str , __a : Optional[int] = None , __a : Dict=False , __a : bool = False , ):
UpperCAmelCase_ = hans_processors[task]()
UpperCAmelCase_ = os.path.join(
__a , "cached_{}_{}_{}_{}".format(
"dev" if evaluate else "train" , tokenizer.__class__.__name__ , str(__a ) , __a , ) , )
UpperCAmelCase_ = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
UpperCAmelCase_ , UpperCAmelCase_ = label_list[2], label_list[1]
UpperCAmelCase_ = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
UpperCAmelCase_ = cached_features_file + ".lock"
with FileLock(__a ):
if os.path.exists(__a ) and not overwrite_cache:
logger.info(f"""Loading features from cached file {cached_features_file}""" )
UpperCAmelCase_ = torch.load(__a )
else:
logger.info(f"""Creating features from dataset file at {data_dir}""" )
UpperCAmelCase_ = (
processor.get_dev_examples(__a ) if evaluate else processor.get_train_examples(__a )
)
logger.info("Training examples: %s" , len(__a ) )
UpperCAmelCase_ = hans_convert_examples_to_features(__a , __a , __a , __a )
logger.info("Saving features into cached file %s" , __a )
torch.save(self.features , __a )
def __len__(self : List[Any] ):
return len(self.features )
def __getitem__(self : Any , __a : Optional[Any] ):
return self.features[i]
def _lowercase (self : Union[str, Any] ):
return self.label_list
if is_tf_available():
import tensorflow as tf
class __A :
a__ : List[InputFeatures]
def __init__(self : Union[str, Any] , __a : str , __a : PreTrainedTokenizer , __a : str , __a : Optional[int] = 128 , __a : Any=False , __a : bool = False , ):
UpperCAmelCase_ = hans_processors[task]()
UpperCAmelCase_ = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
UpperCAmelCase_ , UpperCAmelCase_ = label_list[2], label_list[1]
UpperCAmelCase_ = label_list
UpperCAmelCase_ = processor.get_dev_examples(__a ) if evaluate else processor.get_train_examples(__a )
UpperCAmelCase_ = hans_convert_examples_to_features(__a , __a , __a , __a )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc="convert examples to features" ):
if ex_index % 10000 == 0:
logger.info("Writing example %d of %d" % (ex_index, len(__a )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
UpperCAmelCase_ = tf.data.Dataset.from_generator(
__a , (
{
"example_id": tf.intaa,
"input_ids": tf.intaa,
"attention_mask": tf.intaa,
"token_type_ids": tf.intaa,
},
                tf.int64,
) , (
{
"example_id": tf.TensorShape([] ),
"input_ids": tf.TensorShape([None, None] ),
"attention_mask": tf.TensorShape([None, None] ),
"token_type_ids": tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) , )
def _lowercase (self : int ):
return self.dataset
def __len__(self : Any ):
return len(self.features )
def __getitem__(self : int , __a : Union[str, Any] ):
return self.features[i]
def _lowercase (self : int ):
return self.label_list
class __A ( UpperCamelCase__ ):
def _lowercase (self : List[Any] , __a : Dict ):
return self._create_examples(self._read_tsv(os.path.join(__a , "heuristics_train_set.txt" ) ) , "train" )
def _lowercase (self : Any , __a : List[Any] ):
return self._create_examples(self._read_tsv(os.path.join(__a , "heuristics_evaluation_set.txt" ) ) , "dev" )
def _lowercase (self : Any ):
return ["contradiction", "entailment", "neutral"]
def _lowercase (self : Union[str, Any] , __a : Optional[int] , __a : Union[str, Any] ):
UpperCAmelCase_ = []
for i, line in enumerate(__a ):
if i == 0:
continue
UpperCAmelCase_ = "%s-%s" % (set_type, line[0])
UpperCAmelCase_ = line[5]
UpperCAmelCase_ = line[6]
UpperCAmelCase_ = line[7][2:] if line[7].startswith("ex" ) else line[7]
UpperCAmelCase_ = line[0]
examples.append(InputExample(guid=__a , text_a=__a , text_b=__a , label=__a , pairID=__a ) )
return examples
def lowerCAmelCase_ ( snake_case_ : List[InputExample] , snake_case_ : List[str] , snake_case_ : int , snake_case_ : PreTrainedTokenizer , ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = {label: i for i, label in enumerate(snake_case_ )}
UpperCAmelCase_ = []
for ex_index, example in tqdm.tqdm(enumerate(snake_case_ ) , desc="convert examples to features" ):
        if ex_index % 10000 == 0:
logger.info("Writing example %d" % (ex_index) )
UpperCAmelCase_ = tokenizer(
example.text_a , example.text_b , add_special_tokens=snake_case_ , max_length=snake_case_ , padding="max_length" , truncation=snake_case_ , return_overflowing_tokens=snake_case_ , )
UpperCAmelCase_ = label_map[example.label] if example.label in label_map else 0
UpperCAmelCase_ = int(example.pairID )
features.append(InputFeatures(**snake_case_ , label=snake_case_ , pairID=snake_case_ ) )
for i, example in enumerate(examples[:5] ):
logger.info("*** Example ***" )
logger.info(f"""guid: {example}""" )
logger.info(f"""features: {features[i]}""" )
return features
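# Sketch of calling the converter directly (tokenizer choice illustrative; `examples`
# would come from HansProcessor above):
#   from transformers import AutoTokenizer
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#   features = hans_convert_examples_to_features(
#       examples, ["contradiction", "entailment", "neutral"], 128, tokenizer
#   )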
SCREAMING_SNAKE_CASE_: int ={
'hans': 3,
}
SCREAMING_SNAKE_CASE_: Any ={
'hans': HansProcessor,
}
| 1 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
SCREAMING_SNAKE_CASE_: Optional[int] =logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_: Union[str, Any] ='▁'
SCREAMING_SNAKE_CASE_: List[str] ={'vocab_file': 'sentencepiece.bpe.model', 'monolingual_vocab_file': 'dict.txt'}
SCREAMING_SNAKE_CASE_: Any ={
'vocab_file': {
'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model',
},
'monolingual_vocab_file': {
'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt',
},
}
SCREAMING_SNAKE_CASE_: Dict ={'vinai/bartpho-syllable': 10_24}
class __A ( UpperCamelCase__ ):
a__ : Dict = VOCAB_FILES_NAMES
a__ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
a__ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ : Dict = ["""input_ids""", """attention_mask"""]
def __init__(self : int , __a : Optional[Any] , __a : Union[str, Any] , __a : str="<s>" , __a : int="</s>" , __a : Tuple="</s>" , __a : Any="<s>" , __a : List[str]="<unk>" , __a : Any="<pad>" , __a : Optional[int]="<mask>" , __a : Optional[Dict[str, Any]] = None , **__a : int , ):
# Mask token behave like a normal word, i.e. include the space before it
        UpperCAmelCase_ = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
UpperCAmelCase_ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__a , eos_token=__a , unk_token=__a , sep_token=__a , cls_token=__a , pad_token=__a , mask_token=__a , sp_model_kwargs=self.sp_model_kwargs , **__a , )
UpperCAmelCase_ = vocab_file
UpperCAmelCase_ = monolingual_vocab_file
UpperCAmelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__a ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
UpperCAmelCase_ = {}
UpperCAmelCase_ = 0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(__a ) not in self.fairseq_tokens_to_ids:
UpperCAmelCase_ = cnt
cnt += 1
with open(__a , "r" , encoding="utf-8" ) as f:
for line in f.readlines():
UpperCAmelCase_ = line.strip().split()[0]
UpperCAmelCase_ = len(self.fairseq_tokens_to_ids )
if str(__a ) not in self.fairseq_tokens_to_ids:
UpperCAmelCase_ = len(self.fairseq_tokens_to_ids )
UpperCAmelCase_ = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__(self : Optional[Any] ):
UpperCAmelCase_ = self.__dict__.copy()
UpperCAmelCase_ = None
UpperCAmelCase_ = self.sp_model.serialized_model_proto()
return state
def __setstate__(self : Tuple , __a : Optional[Any] ):
UpperCAmelCase_ = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
UpperCAmelCase_ = {}
UpperCAmelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def _lowercase (self : int , __a : List[int] , __a : Optional[List[int]] = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCAmelCase_ = [self.cls_token_id]
UpperCAmelCase_ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
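    # Resulting layout (BART-style, as built above): a single sequence becomes
    # <s> A </s>; a pair becomes <s> A </s></s> B </s>.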
def _lowercase (self : List[Any] , __a : List[int] , __a : Optional[List[int]] = None , __a : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__a , token_ids_a=__a , already_has_special_tokens=__a )
if token_ids_a is None:
return [1] + ([0] * len(__a )) + [1]
return [1] + ([0] * len(__a )) + [1, 1] + ([0] * len(__a )) + [1]
def _lowercase (self : List[str] , __a : List[int] , __a : Optional[List[int]] = None ):
UpperCAmelCase_ = [self.sep_token_id]
UpperCAmelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def _lowercase (self : Dict ):
return len(self.fairseq_ids_to_tokens )
def _lowercase (self : str ):
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _lowercase (self : Union[str, Any] , __a : str ):
return self.sp_model.encode(__a , out_type=__a )
def _lowercase (self : Optional[int] , __a : List[str] ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def _lowercase (self : Tuple , __a : Optional[int] ):
return self.fairseq_ids_to_tokens[index]
    def _lowercase (self : Union[str, Any] , __a : Optional[int] ):
        out_string = "".join(__a ).replace(SPIECE_UNDERLINE , " " ).strip()
        return out_string
def _lowercase (self : Optional[Any] , __a : str , __a : Optional[str] = None ):
if not os.path.isdir(__a ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
UpperCAmelCase_ = os.path.join(
__a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
UpperCAmelCase_ = os.path.join(
__a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["monolingual_vocab_file"] , )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __a )
elif not os.path.isfile(self.vocab_file ):
with open(__a , "wb" ) as fi:
UpperCAmelCase_ = self.sp_model.serialized_model_proto()
fi.write(__a )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
__a ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file , __a )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(__a , "w" , encoding="utf-8" ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(f"""{str(__a )} \n""" )
return out_vocab_file, out_monolingual_vocab_file
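# Hedged standalone check (not part of the record above): the sequence-pair
# layout built by the special-tokens methods is cls + A + sep + sep + B + sep,
# i.e. <s> A </s></s> B </s>. The ids below are illustrative, not real
# vocabulary values.
_cls, _sep = 0, 2
_pair = [_cls] + [11, 12] + [_sep] + [_sep] + [21] + [_sep]
assert _pair == [0, 11, 12, 2, 2, 21, 2]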
| 1 | '''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_: Dict =logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_: Tuple ={}
class __A ( UpperCamelCase__ ):
a__ : int = """llama"""
a__ : Any = ["""past_key_values"""]
def __init__(self : List[str] , __a : List[str]=32000 , __a : Tuple=4096 , __a : List[Any]=11008 , __a : Dict=32 , __a : Tuple=32 , __a : Any=None , __a : Any="silu" , __a : List[Any]=2048 , __a : List[Any]=0.02 , __a : str=1E-6 , __a : Optional[Any]=True , __a : Union[str, Any]=0 , __a : Any=1 , __a : Dict=2 , __a : Dict=1 , __a : str=False , __a : str=None , **__a : Optional[Any] , ):
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
# for backward compatibility
if num_key_value_heads is None:
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = num_key_value_heads
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = rms_norm_eps
UpperCAmelCase_ = pretraining_tp
UpperCAmelCase_ = use_cache
UpperCAmelCase_ = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , tie_word_embeddings=__a , **__a , )
def _lowercase (self : List[str] ):
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , __a ) or len(self.rope_scaling ) != 2:
raise ValueError(
"`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, "
f"""got {self.rope_scaling}""" )
UpperCAmelCase_ = self.rope_scaling.get("type" , __a )
UpperCAmelCase_ = self.rope_scaling.get("factor" , __a )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f"""`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" )
if rope_scaling_factor is None or not isinstance(__a , __a ) or rope_scaling_factor <= 1.0:
raise ValueError(f"""`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}""" )
| 1 | 1 |
'''simple docstring'''
def encrypt(input_string: str, key: int) -> str:
    """Shuffle the characters of ``input_string`` into ``key`` zigzag rows."""
    temp_grid: list[list[str]] = [[] for _ in range(key)]
    lowest = key - 1
    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1 or len(input_string) <= key:
        return input_string
    for position, character in enumerate(input_string):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append(character)
    grid = ["".join(row) for row in temp_grid]
    output_string = "".join(grid)
    return output_string
def decrypt(input_string: str, key: int) -> str:
    """Rebuild the zigzag template, fill it with the ciphertext, read it back."""
    grid = []
    lowest = key - 1
    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1:
        return input_string
    temp_grid: list[list[str]] = [[] for _ in range(key)]  # generates template
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append("*")
    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row)]
        grid.append(list(splice))
        counter += len(splice)
    output_string = ""  # reads as zigzag
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0)
    return output_string
def bruteforce(input_string: str) -> dict[int, str]:
    """Guess every key from 1 to len(input_string) - 1 and decrypt with each."""
    results = {}
    for key_guess in range(1, len(input_string)):  # tries every key
        results[key_guess] = decrypt(input_string, key_guess)
    return results
if __name__ == "__main__":
import doctest
doctest.testmod()
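    # Hedged round-trip demonstration (not in the original): the classic
    # 3-rail example.
    ciphertext = encrypt("WEAREDISCOVEREDFLEEATONCE", 3)
    assert ciphertext == "WECRLTEERDSOEEFEAOCAIVDEN"
    assert decrypt(ciphertext, 3) == "WEAREDISCOVEREDFLEEATONCE"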
| 1 | '''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class __A ( unittest.TestCase ):
def _lowercase (self : Tuple ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _lowercase (self : str ):
UpperCAmelCase_ = 1
UpperCAmelCase_ = 3
UpperCAmelCase_ = (32, 32)
UpperCAmelCase_ = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__a )
return image
@property
def _lowercase (self : int ):
torch.manual_seed(0 )
        UpperCAmelCase_ = UNet2DConditionModel(
block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=__a , only_cross_attention=(True, True, False) , num_class_embeds=100 , )
return model
@property
def _lowercase (self : Any ):
torch.manual_seed(0 )
UpperCAmelCase_ = AutoencoderKL(
block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
return model
@property
def _lowercase (self : Optional[Any] ):
torch.manual_seed(0 )
UpperCAmelCase_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=512 , )
return CLIPTextModel(__a )
def _lowercase (self : Any ):
UpperCAmelCase_ = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ = self.dummy_cond_unet_upscale
UpperCAmelCase_ = DDPMScheduler()
UpperCAmelCase_ = DDIMScheduler(prediction_type="v_prediction" )
UpperCAmelCase_ = self.dummy_vae
UpperCAmelCase_ = self.dummy_text_encoder
UpperCAmelCase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
UpperCAmelCase_ = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        UpperCAmelCase_ = Image.fromarray(np.uint8(__a ) ).convert("RGB" ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
UpperCAmelCase_ = StableDiffusionUpscalePipeline(
unet=__a , low_res_scheduler=__a , scheduler=__a , vae=__a , text_encoder=__a , tokenizer=__a , max_noise_level=350 , )
UpperCAmelCase_ = sd_pipe.to(__a )
sd_pipe.set_progress_bar_config(disable=__a )
UpperCAmelCase_ = "A painting of a squirrel eating a burger"
UpperCAmelCase_ = torch.Generator(device=__a ).manual_seed(0 )
UpperCAmelCase_ = sd_pipe(
[prompt] , image=__a , generator=__a , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , )
UpperCAmelCase_ = output.images
UpperCAmelCase_ = torch.Generator(device=__a ).manual_seed(0 )
UpperCAmelCase_ = sd_pipe(
[prompt] , image=__a , generator=__a , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , return_dict=__a , )[0]
UpperCAmelCase_ = image[0, -3:, -3:, -1]
UpperCAmelCase_ = image_from_tuple[0, -3:, -3:, -1]
UpperCAmelCase_ = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
UpperCAmelCase_ = np.array([0.31_13, 0.39_10, 0.42_72, 0.48_59, 0.50_61, 0.46_52, 0.53_62, 0.57_15, 0.56_61] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _lowercase (self : Optional[int] ):
UpperCAmelCase_ = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ = self.dummy_cond_unet_upscale
UpperCAmelCase_ = DDPMScheduler()
UpperCAmelCase_ = DDIMScheduler(prediction_type="v_prediction" )
UpperCAmelCase_ = self.dummy_vae
UpperCAmelCase_ = self.dummy_text_encoder
UpperCAmelCase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
UpperCAmelCase_ = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        UpperCAmelCase_ = Image.fromarray(np.uint8(__a ) ).convert("RGB" ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
UpperCAmelCase_ = StableDiffusionUpscalePipeline(
unet=__a , low_res_scheduler=__a , scheduler=__a , vae=__a , text_encoder=__a , tokenizer=__a , max_noise_level=350 , )
UpperCAmelCase_ = sd_pipe.to(__a )
sd_pipe.set_progress_bar_config(disable=__a )
UpperCAmelCase_ = "A painting of a squirrel eating a burger"
UpperCAmelCase_ = sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , )
UpperCAmelCase_ = output.images
assert image.shape[0] == 2
UpperCAmelCase_ = torch.Generator(device=__a ).manual_seed(0 )
UpperCAmelCase_ = sd_pipe(
[prompt] , image=__a , generator=__a , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , )
UpperCAmelCase_ = output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def _lowercase (self : str ):
UpperCAmelCase_ = self.dummy_cond_unet_upscale
UpperCAmelCase_ = DDPMScheduler()
UpperCAmelCase_ = DDIMScheduler(prediction_type="v_prediction" )
UpperCAmelCase_ = self.dummy_vae
UpperCAmelCase_ = self.dummy_text_encoder
UpperCAmelCase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
UpperCAmelCase_ = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        UpperCAmelCase_ = Image.fromarray(np.uint8(__a ) ).convert("RGB" ).resize((64, 64) )
# put models in fp16, except vae as it overflows in fp16
UpperCAmelCase_ = unet.half()
UpperCAmelCase_ = text_encoder.half()
# make sure here that pndm scheduler skips prk
UpperCAmelCase_ = StableDiffusionUpscalePipeline(
unet=__a , low_res_scheduler=__a , scheduler=__a , vae=__a , text_encoder=__a , tokenizer=__a , max_noise_level=350 , )
UpperCAmelCase_ = sd_pipe.to(__a )
sd_pipe.set_progress_bar_config(disable=__a )
UpperCAmelCase_ = "A painting of a squirrel eating a burger"
UpperCAmelCase_ = torch.manual_seed(0 )
UpperCAmelCase_ = sd_pipe(
[prompt] , image=__a , generator=__a , num_inference_steps=2 , output_type="np" , ).images
UpperCAmelCase_ = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class __A ( unittest.TestCase ):
def _lowercase (self : List[str] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowercase (self : List[Any] ):
UpperCAmelCase_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-upscale/low_res_cat.png" )
UpperCAmelCase_ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
"/upsampled_cat.npy" )
UpperCAmelCase_ = "stabilityai/stable-diffusion-x4-upscaler"
UpperCAmelCase_ = StableDiffusionUpscalePipeline.from_pretrained(__a )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
pipe.enable_attention_slicing()
UpperCAmelCase_ = "a cat sitting on a park bench"
UpperCAmelCase_ = torch.manual_seed(0 )
UpperCAmelCase_ = pipe(
prompt=__a , image=__a , generator=__a , output_type="np" , )
UpperCAmelCase_ = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 1E-3
def _lowercase (self : Tuple ):
UpperCAmelCase_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-upscale/low_res_cat.png" )
UpperCAmelCase_ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
"/upsampled_cat_fp16.npy" )
UpperCAmelCase_ = "stabilityai/stable-diffusion-x4-upscaler"
UpperCAmelCase_ = StableDiffusionUpscalePipeline.from_pretrained(
            __a , torch_dtype=torch.float16 , )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
pipe.enable_attention_slicing()
UpperCAmelCase_ = "a cat sitting on a park bench"
UpperCAmelCase_ = torch.manual_seed(0 )
UpperCAmelCase_ = pipe(
prompt=__a , image=__a , generator=__a , output_type="np" , )
UpperCAmelCase_ = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def _lowercase (self : List[Any] ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
UpperCAmelCase_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-upscale/low_res_cat.png" )
UpperCAmelCase_ = "stabilityai/stable-diffusion-x4-upscaler"
UpperCAmelCase_ = StableDiffusionUpscalePipeline.from_pretrained(
            __a , torch_dtype=torch.float16 , )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
UpperCAmelCase_ = "a cat sitting on a park bench"
UpperCAmelCase_ = torch.manual_seed(0 )
UpperCAmelCase_ = pipe(
prompt=__a , image=__a , generator=__a , num_inference_steps=5 , output_type="np" , )
UpperCAmelCase_ = torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 10**9
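# Hedged end-to-end sketch (not part of the tests above; checkpoint and image
# URL are taken from the slow tests; it downloads weights and wants a GPU, so
# it is left commented):
#
# from diffusers import StableDiffusionUpscalePipeline
# from diffusers.utils import load_image
# pipe = StableDiffusionUpscalePipeline.from_pretrained("stabilityai/stable-diffusion-x4-upscaler")
# low_res = load_image(
#     "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
#     "/sd2-upscale/low_res_cat.png"
# )
# image = pipe(prompt="a cat sitting on a park bench", image=low_res).images[0]
# assert image.size == (low_res.size[0] * 4, low_res.size[1] * 4)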
| 1 | 1 |
'''simple docstring'''
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class __A :
def __init__(self : str , __a : Union[str, Any] , __a : Optional[Any]=13 , __a : Optional[Any]=7 , __a : Dict=False , __a : List[str]=True , __a : List[str]=False , __a : Any=True , __a : Optional[Any]=33 , __a : Union[str, Any]=32 , __a : str=5 , __a : List[Any]=4 , __a : str=37 , __a : Dict="gelu" , __a : Optional[Any]=0.1 , __a : List[Any]=0.1 , __a : Tuple=512 , __a : int=16 , __a : str=2 , __a : Any=0.02 , __a : Union[str, Any]=3 , __a : List[Any]=4 , __a : Optional[Any]=None , ):
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = seq_length
UpperCAmelCase_ = is_training
UpperCAmelCase_ = use_input_mask
UpperCAmelCase_ = use_token_type_ids
UpperCAmelCase_ = use_labels
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = type_vocab_size
UpperCAmelCase_ = type_sequence_label_size
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = num_labels
UpperCAmelCase_ = num_choices
UpperCAmelCase_ = scope
def _lowercase (self : List[str] ):
UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase_ = None
if self.use_input_mask:
UpperCAmelCase_ = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase_ = None
UpperCAmelCase_ = None
UpperCAmelCase_ = None
if self.use_labels:
UpperCAmelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase_ = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase_ = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowercase (self : Any ):
return EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def _lowercase (self : Union[str, Any] , __a : Tuple , __a : List[str] , __a : Optional[int] , __a : List[Any] , __a : List[str] , __a : int ):
UpperCAmelCase_ = EsmModel(config=__a )
model.to(__a )
model.eval()
UpperCAmelCase_ = model(__a , attention_mask=__a )
UpperCAmelCase_ = model(__a )
UpperCAmelCase_ = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _lowercase (self : Union[str, Any] , __a : int , __a : Optional[Any] , __a : List[str] , __a : Dict , __a : Tuple , __a : str ):
UpperCAmelCase_ = EsmForMaskedLM(config=__a )
model.to(__a )
model.eval()
UpperCAmelCase_ = model(__a , attention_mask=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowercase (self : Optional[Any] , __a : Dict , __a : Dict , __a : List[str] , __a : List[str] , __a : Any , __a : int ):
UpperCAmelCase_ = self.num_labels
UpperCAmelCase_ = EsmForTokenClassification(config=__a )
model.to(__a )
model.eval()
UpperCAmelCase_ = model(__a , attention_mask=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def _lowercase (self : str ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class __A ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
a__ : Union[str, Any] = False
a__ : Tuple = (
(
EsmForMaskedLM,
EsmModel,
EsmForSequenceClassification,
EsmForTokenClassification,
)
if is_torch_available()
else ()
)
a__ : Any = ()
a__ : Union[str, Any] = (
{
"""feature-extraction""": EsmModel,
"""fill-mask""": EsmForMaskedLM,
"""text-classification""": EsmForSequenceClassification,
"""token-classification""": EsmForTokenClassification,
"""zero-shot""": EsmForSequenceClassification,
}
if is_torch_available()
else {}
)
a__ : List[Any] = True
def _lowercase (self : Optional[int] ):
UpperCAmelCase_ = EsmModelTester(self )
UpperCAmelCase_ = ConfigTester(self , config_class=__a , hidden_size=37 )
def _lowercase (self : List[Any] ):
self.config_tester.run_common_tests()
def _lowercase (self : Optional[Any] ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def _lowercase (self : Any ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCAmelCase_ = type
self.model_tester.create_and_check_model(*__a )
def _lowercase (self : str ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__a )
def _lowercase (self : Union[str, Any] ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__a )
@slow
def _lowercase (self : Dict ):
for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ = EsmModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def _lowercase (self : Tuple ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()[0]
UpperCAmelCase_ = EsmEmbeddings(config=__a )
UpperCAmelCase_ = torch.as_tensor([[12, 31, 13, model.padding_idx]] )
UpperCAmelCase_ = torch.as_tensor(
[
[
0 + model.padding_idx + 1,
1 + model.padding_idx + 1,
2 + model.padding_idx + 1,
model.padding_idx,
]
] )
UpperCAmelCase_ = create_position_ids_from_input_ids(__a , model.padding_idx )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(__a , __a ) ) )
def _lowercase (self : Optional[Any] ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()[0]
UpperCAmelCase_ = EsmEmbeddings(config=__a )
UpperCAmelCase_ = torch.empty(2 , 4 , 30 )
UpperCAmelCase_ = [
0 + embeddings.padding_idx + 1,
1 + embeddings.padding_idx + 1,
2 + embeddings.padding_idx + 1,
3 + embeddings.padding_idx + 1,
]
UpperCAmelCase_ = torch.as_tensor([expected_single_positions, expected_single_positions] )
UpperCAmelCase_ = embeddings.create_position_ids_from_inputs_embeds(__a )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(__a , __a ) ) )
@unittest.skip("Esm does not support embedding resizing" )
def _lowercase (self : List[str] ):
pass
@unittest.skip("Esm does not support embedding resizing" )
def _lowercase (self : str ):
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _lowercase (self : Tuple ):
pass
@require_torch
class __A ( UpperCamelCase__ ):
@slow
def _lowercase (self : Any ):
with torch.no_grad():
UpperCAmelCase_ = EsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D" )
model.eval()
UpperCAmelCase_ = torch.tensor([[0, 1, 2, 3, 4, 5]] )
UpperCAmelCase_ = model(__a )[0]
UpperCAmelCase_ = 33
UpperCAmelCase_ = torch.Size((1, 6, vocab_size) )
self.assertEqual(output.shape , __a )
UpperCAmelCase_ = torch.tensor(
[[[8.92_15, -10.58_98, -6.46_71], [-6.39_67, -13.91_14, -1.12_12], [-7.78_12, -13.95_16, -3.74_06]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __a , atol=1E-4 ) )
@slow
def _lowercase (self : List[Any] ):
with torch.no_grad():
UpperCAmelCase_ = EsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D" )
model.eval()
UpperCAmelCase_ = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
UpperCAmelCase_ = model(__a )[0]
# compare the actual values for a slice.
UpperCAmelCase_ = torch.tensor(
[[[0.14_44, 0.54_13, 0.32_48], [0.30_34, 0.00_53, 0.31_08], [0.32_28, -0.24_99, 0.34_15]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __a , atol=1E-4 ) )
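# Hedged standalone check (not part of the tests above): the position-id rule
# the two embedding tests verify -- a cumulative count of non-padding tokens,
# offset by padding_idx, with padding positions pinned to padding_idx.
_ids = torch.tensor([[12, 31, 13, 1]])  # 1 plays the role of padding_idx here
_mask = _ids.ne(1).int()
_pos = torch.cumsum(_mask, dim=1) * _mask + 1
assert _pos.tolist() == [[2, 3, 4, 1]]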
| 1 | '''simple docstring'''
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class __A ( UpperCamelCase__ ):
def __init__(self : int , __a : Distribution , __a : Dict=None , __a : int=None , __a : Any=0 ):
UpperCAmelCase_ = 1.0 if scale is None else scale
UpperCAmelCase_ = 0.0 if loc is None else loc
super().__init__(__a , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=__a )] )
@property
def _lowercase (self : Union[str, Any] ):
return self.base_dist.mean * self.scale + self.loc
@property
def _lowercase (self : List[Any] ):
return self.base_dist.variance * self.scale**2
@property
def _lowercase (self : List[Any] ):
return self.variance.sqrt()
class __A ( nn.Module ):
def __init__(self : Optional[int] , __a : int , __a : Dict[str, int] , __a : Callable[..., Tuple[torch.Tensor]] , **__a : List[str] ):
super().__init__(**__a )
UpperCAmelCase_ = args_dim
UpperCAmelCase_ = nn.ModuleList([nn.Linear(__a , __a ) for dim in args_dim.values()] )
UpperCAmelCase_ = domain_map
def _lowercase (self : List[str] , __a : torch.Tensor ):
UpperCAmelCase_ = [proj(__a ) for proj in self.proj]
return self.domain_map(*__a )
class __A ( nn.Module ):
def __init__(self : Union[str, Any] , __a : List[str] ):
super().__init__()
UpperCAmelCase_ = function
def _lowercase (self : Optional[int] , __a : List[str] , *__a : Optional[int] ):
return self.function(__a , *__a )
class __A :
a__ : type
a__ : int
a__ : Dict[str, int]
def __init__(self : List[Any] , __a : int = 1 ):
UpperCAmelCase_ = dim
UpperCAmelCase_ = {k: dim * self.args_dim[k] for k in self.args_dim}
def _lowercase (self : Any , __a : Any ):
if self.dim == 1:
return self.distribution_class(*__a )
else:
return Independent(self.distribution_class(*__a ) , 1 )
def _lowercase (self : List[str] , __a : Union[str, Any] , __a : Optional[torch.Tensor] = None , __a : Optional[torch.Tensor] = None , ):
UpperCAmelCase_ = self._base_distribution(__a )
if loc is None and scale is None:
return distr
else:
return AffineTransformed(__a , loc=__a , scale=__a , event_dim=self.event_dim )
@property
def _lowercase (self : Any ):
return () if self.dim == 1 else (self.dim,)
@property
def _lowercase (self : Dict ):
return len(self.event_shape )
@property
def _lowercase (self : Tuple ):
return 0.0
def _lowercase (self : List[str] , __a : int ):
return ParameterProjection(
in_features=__a , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , )
def _lowercase (self : Optional[int] , *__a : torch.Tensor ):
raise NotImplementedError()
@staticmethod
def _lowercase (__a : torch.Tensor ):
return (x + torch.sqrt(torch.square(__a ) + 4.0 )) / 2.0
class __A ( UpperCamelCase__ ):
a__ : Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
a__ : type = StudentT
@classmethod
def _lowercase (cls : Union[str, Any] , __a : torch.Tensor , __a : torch.Tensor , __a : torch.Tensor ):
UpperCAmelCase_ = cls.squareplus(__a ).clamp_min(torch.finfo(scale.dtype ).eps )
UpperCAmelCase_ = 2.0 + cls.squareplus(__a )
return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 )
class __A ( UpperCamelCase__ ):
a__ : Dict[str, int] = {"loc": 1, "scale": 1}
a__ : type = Normal
@classmethod
def _lowercase (cls : Tuple , __a : torch.Tensor , __a : torch.Tensor ):
UpperCAmelCase_ = cls.squareplus(__a ).clamp_min(torch.finfo(scale.dtype ).eps )
return loc.squeeze(-1 ), scale.squeeze(-1 )
class __A ( UpperCamelCase__ ):
a__ : Dict[str, int] = {"total_count": 1, "logits": 1}
a__ : type = NegativeBinomial
@classmethod
def _lowercase (cls : Optional[Any] , __a : torch.Tensor , __a : torch.Tensor ):
UpperCAmelCase_ = cls.squareplus(__a )
return total_count.squeeze(-1 ), logits.squeeze(-1 )
def _lowercase (self : List[str] , __a : str ):
UpperCAmelCase_ , UpperCAmelCase_ = distr_args
if self.dim == 1:
return self.distribution_class(total_count=__a , logits=__a )
else:
return Independent(self.distribution_class(total_count=__a , logits=__a ) , 1 )
def _lowercase (self : Optional[Any] , __a : int , __a : Optional[torch.Tensor] = None , __a : Optional[torch.Tensor] = None ):
UpperCAmelCase_ , UpperCAmelCase_ = distr_args
if scale is not None:
# See scaling property of Gamma.
logits += scale.log()
return self._base_distribution((total_count, logits) )
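# Hedged standalone check (not part of the record above): the squareplus
# transform defined in the base class maps any real input to a strictly
# positive output, which is why it parameterizes `scale`, `df` and
# `total_count`.
_x = torch.tensor([-2.0, 0.0, 2.0])
_squareplus = (_x + torch.sqrt(torch.square(_x) + 4.0)) / 2.0
assert bool((_squareplus > 0).all())  # approx. [0.4142, 1.0000, 2.4142]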
| 1 | 1 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
PNDMScheduler,
    StableDiffusionLDM3DPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
enable_full_determinism()
class __A ( unittest.TestCase ):
    a__ : Union[str, Any] = StableDiffusionLDM3DPipeline
a__ : str = TEXT_TO_IMAGE_PARAMS
a__ : Dict = TEXT_TO_IMAGE_BATCH_PARAMS
a__ : str = TEXT_TO_IMAGE_IMAGE_PARAMS
def _lowercase (self : int ):
torch.manual_seed(0 )
        UpperCAmelCase_ = UNet2DConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
UpperCAmelCase_ = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="scaled_linear" , clip_sample=__a , set_alpha_to_one=__a , )
torch.manual_seed(0 )
UpperCAmelCase_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=6 , out_channels=6 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
torch.manual_seed(0 )
UpperCAmelCase_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
UpperCAmelCase_ = CLIPTextModel(__a )
UpperCAmelCase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
UpperCAmelCase_ = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def _lowercase (self : Any , __a : str , __a : Any=0 ):
if str(__a ).startswith("mps" ):
UpperCAmelCase_ = torch.manual_seed(__a )
else:
UpperCAmelCase_ = torch.Generator(device=__a ).manual_seed(__a )
UpperCAmelCase_ = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def _lowercase (self : List[str] ):
UpperCAmelCase_ = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ = self.get_dummy_components()
        UpperCAmelCase_ = StableDiffusionLDM3DPipeline(**__a )
UpperCAmelCase_ = ldmad_pipe.to(__a )
ldmad_pipe.set_progress_bar_config(disable=__a )
UpperCAmelCase_ = self.get_dummy_inputs(__a )
UpperCAmelCase_ = ldmad_pipe(**__a )
UpperCAmelCase_ , UpperCAmelCase_ = output.rgb, output.depth
UpperCAmelCase_ = rgb[0, -3:, -3:, -1]
UpperCAmelCase_ = depth[0, -3:, -1]
assert rgb.shape == (1, 64, 64, 3)
assert depth.shape == (1, 64, 64)
UpperCAmelCase_ = np.array(
[0.37_33_81_76, 0.7_02_47, 0.74_20_31_93, 0.51_64_36_04, 0.58_25_67_93, 0.60_93_21_36, 0.4_18_10_95, 0.48_35_58_77, 0.46_53_52_62] )
UpperCAmelCase_ = np.array([1_03.4_67_27, 85.81_20_04, 87.84_92_36] )
assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb ).max() < 1E-2
assert np.abs(image_slice_depth.flatten() - expected_slice_depth ).max() < 1E-2
def _lowercase (self : Optional[int] ):
UpperCAmelCase_ = self.get_dummy_components()
        UpperCAmelCase_ = StableDiffusionLDM3DPipeline(**__a )
UpperCAmelCase_ = ldmad_pipe.to(__a )
ldmad_pipe.set_progress_bar_config(disable=__a )
UpperCAmelCase_ = self.get_dummy_inputs(__a )
UpperCAmelCase_ = 3 * [inputs["prompt"]]
# forward
UpperCAmelCase_ = ldmad_pipe(**__a )
UpperCAmelCase_ , UpperCAmelCase_ = output.rgb, output.depth
UpperCAmelCase_ = rgb_slice_a[0, -3:, -3:, -1]
UpperCAmelCase_ = depth_slice_a[0, -3:, -1]
UpperCAmelCase_ = self.get_dummy_inputs(__a )
UpperCAmelCase_ = 3 * [inputs.pop("prompt" )]
UpperCAmelCase_ = ldmad_pipe.tokenizer(
__a , padding="max_length" , max_length=ldmad_pipe.tokenizer.model_max_length , truncation=__a , return_tensors="pt" , )
UpperCAmelCase_ = text_inputs["input_ids"].to(__a )
UpperCAmelCase_ = ldmad_pipe.text_encoder(__a )[0]
UpperCAmelCase_ = prompt_embeds
# forward
UpperCAmelCase_ = ldmad_pipe(**__a )
UpperCAmelCase_ , UpperCAmelCase_ = output.rgb, output.depth
UpperCAmelCase_ = rgb_slice_a[0, -3:, -3:, -1]
UpperCAmelCase_ = depth_slice_a[0, -3:, -1]
assert np.abs(rgb_slice_a.flatten() - rgb_slice_a.flatten() ).max() < 1E-4
assert np.abs(depth_slice_a.flatten() - depth_slice_a.flatten() ).max() < 1E-4
def _lowercase (self : Optional[Any] ):
UpperCAmelCase_ = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ = self.get_dummy_components()
UpperCAmelCase_ = PNDMScheduler(skip_prk_steps=__a )
        UpperCAmelCase_ = StableDiffusionLDM3DPipeline(**__a )
UpperCAmelCase_ = ldmad_pipe.to(__a )
ldmad_pipe.set_progress_bar_config(disable=__a )
UpperCAmelCase_ = self.get_dummy_inputs(__a )
UpperCAmelCase_ = "french fries"
UpperCAmelCase_ = ldmad_pipe(**__a , negative_prompt=__a )
UpperCAmelCase_ , UpperCAmelCase_ = output.rgb, output.depth
UpperCAmelCase_ = rgb[0, -3:, -3:, -1]
UpperCAmelCase_ = depth[0, -3:, -1]
assert rgb.shape == (1, 64, 64, 3)
assert depth.shape == (1, 64, 64)
UpperCAmelCase_ = np.array(
[0.3_70_44, 0.71_81_15_03, 0.7_22_32_51, 0.48_60_36_75, 0.5_63_83_91, 0.6_36_49_48, 0.42_83_37_04, 0.4_90_13_15, 0.47_92_62_17] )
UpperCAmelCase_ = np.array([1_07.8_47_38, 84.6_28_02, 89.96_21_35] )
assert np.abs(rgb_slice.flatten() - expected_slice_rgb ).max() < 1E-2
assert np.abs(depth_slice.flatten() - expected_slice_depth ).max() < 1E-2
@slow
@require_torch_gpu
class __A ( unittest.TestCase ):
def _lowercase (self : str ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def _lowercase (self : Union[str, Any] , __a : List[Any] , __a : int="cpu" , __a : Dict=torch.float16 , __a : Dict=0 ):
UpperCAmelCase_ = torch.Generator(device=__a ).manual_seed(__a )
UpperCAmelCase_ = np.random.RandomState(__a ).standard_normal((1, 4, 64, 64) )
UpperCAmelCase_ = torch.from_numpy(__a ).to(device=__a , dtype=__a )
UpperCAmelCase_ = {
"prompt": "a photograph of an astronaut riding a horse",
"latents": latents,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
def _lowercase (self : Tuple ):
UpperCAmelCase_ = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d" )
UpperCAmelCase_ = ldmad_pipe.to(__a )
ldmad_pipe.set_progress_bar_config(disable=__a )
UpperCAmelCase_ = self.get_inputs(__a )
UpperCAmelCase_ = ldmad_pipe(**__a )
UpperCAmelCase_ , UpperCAmelCase_ = output.rgb, output.depth
UpperCAmelCase_ = rgb[0, -3:, -3:, -1].flatten()
UpperCAmelCase_ = rgb[0, -3:, -1].flatten()
assert rgb.shape == (1, 512, 512, 3)
assert depth.shape == (1, 512, 512)
UpperCAmelCase_ = np.array(
[0.53_80_54_65, 0.56_70_73_05, 0.5_48_65_15, 0.57_01_22_36, 0.5_81_45_11, 0.56_25_34_87, 0.54_84_30_14, 0.55_09_22_63, 0.6_45_97_06] )
UpperCAmelCase_ = np.array(
[0.9_26_37_81, 0.6_67_86_72, 0.5_48_65_15, 0.92_20_21_45, 0.67_83_11_35, 0.56_25_34_87, 0.9_24_16_94, 0.7_55_14_78, 0.6_45_97_06] )
assert np.abs(rgb_slice - expected_slice_rgb ).max() < 3E-3
assert np.abs(depth_slice - expected_slice_depth ).max() < 3E-3
@nightly
@require_torch_gpu
class __A ( unittest.TestCase ):
def _lowercase (self : List[Any] ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def _lowercase (self : int , __a : Union[str, Any] , __a : List[str]="cpu" , __a : List[str]=torch.float16 , __a : Tuple=0 ):
UpperCAmelCase_ = torch.Generator(device=__a ).manual_seed(__a )
UpperCAmelCase_ = np.random.RandomState(__a ).standard_normal((1, 4, 64, 64) )
UpperCAmelCase_ = torch.from_numpy(__a ).to(device=__a , dtype=__a )
UpperCAmelCase_ = {
"prompt": "a photograph of an astronaut riding a horse",
"latents": latents,
"generator": generator,
"num_inference_steps": 50,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
def _lowercase (self : Optional[int] ):
UpperCAmelCase_ = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d" ).to(__a )
ldmad_pipe.set_progress_bar_config(disable=__a )
UpperCAmelCase_ = self.get_inputs(__a )
UpperCAmelCase_ = ldmad_pipe(**__a )
UpperCAmelCase_ , UpperCAmelCase_ = output.rgb, output.depth
UpperCAmelCase_ = 0.49_55_86
UpperCAmelCase_ = 0.33_79_55_15
UpperCAmelCase_ = 1_12.4_85_18
UpperCAmelCase_ = 98.48_97_46
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1E-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1E-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1E-3
assert np.abs(expected_depth_std - depth.std() ) < 1E-3
def _lowercase (self : Any ):
UpperCAmelCase_ = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d-4c" ).to(__a )
ldmad_pipe.set_progress_bar_config(disable=__a )
UpperCAmelCase_ = self.get_inputs(__a )
UpperCAmelCase_ = ldmad_pipe(**__a )
UpperCAmelCase_ , UpperCAmelCase_ = output.rgb, output.depth
UpperCAmelCase_ = 0.4_19_41_27
UpperCAmelCase_ = 0.35_37_55_86
UpperCAmelCase_ = 0.5_63_85_02
UpperCAmelCase_ = 0.34_68_61_03
assert rgb.shape == (1, 512, 512, 3)
assert depth.shape == (1, 512, 512, 1)
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1E-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1E-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1E-3
assert np.abs(expected_depth_std - depth.std() ) < 1E-3
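# Hedged usage sketch (not part of the tests above; the checkpoint name is
# taken from them; it downloads weights, so it is left commented):
#
# pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d")
# out = pipe(prompt="a photograph of an astronaut riding a horse")
# rgb, depth = out.rgb, out.depth  # aligned RGB image and depth map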
| 1 | '''simple docstring'''
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
_CITATION ='\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n'
_DESCRIPTION ='\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.\n'
_KWARGS_DESCRIPTION =r'\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTeX.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTeX.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting "1/2" to "\\frac{1}{2}")\n\nExamples:\n >>> metric = datasets.load_metric("competition_math")\n >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])\n >>> print(results)\n {\'accuracy\': 1.0}\n'
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __A ( datasets.Metric ):
def _lowercase (self : Optional[Any] ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" ),
"references": datasets.Value("string" ),
} ) , homepage="https://github.com/hendrycks/math" , codebase_urls=["https://github.com/hendrycks/math"] , )
def _lowercase (self : Tuple , __a : Optional[int] , __a : List[Any] ):
UpperCAmelCase_ = 0.0
for i, j in zip(__a , __a ):
n_correct += 1.0 if math_equivalence.is_equiv(__a , __a ) else 0.0
UpperCAmelCase_ = n_correct / len(__a )
return {
"accuracy": accuracy,
}
| 1 | 1 |
'''simple docstring'''
from __future__ import annotations
from statistics import mean
def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    """Calculate the waiting time of each process under shortest-job-first selection."""
    waiting_time = [0] * no_of_processes
    remaining_time = [0] * no_of_processes
    # Initialize remaining_time to burst_time.
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]
    ready_process: list[int] = []
    completed = 0
    total_time = 0
    # While processes are not completed:
    # a process whose arrival time has passed
    # and which has remaining execution time is put into ready_process.
    # The shortest process in ready_process, target_process, is executed.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1
        for i in range(no_of_processes):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i)
        if len(ready_process) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1
    return waiting_time
def calculate_turnaroundtime(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    """Turnaround time is burst time plus waiting time."""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
if __name__ == "__main__":
print('[TEST CASE 01]')
    no_of_processes = 4
    burst_time = [2, 5, 3, 7]
    arrival_time = [0, 0, 0, 0]
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    turn_around_time = calculate_turnaroundtime(
        burst_time, no_of_processes, waiting_time
    )
# Printing the Result
print('PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time')
for i, process_id in enumerate(list(range(1, 5))):
print(
f"{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"
f"{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"
)
print(f"\nAverage waiting time = {mean(waiting_time):.5f}")
print(f"Average turnaround time = {mean(turn_around_time):.5f}")
| 1 | '''simple docstring'''
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def lowerCAmelCase_ ( snake_case_ : Dict , snake_case_ : Union[str, Any] , snake_case_ : Optional[Any] , snake_case_ : List[Any] ) -> List[Any]:
'''simple docstring'''
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), f"""Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"""
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), f"""Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"""
def lowerCAmelCase_ ( snake_case_ : Any , snake_case_ : Tuple , snake_case_ : Any , snake_case_ : List[Any] , snake_case_ : str=True ) -> Optional[Any]:
'''simple docstring'''
model.train()
UpperCAmelCase_ = model(snake_case_ )
UpperCAmelCase_ = F.mse_loss(snake_case_ , target.to(output.device ) )
if not do_backward:
loss /= accelerator.gradient_accumulation_steps
loss.backward()
else:
accelerator.backward(snake_case_ )
def lowerCAmelCase_ ( snake_case_ : Optional[Any] , snake_case_ : Any=False ) -> Dict:
'''simple docstring'''
set_seed(42 )
UpperCAmelCase_ = RegressionModel()
UpperCAmelCase_ = deepcopy(snake_case_ )
UpperCAmelCase_ = RegressionDataset(length=80 )
UpperCAmelCase_ = DataLoader(snake_case_ , batch_size=16 )
model.to(accelerator.device )
if sched:
UpperCAmelCase_ = AdamW(params=model.parameters() , lr=1E-3 )
UpperCAmelCase_ = AdamW(params=ddp_model.parameters() , lr=1E-3 )
UpperCAmelCase_ = LambdaLR(snake_case_ , lr_lambda=lambda snake_case_ : epoch**0.65 )
UpperCAmelCase_ = LambdaLR(snake_case_ , lr_lambda=lambda snake_case_ : epoch**0.65 )
# Make a copy of `model`
if sched:
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = accelerator.prepare(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
else:
UpperCAmelCase_ , UpperCAmelCase_ = accelerator.prepare(snake_case_ , snake_case_ )
if sched:
return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
return model, ddp_model, dataloader
def lowerCAmelCase_ ( snake_case_ : Any ) -> int:
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = get_training_setup(snake_case_ )
# Use a single batch
UpperCAmelCase_ , UpperCAmelCase_ = next(iter(snake_case_ ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
UpperCAmelCase_ , UpperCAmelCase_ = accelerator.gather((ddp_input, ddp_target) )
UpperCAmelCase_ , UpperCAmelCase_ = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(snake_case_ ):
step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
else:
# Sync grads
step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad , ddp_param.grad ), f"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
# Shuffle ddp_input on each iteration
torch.manual_seed(13_37 + iteration )
UpperCAmelCase_ = ddp_input[torch.randperm(len(snake_case_ ) )]
def lowerCAmelCase_ ( snake_case_ : Tuple ) -> str:
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = get_training_setup(snake_case_ )
# Use a single batch
UpperCAmelCase_ , UpperCAmelCase_ = next(iter(snake_case_ ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
UpperCAmelCase_ , UpperCAmelCase_ = accelerator.gather((ddp_input, ddp_target) )
UpperCAmelCase_ , UpperCAmelCase_ = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(snake_case_ ):
step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
else:
# Sync grads
step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), f"""Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"""
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), f"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
# Shuffle ddp_input on each iteration
torch.manual_seed(13_37 + iteration )
UpperCAmelCase_ = ddp_input[torch.randperm(len(snake_case_ ) )]
def lowerCAmelCase_ ( snake_case_ : Optional[int]=False , snake_case_ : str=False ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = Accelerator(
split_batches=snake_case_ , dispatch_batches=snake_case_ , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = get_training_setup(snake_case_ )
for iteration, batch in enumerate(snake_case_ ):
UpperCAmelCase_ , UpperCAmelCase_ = batch.values()
# Gather the distributed inputs and targs for the base model
UpperCAmelCase_ , UpperCAmelCase_ = accelerator.gather((ddp_input, ddp_target) )
UpperCAmelCase_ , UpperCAmelCase_ = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# Do "gradient accumulation" (noop)
with accelerator.accumulate(snake_case_ ):
step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(snake_case_ ) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), f"""Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), f"""Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"""
# Shuffle ddp_input on each iteration
torch.manual_seed(13_37 + iteration )
UpperCAmelCase_ = ddp_input[torch.randperm(len(snake_case_ ) )]
GradientState._reset_state()
def lowerCAmelCase_ ( snake_case_ : Optional[Any]=False , snake_case_ : Tuple=False ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = Accelerator(
split_batches=snake_case_ , dispatch_batches=snake_case_ , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = get_training_setup(snake_case_ , snake_case_ )
for iteration, batch in enumerate(snake_case_ ):
UpperCAmelCase_ , UpperCAmelCase_ = batch.values()
# Gather the distributed inputs and targs for the base model
UpperCAmelCase_ , UpperCAmelCase_ = accelerator.gather((ddp_input, ddp_target) )
UpperCAmelCase_ , UpperCAmelCase_ = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
model.train()
ddp_model.train()
step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(snake_case_ )):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes ):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(snake_case_ ):
step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), f"""Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n"""
UpperCAmelCase_ = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(snake_case_ ))
if accelerator.num_processes > 1:
check_model_parameters(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# Shuffle ddp_input on each iteration
torch.manual_seed(13_37 + iteration )
GradientState._reset_state()
def lowerCAmelCase_ ( ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ = Accelerator()
UpperCAmelCase_ = RegressionDataset(length=80 )
UpperCAmelCase_ = DataLoader(snake_case_ , batch_size=16 )
UpperCAmelCase_ = RegressionDataset(length=96 )
UpperCAmelCase_ = DataLoader(snake_case_ , batch_size=16 )
UpperCAmelCase_ , UpperCAmelCase_ = accelerator.prepare(snake_case_ , snake_case_ )
assert accelerator.gradient_state.active_dataloader is None
for iteration, _ in enumerate(snake_case_ ):
assert id(accelerator.gradient_state.active_dataloader ) == id(snake_case_ )
if iteration < len(snake_case_ ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
for batch_num, _ in enumerate(snake_case_ ):
assert id(accelerator.gradient_state.active_dataloader ) == id(snake_case_ )
if batch_num < len(snake_case_ ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def lowerCAmelCase_ ( ) -> str:
'''simple docstring'''
UpperCAmelCase_ = Accelerator()
UpperCAmelCase_ = accelerator.state
if state.local_process_index == 0:
print("**Test `accumulate` gradient accumulation with dataloader break**" )
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print("**Test NOOP `no_sync` context manager**" )
test_noop_sync(snake_case_ )
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print("**Test Distributed `no_sync` context manager**" )
test_distributed_sync(snake_case_ )
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation, " , f"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""" , )
test_gradient_accumulation(snake_case_ , snake_case_ )
# Currently will break on torch 2.0 +, need to investigate why
if is_torch_version("<" , "2.0" ) or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation with optimizer and scheduler, " , "`split_batches=False`, `dispatch_batches=False`**" , )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation with optimizer and scheduler, " , f"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""" , )
test_gradient_accumulation_with_opt_and_scheduler(snake_case_ , snake_case_ )
def lowerCAmelCase_ ( snake_case_ : Dict ) -> int:
'''simple docstring'''
main()
if __name__ == "__main__":
main()
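# Hedged single-process sketch (not part of the tests above) of the
# accumulate() pattern they exercise: gradients sync, and the optimizer steps,
# only on accumulation boundaries. Left commented because it would train a
# model on import.
#
# accelerator = Accelerator(gradient_accumulation_steps=2)
# model = RegressionModel()
# opt = AdamW(params=model.parameters(), lr=1e-3)
# dataloader = DataLoader(RegressionDataset(length=80), batch_size=16)
# model, opt, dataloader = accelerator.prepare(model, opt, dataloader)
# for batch in dataloader:
#     with accelerator.accumulate(model):
#         loss = F.mse_loss(model(batch["x"]), batch["y"])
#         accelerator.backward(loss)
#         opt.step()
#         opt.zero_grad()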
| 1 | 1 |
'''simple docstring'''
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model) -> int:
    """Count the model parameters that require gradients."""
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params
logger = logging.getLogger(__name__)
def lowerCAmelCase_ ( snake_case_ : Optional[int] , snake_case_ : List[str] ) -> List[str]:
'''simple docstring'''
if metric == "rouge2":
UpperCAmelCase_ = "{val_avg_rouge2:.4f}-{step_count}"
elif metric == "bleu":
UpperCAmelCase_ = "{val_avg_bleu:.4f}-{step_count}"
elif metric == "em":
UpperCAmelCase_ = "{val_avg_em:.4f}-{step_count}"
elif metric == "loss":
UpperCAmelCase_ = "{val_avg_loss:.4f}-{step_count}"
else:
raise NotImplementedError(
f"""seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"""
" function." )
UpperCAmelCase_ = ModelCheckpoint(
dirpath=snake_case_ , filename=snake_case_ , monitor=f"""val_{metric}""" , mode="max" , save_top_k=1 , every_n_epochs=1 , )
return checkpoint_callback
def lowerCAmelCase_ ( snake_case_ : Any , snake_case_ : Tuple ) -> int:
'''simple docstring'''
return EarlyStopping(
monitor=f"""val_{metric}""" , mode="min" if "loss" in metric else "max" , patience=snake_case_ , verbose=snake_case_ , )
class __A ( pl.Callback ):
def _lowercase (self : Optional[int] , __a : Tuple , __a : Optional[Any] ):
UpperCAmelCase_ = {f"""lr_group_{i}""": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(__a )
@rank_zero_only
def _lowercase (self : int , __a : pl.Trainer , __a : pl.LightningModule , __a : str , __a : List[Any]=True ):
logger.info(f"""***** {type_path} results at step {trainer.global_step:05d} *****""" )
UpperCAmelCase_ = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]} )
# Log results
UpperCAmelCase_ = Path(pl_module.hparams.output_dir )
if type_path == "test":
UpperCAmelCase_ = od / "test_results.txt"
UpperCAmelCase_ = od / "test_generations.txt"
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
UpperCAmelCase_ = od / f"""{type_path}_results/{trainer.global_step:05d}.txt"""
UpperCAmelCase_ = od / f"""{type_path}_generations/{trainer.global_step:05d}.txt"""
results_file.parent.mkdir(exist_ok=__a )
generations_file.parent.mkdir(exist_ok=__a )
with open(__a , "a+" ) as writer:
for key in sorted(__a ):
if key in ["log", "progress_bar", "preds"]:
continue
UpperCAmelCase_ = metrics[key]
if isinstance(__a , torch.Tensor ):
UpperCAmelCase_ = val.item()
UpperCAmelCase_ = f"""{key}: {val:.6f}\n"""
writer.write(__a )
if not save_generations:
return
if "preds" in metrics:
UpperCAmelCase_ = "\n".join(metrics["preds"] )
generations_file.open("w+" ).write(__a )
@rank_zero_only
def _lowercase (self : Optional[Any] , __a : Optional[Any] , __a : Union[str, Any] ):
try:
UpperCAmelCase_ = pl_module.model.model.num_parameters()
except AttributeError:
UpperCAmelCase_ = pl_module.model.num_parameters()
UpperCAmelCase_ = count_trainable_parameters(__a )
# mp stands for million parameters
trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1E6, "grad_mp": n_trainable_pars / 1E6} )
@rank_zero_only
def _lowercase (self : List[str] , __a : pl.Trainer , __a : pl.LightningModule ):
save_json(pl_module.metrics , pl_module.metrics_save_path )
return self._write_logs(__a , __a , "test" )
@rank_zero_only
def _lowercase (self : List[Any] , __a : pl.Trainer , __a : Dict ):
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
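# A sketch of wiring the callbacks above into a pytorch_lightning Trainer (the
# factory names are assumed from the function bodies, which build a
# ModelCheckpoint and an EarlyStopping keyed on a validation metric):
#   trainer = pl.Trainer(
#       callbacks=[
#           get_checkpoint_callback(output_dir, metric="rouge2"),
#           get_early_stopping_callback("rouge2", patience=3),
#       ]
#   )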
| 1 | '''simple docstring'''
def greatest_common_divisor ( x : int , y : int ) -> int:
    '''simple docstring'''
    return x if y == 0 else greatest_common_divisor(y , x % y )
def lcm ( x : int , y : int ) -> int:
    '''simple docstring'''
    return (x * y) // greatest_common_divisor(x , y )
def solution ( n : int = 20 ) -> int:
    '''simple docstring'''
    g = 1
    for i in range(1 , n + 1 ):
        g = lcm(g , i )
    return g
if __name__ == "__main__":
print(f"{solution() = }")
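    # Quick sanity checks of the solution above: lcm(1..10) = 2520 (the classic
    # small case) and lcm(1..20) = 232792560 (the Project Euler 5 answer).
    assert solution(10) == 2520
    assert solution(20) == 232792560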
| 1 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE_: Optional[Any] ={
'configuration_x_clip': [
'XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XCLIPConfig',
'XCLIPTextConfig',
'XCLIPVisionConfig',
],
'processing_x_clip': ['XCLIPProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_: str =[
'XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'XCLIPModel',
'XCLIPPreTrainedModel',
'XCLIPTextModel',
'XCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
SCREAMING_SNAKE_CASE_: List[Any] =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 1 | '''simple docstring'''
import os
from math import log10
def solution ( base_exp_file : str = "base_exp.txt" ) -> int:
    '''simple docstring'''
    largest = 0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__ ) , base_exp_file ) ) ):
        a , x = list(map(int , line.split("," ) ) )
        if x * log10(a ) > largest:
            largest = x * log10(a )
            result = i + 1
    return result
if __name__ == "__main__":
print(solution())
| 1 | 1 |
'''simple docstring'''
def knapsack ( values : list , weights : list , number_of_items : int , max_weight : int , index : int ) -> int:
    '''simple docstring'''
    if index == number_of_items:
        return 0
    ans1 = 0
    ans2 = 0
    ans1 = knapsack(values , weights , number_of_items , max_weight , index + 1 )
    if weights[index] <= max_weight:
        ans2 = values[index] + knapsack(
            values , weights , number_of_items , max_weight - weights[index] , index + 1 )
    return max(ans1 , ans2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
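    # A small worked example (illustrative numbers): with values [5, 4, 8, 6],
    # weights [1, 2, 4, 5] and capacity 5, the optimum takes the items of weight
    # 1 and 4 for a total value of 5 + 8 = 13.
    assert knapsack([5, 4, 8, 6], [1, 2, 4, 5], 4, 5, 0) == 13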
| 1 | '''simple docstring'''
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def lowerCAmelCase_ ( snake_case_ : List[Any] , snake_case_ : int ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ = checkpoint
UpperCAmelCase_ = {}
UpperCAmelCase_ = vae_state_dict["encoder.conv_in.weight"]
UpperCAmelCase_ = vae_state_dict["encoder.conv_in.bias"]
UpperCAmelCase_ = vae_state_dict["encoder.conv_out.weight"]
UpperCAmelCase_ = vae_state_dict["encoder.conv_out.bias"]
UpperCAmelCase_ = vae_state_dict["encoder.norm_out.weight"]
UpperCAmelCase_ = vae_state_dict["encoder.norm_out.bias"]
UpperCAmelCase_ = vae_state_dict["decoder.conv_in.weight"]
UpperCAmelCase_ = vae_state_dict["decoder.conv_in.bias"]
UpperCAmelCase_ = vae_state_dict["decoder.conv_out.weight"]
UpperCAmelCase_ = vae_state_dict["decoder.conv_out.bias"]
UpperCAmelCase_ = vae_state_dict["decoder.norm_out.weight"]
UpperCAmelCase_ = vae_state_dict["decoder.norm_out.bias"]
UpperCAmelCase_ = vae_state_dict["quant_conv.weight"]
UpperCAmelCase_ = vae_state_dict["quant_conv.bias"]
UpperCAmelCase_ = vae_state_dict["post_quant_conv.weight"]
UpperCAmelCase_ = vae_state_dict["post_quant_conv.bias"]
# Retrieves the keys for the encoder down blocks only
UpperCAmelCase_ = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "encoder.down" in layer} )
UpperCAmelCase_ = {
layer_id: [key for key in vae_state_dict if f"""down.{layer_id}""" in key] for layer_id in range(snake_case_ )
}
# Retrieves the keys for the decoder up blocks only
UpperCAmelCase_ = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "decoder.up" in layer} )
UpperCAmelCase_ = {
layer_id: [key for key in vae_state_dict if f"""up.{layer_id}""" in key] for layer_id in range(snake_case_ )
}
for i in range(snake_case_ ):
UpperCAmelCase_ = [key for key in down_blocks[i] if f"""down.{i}""" in key and f"""down.{i}.downsample""" not in key]
if f"""encoder.down.{i}.downsample.conv.weight""" in vae_state_dict:
UpperCAmelCase_ = vae_state_dict.pop(
f"""encoder.down.{i}.downsample.conv.weight""" )
UpperCAmelCase_ = vae_state_dict.pop(
f"""encoder.down.{i}.downsample.conv.bias""" )
UpperCAmelCase_ = renew_vae_resnet_paths(snake_case_ )
UpperCAmelCase_ = {"old": f"""down.{i}.block""", "new": f"""down_blocks.{i}.resnets"""}
assign_to_checkpoint(snake_case_ , snake_case_ , snake_case_ , additional_replacements=[meta_path] , config=snake_case_ )
UpperCAmelCase_ = [key for key in vae_state_dict if "encoder.mid.block" in key]
UpperCAmelCase_ = 2
for i in range(1 , num_mid_res_blocks + 1 ):
UpperCAmelCase_ = [key for key in mid_resnets if f"""encoder.mid.block_{i}""" in key]
UpperCAmelCase_ = renew_vae_resnet_paths(snake_case_ )
UpperCAmelCase_ = {"old": f"""mid.block_{i}""", "new": f"""mid_block.resnets.{i - 1}"""}
assign_to_checkpoint(snake_case_ , snake_case_ , snake_case_ , additional_replacements=[meta_path] , config=snake_case_ )
UpperCAmelCase_ = [key for key in vae_state_dict if "encoder.mid.attn" in key]
UpperCAmelCase_ = renew_vae_attention_paths(snake_case_ )
UpperCAmelCase_ = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
assign_to_checkpoint(snake_case_ , snake_case_ , snake_case_ , additional_replacements=[meta_path] , config=snake_case_ )
conv_attn_to_linear(snake_case_ )
for i in range(snake_case_ ):
UpperCAmelCase_ = num_up_blocks - 1 - i
UpperCAmelCase_ = [
key for key in up_blocks[block_id] if f"""up.{block_id}""" in key and f"""up.{block_id}.upsample""" not in key
]
if f"""decoder.up.{block_id}.upsample.conv.weight""" in vae_state_dict:
UpperCAmelCase_ = vae_state_dict[
f"""decoder.up.{block_id}.upsample.conv.weight"""
]
UpperCAmelCase_ = vae_state_dict[
f"""decoder.up.{block_id}.upsample.conv.bias"""
]
UpperCAmelCase_ = renew_vae_resnet_paths(snake_case_ )
UpperCAmelCase_ = {"old": f"""up.{block_id}.block""", "new": f"""up_blocks.{i}.resnets"""}
assign_to_checkpoint(snake_case_ , snake_case_ , snake_case_ , additional_replacements=[meta_path] , config=snake_case_ )
UpperCAmelCase_ = [key for key in vae_state_dict if "decoder.mid.block" in key]
UpperCAmelCase_ = 2
for i in range(1 , num_mid_res_blocks + 1 ):
UpperCAmelCase_ = [key for key in mid_resnets if f"""decoder.mid.block_{i}""" in key]
UpperCAmelCase_ = renew_vae_resnet_paths(snake_case_ )
UpperCAmelCase_ = {"old": f"""mid.block_{i}""", "new": f"""mid_block.resnets.{i - 1}"""}
assign_to_checkpoint(snake_case_ , snake_case_ , snake_case_ , additional_replacements=[meta_path] , config=snake_case_ )
UpperCAmelCase_ = [key for key in vae_state_dict if "decoder.mid.attn" in key]
UpperCAmelCase_ = renew_vae_attention_paths(snake_case_ )
UpperCAmelCase_ = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
assign_to_checkpoint(snake_case_ , snake_case_ , snake_case_ , additional_replacements=[meta_path] , config=snake_case_ )
conv_attn_to_linear(snake_case_ )
return new_checkpoint
def lowerCAmelCase_ ( snake_case_ : str , snake_case_ : str , ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ = requests.get(
" https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml" )
UpperCAmelCase_ = io.BytesIO(r.content )
UpperCAmelCase_ = OmegaConf.load(snake_case_ )
UpperCAmelCase_ = 5_12
UpperCAmelCase_ = "cuda" if torch.cuda.is_available() else "cpu"
if checkpoint_path.endswith("safetensors" ):
from safetensors import safe_open
UpperCAmelCase_ = {}
with safe_open(snake_case_ , framework="pt" , device="cpu" ) as f:
for key in f.keys():
UpperCAmelCase_ = f.get_tensor(snake_case_ )
else:
UpperCAmelCase_ = torch.load(snake_case_ , map_location=snake_case_ )["state_dict"]
# Convert the VAE model.
UpperCAmelCase_ = create_vae_diffusers_config(snake_case_ , image_size=snake_case_ )
UpperCAmelCase_ = custom_convert_ldm_vae_checkpoint(snake_case_ , snake_case_ )
UpperCAmelCase_ = AutoencoderKL(**snake_case_ )
vae.load_state_dict(snake_case_ )
vae.save_pretrained(snake_case_ )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_: Optional[int] =argparse.ArgumentParser()
parser.add_argument('--vae_pt_path', default=None, type=str, required=True, help='Path to the VAE.pt to convert.')
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the VAE.pt to convert.')
SCREAMING_SNAKE_CASE_: str =parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
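# Example invocation of the conversion script above (script name and paths are
# placeholders):
#   python convert_vae_pt_to_diffusers.py --vae_pt_path ./vae.pt --dump_path ./vae_diffusers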
| 1 | 1 |
'''simple docstring'''
import base64
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class HfDeepSpeedConfig :
    def __init__(self , config_file_or_dict ):
        if isinstance(config_file_or_dict , dict ):
            # Don't modify user's data should they want to reuse it (e.g. in tests), because once we
            # modified it, it will not be accepted here again, since `auto` values would have been overridden
            config = deepcopy(config_file_or_dict )
        elif os.path.exists(config_file_or_dict ):
            with io.open(config_file_or_dict , "r" , encoding="utf-8" ) as f:
                config = json.load(f )
        else:
            try:
                config_decoded = base64.urlsafe_b64decode(config_file_or_dict ).decode("utf-8" )
                config = json.loads(config_decoded )
            except (UnicodeDecodeError, AttributeError, ValueError):
                raise ValueError(
                    f"""Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}""" )
        self.config = config
        self.set_stage_and_offload()
    def set_stage_and_offload (self ):
        # zero stage - this is done as early as possible, before model is created, to allow
        # ``is_deepspeed_zero3_enabled`` query and getting to the early deepspeed config object
        # during ``zero.Init()`` which needs to know the dtype, and some other hparams.
        self._stage = self.get_value("zero_optimization.stage" , -1 )
        # offload
        self._offload = False
        if self.is_zero2() or self.is_zero3():
            offload_devices_valid = set(["cpu", "nvme"] )
            offload_devices = set(
                [
                    self.get_value("zero_optimization.offload_optimizer.device" ),
                    self.get_value("zero_optimization.offload_param.device" ),
                ] )
            if len(offload_devices & offload_devices_valid ) > 0:
                self._offload = True
    def find_config_node (self , ds_key_long ):
        config = self.config
        # find the config node of interest if it exists
        nodes = ds_key_long.split("." )
        ds_key = nodes.pop()
        for node in nodes:
            config = config.get(node )
            if config is None:
                return None, ds_key
        return config, ds_key
    def get_value (self , ds_key_long , default=None ):
        config , ds_key = self.find_config_node(ds_key_long )
        if config is None:
            return default
        return config.get(ds_key , default )
    def del_config_sub_tree (self , ds_key_long , must_exist=False ):
        config = self.config
        # find the config node of interest if it exists
        nodes = ds_key_long.split("." )
        for node in nodes:
            parent_config = config
            config = config.get(node )
            if config is None:
                if must_exist:
                    raise ValueError(f"""Can't find {ds_key_long} entry in the config: {self.config}""" )
                else:
                    return
        # if found remove it
        if parent_config is not None:
            parent_config.pop(node )
    def is_true (self , ds_key_long ):
        value = self.get_value(ds_key_long )
        return False if value is None else bool(value )
    def is_false (self , ds_key_long ):
        value = self.get_value(ds_key_long )
        return False if value is None else not bool(value )
    def is_zero2 (self ):
        return self._stage == 2
    def is_zero3 (self ):
        return self._stage == 3
    def is_offload (self ):
        return self._offload
class DeepSpeedEngineWrapper :
    def __init__(self , engine ):
        self.engine = engine
    def backward (self , loss , **kwargs ):
        # runs backpropagation and handles mixed precision
        self.engine.backward(loss , **kwargs )
# Deepspeed's `engine.step` performs the following operations:
# - gradient accumulation check
# - gradient clipping
# - optimizer step
# - zero grad
# - checking overflow
# - lr_scheduler step (only if engine.lr_scheduler is not None)
self.engine.step()
# and this plugin overrides the above calls with no-ops when Accelerate runs under
# Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
# training loop that works transparently under many training regimes.
class DeepSpeedOptimizerWrapper ( AcceleratedOptimizer ):
    def __init__(self , optimizer ):
        super().__init__(optimizer , device_placement=False , scaler=None )
        self.__has_overflow__ = hasattr(self.optimizer , "overflow" )
def _lowercase (self : Dict , __a : List[Any]=None ):
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
def _lowercase (self : List[str] ):
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
@property
def _lowercase (self : str ):
if self.__has_overflow__:
return self.optimizer.overflow
return False
class DeepSpeedSchedulerWrapper ( AcceleratedScheduler ):
    def __init__(self , scheduler , optimizers ):
        super().__init__(scheduler , optimizers )
def _lowercase (self : List[str] ):
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
class DummyOptim :
    def __init__(self , params , lr=0.001 , weight_decay=0 , **kwargs ):
        self.params = params
        self.lr = lr
        self.weight_decay = weight_decay
        self.kwargs = kwargs
class DummyScheduler :
    def __init__(self , optimizer , total_num_steps=None , warmup_num_steps=0 , **kwargs ):
        self.optimizer = optimizer
        self.total_num_steps = total_num_steps
        self.warmup_num_steps = warmup_num_steps
        self.kwargs = kwargs
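# A minimal standalone sketch of the dotted-key lookup that find_config_node and
# get_value above implement (the helper name here is illustrative, not part of
# the class):
def _lookup_dotted_key(config: dict, ds_key_long: str, default=None):
    node = config
    *parents, leaf = ds_key_long.split(".")
    for key in parents:
        node = node.get(key)
        if node is None:
            return default
    return node.get(leaf, default)
# e.g. _lookup_dotted_key({"zero_optimization": {"stage": 3}}, "zero_optimization.stage") == 3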
| 1 | '''simple docstring'''
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class __A ( unittest.TestCase ):
def __init__(self : str , __a : Optional[Any] , __a : Optional[Any]=13 , __a : int=30 , __a : Union[str, Any]=2 , __a : Dict=3 , __a : List[Any]=True , __a : Optional[Any]=True , __a : List[Any]=32 , __a : Any=5 , __a : str=4 , __a : Optional[int]=37 , __a : Optional[int]="gelu" , __a : List[str]=0.1 , __a : Tuple=0.1 , __a : List[str]=10 , __a : Optional[int]=0.02 , ):
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = image_size
UpperCAmelCase_ = patch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = is_training
UpperCAmelCase_ = use_labels
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = type_sequence_label_size
UpperCAmelCase_ = initializer_range
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
UpperCAmelCase_ = (image_size // patch_size) ** 2
UpperCAmelCase_ = num_patches + 1
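        # e.g. with ViT-Base defaults (image_size=224, patch_size=16) this is
        # (224 // 16) ** 2 = 196 patches, giving a sequence length of 197.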
def _lowercase (self : Any ):
UpperCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ = ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__a , initializer_range=self.initializer_range , )
return config, pixel_values
def _lowercase (self : Dict , __a : Any , __a : List[Any] ):
UpperCAmelCase_ = FlaxViTModel(config=__a )
UpperCAmelCase_ = model(__a )
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
UpperCAmelCase_ = (self.image_size, self.image_size)
UpperCAmelCase_ = (self.patch_size, self.patch_size)
UpperCAmelCase_ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size) )
def _lowercase (self : Tuple , __a : str , __a : Any ):
UpperCAmelCase_ = self.type_sequence_label_size
UpperCAmelCase_ = FlaxViTForImageClassification(config=__a )
UpperCAmelCase_ = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCAmelCase_ = 1
UpperCAmelCase_ = FlaxViTForImageClassification(__a )
UpperCAmelCase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase_ = model(__a )
def _lowercase (self : Optional[Any] ):
UpperCAmelCase_ = self.prepare_config_and_inputs()
        UpperCAmelCase_ , UpperCAmelCase_ = config_and_inputs
UpperCAmelCase_ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_flax
class __A ( UpperCamelCase__ , unittest.TestCase ):
a__ : Tuple = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()
def _lowercase (self : Any ):
UpperCAmelCase_ = FlaxViTModelTester(self )
UpperCAmelCase_ = ConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37 )
def _lowercase (self : Tuple ):
self.config_tester.run_common_tests()
def _lowercase (self : str ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def _lowercase (self : str ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a )
def _lowercase (self : Tuple ):
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(__a )
UpperCAmelCase_ = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ = [*signature.parameters.keys()]
UpperCAmelCase_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __a )
def _lowercase (self : Optional[Any] ):
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCAmelCase_ = self._prepare_for_class(__a , __a )
UpperCAmelCase_ = model_class(__a )
@jax.jit
def model_jitted(__a : Tuple , **__a : List[Any] ):
return model(pixel_values=__a , **__a )
with self.subTest("JIT Enabled" ):
UpperCAmelCase_ = model_jitted(**__a ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
UpperCAmelCase_ = model_jitted(**__a ).to_tuple()
self.assertEqual(len(__a ) , len(__a ) )
for jitted_output, output in zip(__a , __a ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def _lowercase (self : Tuple ):
for model_class_name in self.all_model_classes:
UpperCAmelCase_ = model_class_name.from_pretrained("google/vit-base-patch16-224" )
UpperCAmelCase_ = model(np.ones((1, 3, 224, 224) ) )
self.assertIsNotNone(__a )
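if __name__ == "__main__":
    # A minimal usage sketch of the checkpoint exercised by the slow test above
    # (downloads weights; the (1, 197, 768) hidden-state shape follows from
    # 14 x 14 = 196 patches plus the [CLS] token).
    model = FlaxViTModel.from_pretrained("google/vit-base-patch16-224")
    outputs = model(np.ones((1, 3, 224, 224)))
    print(outputs.last_hidden_state.shape)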
| 1 | 1 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
SCREAMING_SNAKE_CASE_: Any =logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_: Union[str, Any] ={'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
# See all BART models at https://huggingface.co/models?filter=bart
SCREAMING_SNAKE_CASE_: str ={
'vocab_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/vocab.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/vocab.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json',
},
'merges_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/merges.txt',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/merges.txt',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt',
},
'tokenizer_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json',
},
}
SCREAMING_SNAKE_CASE_: int ={
'facebook/bart-base': 10_24,
'facebook/bart-large': 10_24,
'facebook/bart-large-mnli': 10_24,
'facebook/bart-large-cnn': 10_24,
'facebook/bart-large-xsum': 10_24,
'yjernite/bart_eli5': 10_24,
}
class __A ( UpperCamelCase__ ):
a__ : Dict = VOCAB_FILES_NAMES
a__ : Any = PRETRAINED_VOCAB_FILES_MAP
a__ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ : int = ["""input_ids""", """attention_mask"""]
a__ : str = BartTokenizer
def __init__(self : List[str] , __a : List[Any]=None , __a : str=None , __a : List[str]=None , __a : Union[str, Any]="replace" , __a : List[str]="<s>" , __a : List[Any]="</s>" , __a : Dict="</s>" , __a : Optional[int]="<s>" , __a : Any="<unk>" , __a : List[Any]="<pad>" , __a : List[str]="<mask>" , __a : Tuple=False , __a : Optional[Any]=True , **__a : Optional[Any] , ):
super().__init__(
__a , __a , tokenizer_file=__a , errors=__a , bos_token=__a , eos_token=__a , sep_token=__a , cls_token=__a , unk_token=__a , pad_token=__a , mask_token=__a , add_prefix_space=__a , trim_offsets=__a , **__a , )
UpperCAmelCase_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , __a ) != add_prefix_space:
UpperCAmelCase_ = getattr(__a , pre_tok_state.pop("type" ) )
UpperCAmelCase_ = add_prefix_space
UpperCAmelCase_ = pre_tok_class(**__a )
UpperCAmelCase_ = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
UpperCAmelCase_ = "post_processor"
UpperCAmelCase_ = getattr(self.backend_tokenizer , __a , __a )
if tokenizer_component_instance:
UpperCAmelCase_ = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
UpperCAmelCase_ = tuple(state["sep"] )
if "cls" in state:
UpperCAmelCase_ = tuple(state["cls"] )
UpperCAmelCase_ = False
if state.get("add_prefix_space" , __a ) != add_prefix_space:
UpperCAmelCase_ = add_prefix_space
UpperCAmelCase_ = True
if state.get("trim_offsets" , __a ) != trim_offsets:
UpperCAmelCase_ = trim_offsets
UpperCAmelCase_ = True
if changes_to_apply:
UpperCAmelCase_ = getattr(__a , state.pop("type" ) )
UpperCAmelCase_ = component_class(**__a )
setattr(self.backend_tokenizer , __a , __a )
@property
    def mask_token (self ):
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet." )
            return None
        return str(self._mask_token )
    @mask_token.setter
    def mask_token (self , value ):
        value = AddedToken(value , lstrip=True , rstrip=False ) if isinstance(value , str ) else value
        self._mask_token = value
def _lowercase (self : Any , *__a : List[Any] , **__a : Any ):
UpperCAmelCase_ = kwargs.get("is_split_into_words" , __a )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs." )
return super()._batch_encode_plus(*__a , **__a )
def _lowercase (self : List[str] , *__a : Optional[Any] , **__a : Dict ):
UpperCAmelCase_ = kwargs.get("is_split_into_words" , __a )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs." )
return super()._encode_plus(*__a , **__a )
def _lowercase (self : str , __a : str , __a : Optional[str] = None ):
UpperCAmelCase_ = self._tokenizer.model.save(__a , name=__a )
return tuple(__a )
def _lowercase (self : Any , __a : Optional[int] , __a : str=None ):
UpperCAmelCase_ = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def _lowercase (self : Union[str, Any] , __a : List[int] , __a : Optional[List[int]] = None ):
UpperCAmelCase_ = [self.sep_token_id]
UpperCAmelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
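if __name__ == "__main__":
    # A short usage sketch of the fast tokenizer above (downloads the vocab files);
    # single sequences are wrapped as <s> ... </s> by build_inputs_with_special_tokens.
    from transformers import BartTokenizerFast
    tokenizer = BartTokenizerFast.from_pretrained("facebook/bart-base")
    encoded = tokenizer("Hello world")
    print(tokenizer.convert_ids_to_tokens(encoded.input_ids))  # e.g. ['<s>', 'Hello', 'Ġworld', '</s>']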
| 1 | '''simple docstring'''
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class __A ( UpperCamelCase__ ):
def _lowercase (self : Optional[Any] ):
UpperCAmelCase_ = tempfile.mkdtemp()
UpperCAmelCase_ = 5
# Realm tok
UpperCAmelCase_ = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"test",
"question",
"this",
"is",
"the",
"first",
"second",
"third",
"fourth",
"fifth",
"record",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
UpperCAmelCase_ = os.path.join(self.tmpdirname , "realm_tokenizer" )
os.makedirs(__a , exist_ok=__a )
UpperCAmelCase_ = os.path.join(__a , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
UpperCAmelCase_ = os.path.join(self.tmpdirname , "realm_block_records" )
os.makedirs(__a , exist_ok=__a )
def _lowercase (self : Optional[Any] ):
return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , "realm_tokenizer" ) )
def _lowercase (self : Any ):
shutil.rmtree(self.tmpdirname )
def _lowercase (self : List[Any] ):
UpperCAmelCase_ = RealmConfig(num_block_records=self.num_block_records )
return config
def _lowercase (self : List[str] ):
UpperCAmelCase_ = Dataset.from_dict(
{
"id": ["0", "1"],
"question": ["foo", "bar"],
"answers": [["Foo", "Bar"], ["Bar"]],
} )
return dataset
def _lowercase (self : Any ):
UpperCAmelCase_ = np.array(
[
B"This is the first record",
B"This is the second record",
B"This is the third record",
B"This is the fourth record",
B"This is the fifth record",
B"This is a longer longer longer record",
] , dtype=__a , )
return block_records
def _lowercase (self : Union[str, Any] ):
UpperCAmelCase_ = RealmRetriever(
block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
return retriever
def _lowercase (self : int ):
UpperCAmelCase_ = self.get_config()
UpperCAmelCase_ = self.get_dummy_retriever()
UpperCAmelCase_ = retriever.tokenizer
UpperCAmelCase_ = np.array([0, 3] , dtype="long" )
UpperCAmelCase_ = tokenizer(["Test question"] ).input_ids
UpperCAmelCase_ = tokenizer(
["the fourth"] , add_special_tokens=__a , return_token_type_ids=__a , return_attention_mask=__a , ).input_ids
UpperCAmelCase_ = config.reader_seq_len
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = retriever(
__a , __a , answer_ids=__a , max_length=__a , return_tensors="np" )
self.assertEqual(len(__a ) , 2 )
self.assertEqual(len(__a ) , 2 )
self.assertEqual(len(__a ) , 2 )
self.assertEqual(concat_inputs.input_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) )
self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "first", "record", "[SEP]"] , )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "fourth", "record", "[SEP]"] , )
def _lowercase (self : List[Any] ):
UpperCAmelCase_ = self.get_config()
UpperCAmelCase_ = self.get_dummy_retriever()
UpperCAmelCase_ = retriever.tokenizer
UpperCAmelCase_ = np.array([0, 3, 5] , dtype="long" )
UpperCAmelCase_ = tokenizer(["Test question"] ).input_ids
UpperCAmelCase_ = tokenizer(
["the fourth", "longer longer"] , add_special_tokens=__a , return_token_type_ids=__a , return_attention_mask=__a , ).input_ids
UpperCAmelCase_ = config.reader_seq_len
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = retriever(
__a , __a , answer_ids=__a , max_length=__a , return_tensors="np" )
self.assertEqual([False, True, True] , __a )
self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , __a )
self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , __a )
def _lowercase (self : Optional[Any] ):
UpperCAmelCase_ = self.get_dummy_retriever()
retriever.save_pretrained(os.path.join(self.tmpdirname , "realm_block_records" ) )
# Test local path
UpperCAmelCase_ = retriever.from_pretrained(os.path.join(self.tmpdirname , "realm_block_records" ) )
self.assertEqual(retriever.block_records[0] , B"This is the first record" )
# Test mocked remote path
with patch("transformers.models.realm.retrieval_realm.hf_hub_download" ) as mock_hf_hub_download:
UpperCAmelCase_ = os.path.join(
os.path.join(self.tmpdirname , "realm_block_records" ) , _REALM_BLOCK_RECORDS_FILENAME )
UpperCAmelCase_ = RealmRetriever.from_pretrained("google/realm-cc-news-pretrained-openqa" )
self.assertEqual(retriever.block_records[0] , B"This is the first record" )
| 1 | 1 |
'''simple docstring'''
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
SCREAMING_SNAKE_CASE_: Tuple =16
SCREAMING_SNAKE_CASE_: Tuple =32
def lowerCAmelCase_ ( snake_case_ : Accelerator , snake_case_ : int = 16 , snake_case_ : str = "bert-base-cased" ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = AutoTokenizer.from_pretrained(snake_case_ )
UpperCAmelCase_ = load_dataset("glue" , "mrpc" )
def tokenize_function(snake_case_ : Optional[Any] ):
# max_length=None => use the model max length (it's actually the default)
UpperCAmelCase_ = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=snake_case_ , max_length=snake_case_ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
UpperCAmelCase_ = datasets.map(
snake_case_ , batched=snake_case_ , remove_columns=["idx", "sentence1", "sentence2"] , load_from_cache_file=snake_case_ )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
UpperCAmelCase_ = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(snake_case_ : str ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(snake_case_ , padding="max_length" , max_length=1_28 , return_tensors="pt" )
return tokenizer.pad(snake_case_ , padding="longest" , return_tensors="pt" )
# Instantiate dataloaders.
UpperCAmelCase_ = DataLoader(
tokenized_datasets["train"] , shuffle=snake_case_ , collate_fn=snake_case_ , batch_size=snake_case_ )
UpperCAmelCase_ = DataLoader(
tokenized_datasets["validation"] , shuffle=snake_case_ , collate_fn=snake_case_ , batch_size=snake_case_ )
return train_dataloader, eval_dataloader
def lowerCAmelCase_ ( snake_case_ : int , snake_case_ : Union[str, Any] , snake_case_ : Optional[int] , snake_case_ : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
model.eval()
UpperCAmelCase_ = 0
for step, batch in enumerate(snake_case_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
UpperCAmelCase_ = model(**snake_case_ )
UpperCAmelCase_ = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
UpperCAmelCase_ , UpperCAmelCase_ = accelerator.gather(
(predictions, batch["labels"]) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(snake_case_ ) - 1:
UpperCAmelCase_ = predictions[: len(eval_dataloader.dataset ) - samples_seen]
UpperCAmelCase_ = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=snake_case_ , references=snake_case_ , )
UpperCAmelCase_ = metric.compute()
return eval_metric["accuracy"]
def lowerCAmelCase_ ( snake_case_ : Tuple , snake_case_ : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
UpperCAmelCase_ = config["lr"]
UpperCAmelCase_ = int(config["num_epochs"] )
UpperCAmelCase_ = int(config["seed"] )
UpperCAmelCase_ = int(config["batch_size"] )
UpperCAmelCase_ = args.model_name_or_path
set_seed(snake_case_ )
UpperCAmelCase_ , UpperCAmelCase_ = get_dataloaders(snake_case_ , snake_case_ , snake_case_ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
UpperCAmelCase_ = AutoModelForSequenceClassification.from_pretrained(snake_case_ , return_dict=snake_case_ )
# Instantiate optimizer
UpperCAmelCase_ = (
AdamW
if accelerator.state.deepspeed_plugin is None
or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
UpperCAmelCase_ = optimizer_cls(params=model.parameters() , lr=snake_case_ )
if accelerator.state.deepspeed_plugin is not None:
UpperCAmelCase_ = accelerator.state.deepspeed_plugin.deepspeed_config[
"gradient_accumulation_steps"
]
else:
UpperCAmelCase_ = 1
UpperCAmelCase_ = (len(snake_case_ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
UpperCAmelCase_ = get_linear_schedule_with_warmup(
optimizer=snake_case_ , num_warmup_steps=0 , num_training_steps=snake_case_ , )
else:
UpperCAmelCase_ = DummyScheduler(snake_case_ , total_num_steps=snake_case_ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = accelerator.prepare(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# We need to keep track of how many total steps we have iterated over
UpperCAmelCase_ = 0
# We also need to keep track of the stating epoch so files are named properly
UpperCAmelCase_ = 0
UpperCAmelCase_ = evaluate.load("glue" , "mrpc" )
UpperCAmelCase_ = num_epochs
if args.partial_train_epoch is not None:
UpperCAmelCase_ = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
UpperCAmelCase_ = args.resume_from_checkpoint.split("epoch_" )[1]
UpperCAmelCase_ = ""
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
UpperCAmelCase_ = int(snake_case_ ) + 1
UpperCAmelCase_ = evaluation_loop(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
accelerator.print("resumed checkpoint performance:" , snake_case_ )
accelerator.print("resumed checkpoint's scheduler's lr:" , lr_scheduler.get_lr()[0] )
accelerator.print("resumed optimizers's lr:" , optimizer.param_groups[0]["lr"] )
with open(os.path.join(args.output_dir , f"""state_{starting_epoch-1}.json""" ) , "r" ) as f:
UpperCAmelCase_ = json.load(snake_case_ )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
UpperCAmelCase_ = {}
for epoch in range(snake_case_ , snake_case_ ):
model.train()
for step, batch in enumerate(snake_case_ ):
UpperCAmelCase_ = model(**snake_case_ )
UpperCAmelCase_ = outputs.loss
UpperCAmelCase_ = loss / gradient_accumulation_steps
accelerator.backward(snake_case_ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
UpperCAmelCase_ = f"""epoch_{epoch}"""
UpperCAmelCase_ = os.path.join(args.output_dir , snake_case_ )
accelerator.save_state(snake_case_ )
UpperCAmelCase_ = evaluation_loop(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
UpperCAmelCase_ = accuracy
UpperCAmelCase_ = lr_scheduler.get_lr()[0]
UpperCAmelCase_ = optimizer.param_groups[0]["lr"]
UpperCAmelCase_ = epoch
UpperCAmelCase_ = overall_step
accelerator.print(f"""epoch {epoch}:""" , snake_case_ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , f"""state_{epoch}.json""" ) , "w" ) as f:
json.dump(snake_case_ , snake_case_ )
def lowerCAmelCase_ ( ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage." )
parser.add_argument(
"--model_name_or_path" , type=snake_case_ , default="bert-base-cased" , help="Path to pretrained model or model identifier from huggingface.co/models." , required=snake_case_ , )
parser.add_argument(
"--output_dir" , type=snake_case_ , default="." , help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." , )
parser.add_argument(
"--resume_from_checkpoint" , type=snake_case_ , default=snake_case_ , help="If the training should continue from a checkpoint folder." , )
parser.add_argument(
"--partial_train_epoch" , type=snake_case_ , default=snake_case_ , help="If passed, the training will stop after this number of epochs." , )
parser.add_argument(
"--num_epochs" , type=snake_case_ , default=2 , help="Number of train epochs." , )
UpperCAmelCase_ = parser.parse_args()
UpperCAmelCase_ = {"lr": 2E-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
training_function(snake_case_ , snake_case_ )
if __name__ == "__main__":
main()
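# Typical invocations for this checkpointing test (the script name is a
# placeholder; the flags are the ones declared in main() above):
#   accelerate launch checkpointing.py --num_epochs 2 --output_dir ./checkpoints
#   accelerate launch checkpointing.py --resume_from_checkpoint ./checkpoints/epoch_0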
| 1 | '''simple docstring'''
from math import log
from scipy.constants import Boltzmann, physical_constants
T = 3_00 # TEMPERATURE (unit = K)
def builtin_voltage ( donor_conc : float , acceptor_conc : float , intrinsic_conc : float , ) -> float:
'''simple docstring'''
if donor_conc <= 0:
raise ValueError("Donor concentration should be positive" )
elif acceptor_conc <= 0:
raise ValueError("Acceptor concentration should be positive" )
elif intrinsic_conc <= 0:
raise ValueError("Intrinsic concentration should be positive" )
elif donor_conc <= intrinsic_conc:
raise ValueError(
"Donor concentration should be greater than intrinsic concentration" )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
"Acceptor concentration should be greater than intrinsic concentration" )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
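    # A worked example with silicon-like numbers (illustrative values): donor and
    # acceptor concentrations of 1e17 cm^-3 and n_i = 1.5e10 cm^-3 give
    # V_bi = (kT/q) * ln(Nd * Na / n_i**2) ~= 0.026 * 31.4 ~= 0.81 V at 300 K.
    print(builtin_voltage(donor_conc=1e17, acceptor_conc=1e17, intrinsic_conc=1.5e10))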
| 1 | 1 |
'''simple docstring'''
from __future__ import annotations
class Matrix :
def __init__(self : Optional[Any] , __a : list[list[int]] ):
UpperCAmelCase_ = TypeError(
"Matrices must be formed from a list of zero or more lists containing at "
"least one and the same number of values, each of which must be of type "
"int or float." )
if len(__a ) != 0:
UpperCAmelCase_ = len(rows[0] )
if cols == 0:
raise error
for row in rows:
if len(__a ) != cols:
raise error
for value in row:
if not isinstance(__a , (int, float) ):
raise error
UpperCAmelCase_ = rows
else:
UpperCAmelCase_ = []
def _lowercase (self : Tuple ):
return [[row[i] for row in self.rows] for i in range(len(self.rows[0] ) )]
@property
def _lowercase (self : List[Any] ):
return len(self.rows )
@property
def _lowercase (self : Tuple ):
return len(self.rows[0] )
@property
def _lowercase (self : Tuple ):
return (self.num_rows, self.num_columns)
@property
def _lowercase (self : str ):
return self.order[0] == self.order[1]
def _lowercase (self : int ):
UpperCAmelCase_ = [
[0 if column_num != row_num else 1 for column_num in range(self.num_rows )]
for row_num in range(self.num_rows )
]
return Matrix(__a )
def _lowercase (self : List[Any] ):
if not self.is_square:
return 0
if self.order == (0, 0):
return 1
if self.order == (1, 1):
return int(self.rows[0][0] )
if self.order == (2, 2):
return int(
(self.rows[0][0] * self.rows[1][1])
- (self.rows[0][1] * self.rows[1][0]) )
else:
return sum(
self.rows[0][column] * self.cofactors().rows[0][column]
for column in range(self.num_columns ) )
def _lowercase (self : int ):
return bool(self.determinant() )
def _lowercase (self : Union[str, Any] , __a : int , __a : int ):
UpperCAmelCase_ = [
[
self.rows[other_row][other_column]
for other_column in range(self.num_columns )
if other_column != column
]
for other_row in range(self.num_rows )
if other_row != row
]
return Matrix(__a ).determinant()
def _lowercase (self : Optional[int] , __a : int , __a : int ):
if (row + column) % 2 == 0:
return self.get_minor(__a , __a )
return -1 * self.get_minor(__a , __a )
def _lowercase (self : Optional[int] ):
return Matrix(
[
[self.get_minor(__a , __a ) for column in range(self.num_columns )]
for row in range(self.num_rows )
] )
def _lowercase (self : Optional[int] ):
return Matrix(
[
[
self.minors().rows[row][column]
if (row + column) % 2 == 0
else self.minors().rows[row][column] * -1
for column in range(self.minors().num_columns )
]
for row in range(self.minors().num_rows )
] )
def _lowercase (self : Tuple ):
UpperCAmelCase_ = [
[self.cofactors().rows[column][row] for column in range(self.num_columns )]
for row in range(self.num_rows )
]
return Matrix(__a )
def _lowercase (self : List[str] ):
UpperCAmelCase_ = self.determinant()
if not determinant:
raise TypeError("Only matrices with a non-zero determinant have an inverse" )
return self.adjugate() * (1 / determinant)
def __repr__(self : Optional[int] ):
return str(self.rows )
def __str__(self : Optional[int] ):
if self.num_rows == 0:
return "[]"
if self.num_rows == 1:
return "[[" + ". ".join(str(self.rows[0] ) ) + "]]"
return (
"["
+ "\n ".join(
[
"[" + ". ".join([str(__a ) for value in row] ) + ".]"
for row in self.rows
] )
+ "]"
)
def _lowercase (self : Dict , __a : list[int] , __a : int | None = None ):
UpperCAmelCase_ = TypeError("Row must be a list containing all ints and/or floats" )
if not isinstance(__a , __a ):
raise type_error
for value in row:
if not isinstance(__a , (int, float) ):
raise type_error
if len(__a ) != self.num_columns:
raise ValueError(
"Row must be equal in length to the other rows in the matrix" )
if position is None:
self.rows.append(__a )
else:
UpperCAmelCase_ = self.rows[0:position] + [row] + self.rows[position:]
def _lowercase (self : Union[str, Any] , __a : list[int] , __a : int | None = None ):
UpperCAmelCase_ = TypeError(
"Column must be a list containing all ints and/or floats" )
if not isinstance(__a , __a ):
raise type_error
for value in column:
if not isinstance(__a , (int, float) ):
raise type_error
if len(__a ) != self.num_rows:
raise ValueError(
"Column must be equal in length to the other columns in the matrix" )
if position is None:
UpperCAmelCase_ = [self.rows[i] + [column[i]] for i in range(self.num_rows )]
else:
UpperCAmelCase_ = [
self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
for i in range(self.num_rows )
]
def __eq__(self : Optional[Any] , __a : object ):
if not isinstance(__a , __a ):
return NotImplemented
return self.rows == other.rows
def __ne__(self : Union[str, Any] , __a : object ):
return not self == other
def __neg__(self : Optional[Any] ):
return self * -1
def __add__(self : Union[str, Any] , __a : Matrix ):
if self.order != other.order:
raise ValueError("Addition requires matrices of the same order" )
return Matrix(
[
[self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
def __sub__(self : Tuple , __a : Matrix ):
if self.order != other.order:
raise ValueError("Subtraction requires matrices of the same order" )
return Matrix(
[
[self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
def __mul__(self : Dict , __a : Matrix | int | float ):
if isinstance(__a , (int, float) ):
return Matrix(
[[int(element * other ) for element in row] for row in self.rows] )
elif isinstance(__a , __a ):
if self.num_columns != other.num_rows:
raise ValueError(
"The number of columns in the first matrix must "
"be equal to the number of rows in the second" )
return Matrix(
[
[Matrix.dot_product(__a , __a ) for column in other.columns()]
for row in self.rows
] )
else:
raise TypeError(
"A Matrix can only be multiplied by an int, float, or another matrix" )
def __pow__(self : Optional[Any] , __a : int ):
if not isinstance(__a , __a ):
raise TypeError("A Matrix can only be raised to the power of an int" )
if not self.is_square:
raise ValueError("Only square matrices can be raised to a power" )
if other == 0:
return self.identity()
if other < 0:
if self.is_invertable():
return self.inverse() ** (-other)
raise ValueError(
"Only invertable matrices can be raised to a negative power" )
UpperCAmelCase_ = self
for _ in range(other - 1 ):
result *= self
return result
@classmethod
def _lowercase (cls : Any , __a : list[int] , __a : list[int] ):
return sum(row[i] * column[i] for i in range(len(__a ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
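    # Intended usage of the Matrix class above (mirroring its upstream doctests):
    #   m = Matrix([[1, 2], [3, 4]])
    #   m.determinant()  # -2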
| 1 | '''simple docstring'''
import math
def main ( ) -> None:
    '''simple docstring'''
    message = input("Enter message: " )
    key = int(input(f"""Enter key [2-{len(message ) - 1}]: """ ) )
    mode = input("Encryption/Decryption [e/d]: " )
    if mode.lower().startswith("e" ):
        text = encrypt_message(key , message )
    elif mode.lower().startswith("d" ):
        text = decrypt_message(key , message )
    # Append pipe symbol (vertical bar) to identify spaces at the end.
    print(f"""Output:\n{text + "|"}""" )
def encrypt_message ( key : int , message : str ) -> str:
    '''simple docstring'''
    cipher_text = [""] * key
    for col in range(key ):
        pointer = col
        while pointer < len(message ):
            cipher_text[col] += message[pointer]
            pointer += key
    return "".join(cipher_text )
def decrypt_message ( key : int , message : str ) -> str:
    '''simple docstring'''
    num_cols = math.ceil(len(message ) / key )
    num_rows = key
    num_shaded_boxes = (num_cols * num_rows) - len(message )
    plain_text = [""] * num_cols
    col = 0
    row = 0
    for symbol in message:
        plain_text[col] += symbol
        col += 1
        if (
            (col == num_cols)
            or (col == num_cols - 1)
            and (row >= num_rows - num_shaded_boxes)
        ):
            col = 0
            row += 1
    return "".join(plain_text )
if __name__ == "__main__":
import doctest
doctest.testmod()
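    # Round-trip sanity check of the two functions above (key chosen arbitrarily):
    assert decrypt_message(6, encrypt_message(6, "Common sense is not so common.")) == "Common sense is not so common."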
main()
| 1 | 1 |
'''simple docstring'''
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class __A ( unittest.TestCase ):
def _lowercase (self : Dict ):
UpperCAmelCase_ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
UpperCAmelCase_ = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__a )
UpperCAmelCase_ = -1
UpperCAmelCase_ = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__a )
UpperCAmelCase_ = model.generate(__a , max_new_tokens=10 , do_sample=__a )
UpperCAmelCase_ = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
UpperCAmelCase_ = TextStreamer(__a )
model.generate(__a , max_new_tokens=10 , do_sample=__a , streamer=__a )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
UpperCAmelCase_ = cs.out[:-1]
self.assertEqual(__a , __a )
def _lowercase (self : Union[str, Any] ):
UpperCAmelCase_ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
UpperCAmelCase_ = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__a )
UpperCAmelCase_ = -1
UpperCAmelCase_ = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__a )
UpperCAmelCase_ = model.generate(__a , max_new_tokens=10 , do_sample=__a )
UpperCAmelCase_ = tokenizer.decode(greedy_ids[0] )
UpperCAmelCase_ = TextIteratorStreamer(__a )
UpperCAmelCase_ = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
UpperCAmelCase_ = Thread(target=model.generate , kwargs=__a )
thread.start()
UpperCAmelCase_ = ""
for new_text in streamer:
streamer_text += new_text
self.assertEqual(__a , __a )
def _lowercase (self : Optional[int] ):
UpperCAmelCase_ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
UpperCAmelCase_ = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__a )
UpperCAmelCase_ = -1
UpperCAmelCase_ = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__a )
UpperCAmelCase_ = model.generate(__a , max_new_tokens=10 , do_sample=__a )
UpperCAmelCase_ = greedy_ids[:, input_ids.shape[1] :]
UpperCAmelCase_ = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
UpperCAmelCase_ = TextStreamer(__a , skip_prompt=__a )
model.generate(__a , max_new_tokens=10 , do_sample=__a , streamer=__a )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
UpperCAmelCase_ = cs.out[:-1]
self.assertEqual(__a , __a )
def _lowercase (self : Any ):
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
UpperCAmelCase_ = AutoTokenizer.from_pretrained("distilgpt2" )
UpperCAmelCase_ = AutoModelForCausalLM.from_pretrained("distilgpt2" ).to(__a )
UpperCAmelCase_ = -1
UpperCAmelCase_ = torch.ones((1, 5) , device=__a ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
UpperCAmelCase_ = TextStreamer(__a , skip_special_tokens=__a )
model.generate(__a , max_new_tokens=1 , do_sample=__a , streamer=__a )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
UpperCAmelCase_ = cs.out[:-1] # Remove the final "\n"
UpperCAmelCase_ = tokenizer(__a , return_tensors="pt" )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def _lowercase (self : Dict ):
UpperCAmelCase_ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
UpperCAmelCase_ = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__a )
UpperCAmelCase_ = -1
UpperCAmelCase_ = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__a )
UpperCAmelCase_ = TextIteratorStreamer(__a , timeout=0.0_01 )
UpperCAmelCase_ = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
UpperCAmelCase_ = Thread(target=model.generate , kwargs=__a )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(__a ):
UpperCAmelCase_ = ""
for new_text in streamer:
streamer_text += new_text
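# Hedged usage sketch mirroring the test above (not part of the original file):
# `generate` runs in a background thread while the main thread consumes text
# pieces from the streamer. The checkpoint is the same tiny test model.
def _stream_demo() -> str:
    tok = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
    model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
    inputs = tok("Hello", return_tensors="pt")
    streamer = TextIteratorStreamer(tok)
    thread = Thread(
        target=model.generate,
        kwargs={**inputs, "max_new_tokens": 10, "do_sample": False, "streamer": streamer},
    )
    thread.start()
    text = "".join(piece for piece in streamer)  # pieces arrive as they are generated
    thread.join()
    return text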
'''simple docstring'''
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
SCREAMING_SNAKE_CASE_: Optional[int] =logging.getLogger()
SCREAMING_SNAKE_CASE_: Union[str, Any] =logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class __A ( UpperCamelCase__ ):
def _lowercase (self : Optional[Any] , __a : str ):
os.makedirs(__a , exist_ok=__a )
UpperCAmelCase_ = {"source": "What is love ?", "target": "life"}
UpperCAmelCase_ = {"train": 12, "val": 2, "test": 2}
for split in ["train", "test", "val"]:
for field in ["source", "target"]:
UpperCAmelCase_ = "\n".join([contents[field]] * n_lines[split] )
with open(os.path.join(__a , f"""{split}.{field}""" ) , "w" ) as f:
f.write(__a )
def _lowercase (self : Optional[int] , __a : int , __a : str = "pytorch" ):
UpperCAmelCase_ = self.get_auto_remove_tmp_dir()
UpperCAmelCase_ = os.path.join(__a , "output" )
UpperCAmelCase_ = os.path.join(__a , "data" )
self._create_dummy_data(data_dir=__a )
UpperCAmelCase_ = f"""
--data_dir {data_dir} \
--output_dir {output_dir} \
--model_name_or_path facebook/rag-sequence-base \
--model_type rag_sequence \
--do_train \
--do_predict \
--n_val -1 \
--val_check_interval 1.0 \
--train_batch_size 2 \
--eval_batch_size 1 \
--max_source_length 25 \
--max_target_length 25 \
--val_max_target_length 25 \
--test_max_target_length 25 \
--label_smoothing 0.1 \
--dropout 0.1 \
--attention_dropout 0.1 \
--weight_decay 0.001 \
--adam_epsilon 1e-08 \
--max_grad_norm 0.1 \
--lr_scheduler polynomial \
--learning_rate 3e-04 \
--num_train_epochs 1 \
--warmup_steps 4 \
--gradient_accumulation_steps 1 \
--distributed-port 8787 \
--use_dummy_dataset 1 \
--distributed_retriever {distributed_retriever} \
""".split()
if gpus > 0:
testargs.append(f"""--gpus={gpus}""" )
if is_apex_available():
testargs.append("--fp16" )
else:
testargs.append("--gpus=0" )
testargs.append("--distributed_backend=ddp_cpu" )
testargs.append("--num_processes=2" )
UpperCAmelCase_ = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs
execute_subprocess_async(__a , env=self.get_env() )
UpperCAmelCase_ = os.path.join(__a , "metrics.json" )
with open(__a ) as f:
UpperCAmelCase_ = json.load(__a )
return result
@require_torch_gpu
def _lowercase (self : Optional[int] ):
UpperCAmelCase_ = self._run_finetune(gpus=1 )
self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 )
@require_torch_multi_gpu
def _lowercase (self : Dict ):
UpperCAmelCase_ = self._run_finetune(gpus=2 )
self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 )
@require_torch_gpu
@require_ray
def _lowercase (self : Optional[int] ):
UpperCAmelCase_ = self._run_finetune(gpus=1 , distributed_retriever="ray" )
self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 )
@require_torch_multi_gpu
@require_ray
def _lowercase (self : Any ):
UpperCAmelCase_ = self._run_finetune(gpus=1 , distributed_retriever="ray" )
self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 )
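# Hedged sketch of the harness pattern above, using only the stdlib: run a
# training script as a child process, then read back the metrics.json it wrote
# -- the same run-then-inspect flow as _run_finetune. The script path, flags
# and threshold here are placeholders, not values taken from this test.
import subprocess

def _run_and_check(script: str, output_dir: str, min_em: float = 0.2) -> dict:
    subprocess.run([sys.executable, script, "--output_dir", output_dir], check=True)
    with open(os.path.join(output_dir, "metrics.json")) as f:
        metrics = json.load(f)
    assert metrics["test"][0]["test_avg_em"] >= min_em
    return metrics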
'''simple docstring'''
from __future__ import annotations
def lowerCAmelCase_ ( snake_case_ : list , snake_case_ : int | None = None , snake_case_ : int | None = None ) -> None:
'''simple docstring'''
if start is None:
UpperCAmelCase_ = 0
if end is None:
UpperCAmelCase_ = len(snake_case_ ) - 1
if start >= end:
return
UpperCAmelCase_ = (start + end) // 2
slowsort(snake_case_ , snake_case_ , snake_case_ )
slowsort(snake_case_ , mid + 1 , snake_case_ )
if sequence[end] < sequence[mid]:
UpperCAmelCase_ , UpperCAmelCase_ = sequence[mid], sequence[end]
slowsort(snake_case_ , snake_case_ , end - 1 )
if __name__ == "__main__":
from doctest import testmod
testmod()
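# Hedged demo with names de-obfuscated for readability: the function above is
# slowsort, a deliberately inefficient multiply-and-surrender sort. This
# standalone copy shows it in use.
def _slowsort_demo() -> None:
    def slowsort(seq: list, start: int = 0, end: int | None = None) -> None:
        end = len(seq) - 1 if end is None else end
        if start >= end:
            return
        mid = (start + end) // 2
        slowsort(seq, start, mid)      # recursively sort the left half
        slowsort(seq, mid + 1, end)    # recursively sort the right half
        if seq[end] < seq[mid]:        # place the maximum at the end
            seq[mid], seq[end] = seq[end], seq[mid]
        slowsort(seq, start, end - 1)  # "surrender": re-sort everything but the max

    data = [5, 1, 4, 2, 3]
    slowsort(data)
    assert data == [1, 2, 3, 4, 5]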
'''simple docstring'''
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
SCREAMING_SNAKE_CASE_: Optional[int] =Lock()
def lowerCAmelCase_ ( snake_case_ : Dict , snake_case_ : Dict , snake_case_ : Optional[int] , snake_case_ : Tuple , snake_case_ : Any , snake_case_ : Optional[Any] , snake_case_ : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
global process_lock
# we perform n swaps since after n swaps we know we are sorted
# we *could* stop early if we are sorted already, but it takes as long to
# find out we are sorted as it does to sort the list with this algorithm
for i in range(0 , 10 ):
if (i + position) % 2 == 0 and r_send is not None:
# send your value to your right neighbor
process_lock.acquire()
r_send[1].send(snake_case_ )
process_lock.release()
# receive your right neighbor's value
process_lock.acquire()
UpperCAmelCase_ = rr_cv[0].recv()
process_lock.release()
# take the lower value since you are on the left
UpperCAmelCase_ = min(snake_case_ , snake_case_ )
elif (i + position) % 2 != 0 and l_send is not None:
# send your value to your left neighbor
process_lock.acquire()
l_send[1].send(snake_case_ )
process_lock.release()
# receive your left neighbor's value
process_lock.acquire()
UpperCAmelCase_ = lr_cv[0].recv()
process_lock.release()
# take the higher value since you are on the right
UpperCAmelCase_ = max(snake_case_ , snake_case_ )
# after all swaps are performed, send the values back to main
result_pipe[1].send(snake_case_ )
def lowerCAmelCase_ ( snake_case_ : Optional[Any] ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ = []
UpperCAmelCase_ = []
# initialize the list of pipes where the values will be retrieved
for _ in arr:
result_pipe.append(Pipe() )
# creates the processes
# the first and last process only have one neighbor so they are made outside
# of the loop
UpperCAmelCase_ = Pipe()
UpperCAmelCase_ = Pipe()
process_array_.append(
Process(
target=snake_case_ , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
UpperCAmelCase_ = temp_rs
UpperCAmelCase_ = temp_rr
for i in range(1 , len(snake_case_ ) - 1 ):
UpperCAmelCase_ = Pipe()
UpperCAmelCase_ = Pipe()
process_array_.append(
Process(
target=snake_case_ , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
UpperCAmelCase_ = temp_rs
UpperCAmelCase_ = temp_rr
process_array_.append(
Process(
target=snake_case_ , args=(
len(snake_case_ ) - 1,
arr[len(snake_case_ ) - 1],
temp_ls,
None,
temp_lr,
None,
result_pipe[len(snake_case_ ) - 1],
) , ) )
# start the processes
for p in process_array_:
p.start()
# wait for the processes to end and write their values to the list
for p in range(0 , len(snake_case_ ) ):
UpperCAmelCase_ = result_pipe[p][0].recv()
process_array_[p].join()
return arr
def lowerCAmelCase_ ( ) -> str:
'''simple docstring'''
UpperCAmelCase_ = list(range(10 , 0 , -1 ) )
print("Initial List" )
print(*snake_case_ )
UpperCAmelCase_ = odd_even_transposition(snake_case_ )
print("Sorted List\n" )
print(*snake_case_ )
if __name__ == "__main__":
main()
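# Hedged single-process variant for contrast (not from the original file): the
# same alternating compare-exchange schedule the pipe-connected processes above
# implement, written sequentially. n phases on n elements guarantee sortedness.
def odd_even_transposition_seq(arr: list) -> list:
    n = len(arr)
    for phase in range(n):
        start = phase % 2  # even phases pair (0,1),(2,3),...; odd phases pair (1,2),(3,4),...
        for i in range(start, n - 1, 2):
            if arr[i] > arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr

assert odd_even_transposition_seq(list(range(10, 0, -1))) == list(range(1, 11))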
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_: Optional[int] =logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
SCREAMING_SNAKE_CASE_: Tuple =[]
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f"transformer.encoder.layers.{i}.self_attn.out_proj.weight", f"encoder.layers.{i}.self_attn.out_proj.weight")
)
rename_keys.append(
(f"transformer.encoder.layers.{i}.self_attn.out_proj.bias", f"encoder.layers.{i}.self_attn.out_proj.bias")
)
rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"encoder.layers.{i}.fc1.weight"))
rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"encoder.layers.{i}.fc1.bias"))
rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"encoder.layers.{i}.fc2.weight"))
rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"encoder.layers.{i}.fc2.bias"))
rename_keys.append(
(f"transformer.encoder.layers.{i}.norm1.weight", f"encoder.layers.{i}.self_attn_layer_norm.weight")
)
rename_keys.append((f"transformer.encoder.layers.{i}.norm1.bias", f"encoder.layers.{i}.self_attn_layer_norm.bias"))
rename_keys.append((f"transformer.encoder.layers.{i}.norm2.weight", f"encoder.layers.{i}.final_layer_norm.weight"))
rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"encoder.layers.{i}.final_layer_norm.bias"))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", f"decoder.layers.{i}.self_attn.out_proj.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"decoder.layers.{i}.self_attn.out_proj.bias")
)
rename_keys.append(
(
f"transformer.decoder.layers.{i}.cross_attn.out_proj.weight",
f"decoder.layers.{i}.encoder_attn.out_proj.weight",
)
)
rename_keys.append(
(
f"transformer.decoder.layers.{i}.cross_attn.out_proj.bias",
f"decoder.layers.{i}.encoder_attn.out_proj.bias",
)
)
rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"decoder.layers.{i}.fc1.weight"))
rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"decoder.layers.{i}.fc1.bias"))
rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"decoder.layers.{i}.fc2.weight"))
rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"decoder.layers.{i}.fc2.bias"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.norm1.weight", f"decoder.layers.{i}.self_attn_layer_norm.weight")
)
rename_keys.append((f"transformer.decoder.layers.{i}.norm1.bias", f"decoder.layers.{i}.self_attn_layer_norm.bias"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.norm2.weight", f"decoder.layers.{i}.encoder_attn_layer_norm.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.norm2.bias", f"decoder.layers.{i}.encoder_attn_layer_norm.bias")
)
rename_keys.append((f"transformer.decoder.layers.{i}.norm3.weight", f"decoder.layers.{i}.final_layer_norm.weight"))
rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"decoder.layers.{i}.final_layer_norm.bias"))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f"transformer.decoder.layers.{i}.sa_qcontent_proj.weight", f"decoder.layers.{i}.sa_qcontent_proj.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.sa_kcontent_proj.weight", f"decoder.layers.{i}.sa_kcontent_proj.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.sa_qpos_proj.weight", f"decoder.layers.{i}.sa_qpos_proj.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.sa_kpos_proj.weight", f"decoder.layers.{i}.sa_kpos_proj.weight")
)
rename_keys.append((f"transformer.decoder.layers.{i}.sa_v_proj.weight", f"decoder.layers.{i}.sa_v_proj.weight"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.ca_qcontent_proj.weight", f"decoder.layers.{i}.ca_qcontent_proj.weight")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.ca_kcontent_proj.weight", f"decoder.layers.{i}.ca_kcontent_proj.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.ca_kpos_proj.weight", f"decoder.layers.{i}.ca_kpos_proj.weight")
)
rename_keys.append((f"transformer.decoder.layers.{i}.ca_v_proj.weight", f"decoder.layers.{i}.ca_v_proj.weight"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight", f"decoder.layers.{i}.ca_qpos_sine_proj.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.sa_qcontent_proj.bias", f"decoder.layers.{i}.sa_qcontent_proj.bias")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.sa_kcontent_proj.bias", f"decoder.layers.{i}.sa_kcontent_proj.bias")
)
rename_keys.append((f"transformer.decoder.layers.{i}.sa_qpos_proj.bias", f"decoder.layers.{i}.sa_qpos_proj.bias"))
rename_keys.append((f"transformer.decoder.layers.{i}.sa_kpos_proj.bias", f"decoder.layers.{i}.sa_kpos_proj.bias"))
rename_keys.append((f"transformer.decoder.layers.{i}.sa_v_proj.bias", f"decoder.layers.{i}.sa_v_proj.bias"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.ca_qcontent_proj.bias", f"decoder.layers.{i}.ca_qcontent_proj.bias")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.ca_kcontent_proj.bias", f"decoder.layers.{i}.ca_kcontent_proj.bias")
)
rename_keys.append((f"transformer.decoder.layers.{i}.ca_kpos_proj.bias", f"decoder.layers.{i}.ca_kpos_proj.bias"))
rename_keys.append((f"transformer.decoder.layers.{i}.ca_v_proj.bias", f"decoder.layers.{i}.ca_v_proj.bias"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias", f"decoder.layers.{i}.ca_qpos_sine_proj.bias")
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
('transformer.decoder.ref_point_head.layers.0.weight', 'decoder.ref_point_head.layers.0.weight'),
('transformer.decoder.ref_point_head.layers.0.bias', 'decoder.ref_point_head.layers.0.bias'),
('transformer.decoder.ref_point_head.layers.1.weight', 'decoder.ref_point_head.layers.1.weight'),
('transformer.decoder.ref_point_head.layers.1.bias', 'decoder.ref_point_head.layers.1.bias'),
('transformer.decoder.query_scale.layers.0.weight', 'decoder.query_scale.layers.0.weight'),
('transformer.decoder.query_scale.layers.0.bias', 'decoder.query_scale.layers.0.bias'),
('transformer.decoder.query_scale.layers.1.weight', 'decoder.query_scale.layers.1.weight'),
('transformer.decoder.query_scale.layers.1.bias', 'decoder.query_scale.layers.1.bias'),
('transformer.decoder.layers.0.ca_qpos_proj.weight', 'decoder.layers.0.ca_qpos_proj.weight'),
('transformer.decoder.layers.0.ca_qpos_proj.bias', 'decoder.layers.0.ca_qpos_proj.bias'),
]
)
def lowerCAmelCase_ ( snake_case_ : Dict , snake_case_ : Any , snake_case_ : Optional[int] ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ = state_dict.pop(snake_case_ )
UpperCAmelCase_ = val
def lowerCAmelCase_ ( snake_case_ : int ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
UpperCAmelCase_ = key.replace("backbone.0.body" , "backbone.conv_encoder.model" )
UpperCAmelCase_ = value
else:
UpperCAmelCase_ = value
return new_state_dict
def lowerCAmelCase_ ( snake_case_ : List[Any] , snake_case_ : Dict=False ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = ""
if is_panoptic:
UpperCAmelCase_ = "conditional_detr."
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
UpperCAmelCase_ = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" )
UpperCAmelCase_ = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
UpperCAmelCase_ = in_proj_weight[:2_56, :]
UpperCAmelCase_ = in_proj_bias[:2_56]
UpperCAmelCase_ = in_proj_weight[2_56:5_12, :]
UpperCAmelCase_ = in_proj_bias[2_56:5_12]
UpperCAmelCase_ = in_proj_weight[-2_56:, :]
UpperCAmelCase_ = in_proj_bias[-2_56:]
def lowerCAmelCase_ ( ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ = "http://images.cocodataset.org/val2017/000000039769.jpg"
UpperCAmelCase_ = Image.open(requests.get(snake_case_ , stream=snake_case_ ).raw )
return im
@torch.no_grad()
def lowerCAmelCase_ ( snake_case_ : Optional[int] , snake_case_ : Dict ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
UpperCAmelCase_ = "resnet101"
if "dc5" in model_name:
UpperCAmelCase_ = True
UpperCAmelCase_ = "panoptic" in model_name
if is_panoptic:
UpperCAmelCase_ = 2_50
else:
UpperCAmelCase_ = 91
UpperCAmelCase_ = "huggingface/label-files"
UpperCAmelCase_ = "coco-detection-id2label.json"
UpperCAmelCase_ = json.load(open(hf_hub_download(snake_case_ , snake_case_ , repo_type="dataset" ) , "r" ) )
UpperCAmelCase_ = {int(snake_case_ ): v for k, v in idalabel.items()}
UpperCAmelCase_ = idalabel
UpperCAmelCase_ = {v: k for k, v in idalabel.items()}
# load image processor
UpperCAmelCase_ = "coco_panoptic" if is_panoptic else "coco_detection"
UpperCAmelCase_ = ConditionalDetrImageProcessor(format=snake_case_ )
# prepare image
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = image_processor(images=snake_case_ , return_tensors="pt" )
UpperCAmelCase_ = encoding["pixel_values"]
logger.info(f"""Converting model {model_name}...""" )
# load original model from torch hub
UpperCAmelCase_ = torch.hub.load("DeppMeng/ConditionalDETR" , snake_case_ , pretrained=snake_case_ ).eval()
UpperCAmelCase_ = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
UpperCAmelCase_ = "conditional_detr." + src
rename_key(snake_case_ , snake_case_ , snake_case_ )
UpperCAmelCase_ = rename_backbone_keys(snake_case_ )
# query, key and value matrices need special treatment
read_in_q_k_v(snake_case_ , is_panoptic=snake_case_ )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
UpperCAmelCase_ = "conditional_detr.model." if is_panoptic else "model."
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("conditional_detr" )
and not key.startswith("class_labels_classifier" )
and not key.startswith("bbox_predictor" )
):
UpperCAmelCase_ = state_dict.pop(snake_case_ )
UpperCAmelCase_ = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
UpperCAmelCase_ = state_dict.pop(snake_case_ )
UpperCAmelCase_ = val
elif key.startswith("bbox_attention" ) or key.startswith("mask_head" ):
continue
else:
UpperCAmelCase_ = state_dict.pop(snake_case_ )
UpperCAmelCase_ = val
else:
if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ):
UpperCAmelCase_ = state_dict.pop(snake_case_ )
UpperCAmelCase_ = val
# finally, create HuggingFace model and load state dict
UpperCAmelCase_ = ConditionalDetrForSegmentation(snake_case_ ) if is_panoptic else ConditionalDetrForObjectDetection(snake_case_ )
model.load_state_dict(snake_case_ )
model.eval()
model.push_to_hub(repo_id=snake_case_ , organization="DepuMeng" , commit_message="Add model" )
# verify our conversion
UpperCAmelCase_ = conditional_detr(snake_case_ )
UpperCAmelCase_ = model(snake_case_ )
assert torch.allclose(outputs.logits , original_outputs["pred_logits"] , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes , original_outputs["pred_boxes"] , atol=1E-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs["pred_masks"] , atol=1E-4 )
# Save model and image processor
logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
Path(snake_case_ ).mkdir(exist_ok=snake_case_ )
model.save_pretrained(snake_case_ )
image_processor.save_pretrained(snake_case_ )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_: List[str] =argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='conditional_detr_resnet50',
type=str,
help='Name of the CONDITIONAL_DETR model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
SCREAMING_SNAKE_CASE_: int =parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
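# Example invocation (hedged: the filename below is whatever this script is
# saved as, and conversion needs network access for torch.hub and the Hugging
# Face Hub, since the script calls push_to_hub):
#
#   python convert_conditional_detr_checkpoint.py \
#       --model_name conditional_detr_resnet50 \
#       --pytorch_dump_folder_path ./conditional_detr_resnet50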
'''simple docstring'''
def lowerCAmelCase_ ( snake_case_ : int , snake_case_ : int ) -> str:
'''simple docstring'''
if a < 0 or b < 0:
raise ValueError("the value of both inputs must be positive" )
UpperCAmelCase_ = str(bin(snake_case_ ) )[2:] # remove the leading "0b"
UpperCAmelCase_ = str(bin(snake_case_ ) )[2:]
UpperCAmelCase_ = max(len(snake_case_ ) , len(snake_case_ ) )
return "0b" + "".join(
str(int("1" in (char_a, char_b) ) )
for char_a, char_b in zip(a_binary.zfill(snake_case_ ) , b_binary.zfill(snake_case_ ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
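# Hedged worked example: a de-obfuscated copy of the routine above, with a
# checked result. The name binary_or is my own label for it.
def binary_or(a: int, b: int) -> str:
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_bin, b_bin = bin(a)[2:], bin(b)[2:]   # drop the leading "0b"
    width = max(len(a_bin), len(b_bin))
    return "0b" + "".join(
        str(int("1" in pair))               # a position is 1 if either bit is 1
        for pair in zip(a_bin.zfill(width), b_bin.zfill(width))
    )

assert binary_or(25, 32) == "0b111001"      # 25 = 0b011001, 32 = 0b100000, 25 | 32 = 57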
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
SCREAMING_SNAKE_CASE_: Dict ={
'configuration_rag': ['RagConfig'],
'retrieval_rag': ['RagRetriever'],
'tokenization_rag': ['RagTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_: Optional[Any] =[
'RagModel',
'RagPreTrainedModel',
'RagSequenceForGeneration',
'RagTokenForGeneration',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_: List[Any] =[
'TFRagModel',
'TFRagPreTrainedModel',
'TFRagSequenceForGeneration',
'TFRagTokenForGeneration',
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
SCREAMING_SNAKE_CASE_: Dict =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
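# Hedged usage note: with the lazy module above, end users import straight from
# `transformers`; the torch/TF model classes are only resolved when the
# corresponding framework is installed.
#
#   from transformers import RagConfig, RagRetriever, RagTokenizer
#   from transformers import RagSequenceForGeneration    # needs torch
#   from transformers import TFRagSequenceForGeneration  # needs tensorflow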
'''simple docstring'''
from __future__ import annotations
import queue
class __A :
def __init__(self : Optional[Any] , __a : str ):
UpperCAmelCase_ = data
UpperCAmelCase_ = None
UpperCAmelCase_ = None
def lowerCAmelCase_ ( ) -> TreeNode:
'''simple docstring'''
print("\n********Press N to stop entering at any point of time********\n" )
UpperCAmelCase_ = input("Enter the value of the root node: " ).strip().lower()
UpperCAmelCase_ = queue.Queue()
UpperCAmelCase_ = TreeNode(int(snake_case_ ) )
q.put(snake_case_ )
while not q.empty():
UpperCAmelCase_ = q.get()
UpperCAmelCase_ = f"""Enter the left node of {node_found.data}: """
UpperCAmelCase_ = input(snake_case_ ).strip().lower() or "n"
if check == "n":
return tree_node
UpperCAmelCase_ = TreeNode(int(snake_case_ ) )
UpperCAmelCase_ = left_node
q.put(snake_case_ )
UpperCAmelCase_ = f"""Enter the right node of {node_found.data}: """
UpperCAmelCase_ = input(snake_case_ ).strip().lower() or "n"
if check == "n":
return tree_node
UpperCAmelCase_ = TreeNode(int(snake_case_ ) )
UpperCAmelCase_ = right_node
q.put(snake_case_ )
raise
def lowerCAmelCase_ ( snake_case_ : TreeNode ) -> None:
'''simple docstring'''
if not isinstance(snake_case_ , snake_case_ ) or not node:
return
print(node.data , end="," )
pre_order(node.left )
pre_order(node.right )
def lowerCAmelCase_ ( snake_case_ : TreeNode ) -> None:
'''simple docstring'''
if not isinstance(snake_case_ , snake_case_ ) or not node:
return
in_order(node.left )
print(node.data , end="," )
in_order(node.right )
def lowerCAmelCase_ ( snake_case_ : TreeNode ) -> None:
'''simple docstring'''
if not isinstance(snake_case_ , snake_case_ ) or not node:
return
post_order(node.left )
post_order(node.right )
print(node.data , end="," )
def lowerCAmelCase_ ( snake_case_ : TreeNode ) -> None:
'''simple docstring'''
if not isinstance(snake_case_ , snake_case_ ) or not node:
return
UpperCAmelCase_ = queue.Queue()
q.put(snake_case_ )
while not q.empty():
UpperCAmelCase_ = q.get()
print(node_dequeued.data , end="," )
if node_dequeued.left:
q.put(node_dequeued.left )
if node_dequeued.right:
q.put(node_dequeued.right )
def lowerCAmelCase_ ( snake_case_ : TreeNode ) -> None:
'''simple docstring'''
if not isinstance(snake_case_ , snake_case_ ) or not node:
return
UpperCAmelCase_ = queue.Queue()
q.put(snake_case_ )
while not q.empty():
UpperCAmelCase_ = []
while not q.empty():
UpperCAmelCase_ = q.get()
print(node_dequeued.data , end="," )
if node_dequeued.left:
list_.append(node_dequeued.left )
if node_dequeued.right:
list_.append(node_dequeued.right )
print()
for node in list_:
q.put(snake_case_ )
def lowerCAmelCase_ ( snake_case_ : TreeNode ) -> None:
'''simple docstring'''
if not isinstance(snake_case_ , snake_case_ ) or not node:
return
UpperCAmelCase_ = []
UpperCAmelCase_ = node
while n or stack:
while n: # start from root node, find its left child
print(n.data , end="," )
stack.append(snake_case_ )
UpperCAmelCase_ = n.left
# end of while means current node doesn't have left child
UpperCAmelCase_ = stack.pop()
# start to traverse its right child
UpperCAmelCase_ = n.right
def lowerCAmelCase_ ( snake_case_ : TreeNode ) -> None:
'''simple docstring'''
if not isinstance(snake_case_ , snake_case_ ) or not node:
return
UpperCAmelCase_ = []
UpperCAmelCase_ = node
while n or stack:
while n:
stack.append(snake_case_ )
UpperCAmelCase_ = n.left
UpperCAmelCase_ = stack.pop()
print(n.data , end="," )
UpperCAmelCase_ = n.right
def lowerCAmelCase_ ( snake_case_ : TreeNode ) -> None:
'''simple docstring'''
if not isinstance(snake_case_ , snake_case_ ) or not node:
return
UpperCAmelCase_ , UpperCAmelCase_ = [], []
UpperCAmelCase_ = node
stacka.append(snake_case_ )
while stacka: # to find the reversed order of post order, store it in stack2
UpperCAmelCase_ = stacka.pop()
if n.left:
stacka.append(n.left )
if n.right:
stacka.append(n.right )
stacka.append(snake_case_ )
while stacka: # pop up from stack2 will be the post order
print(stacka.pop().data , end="," )
def lowerCAmelCase_ ( snake_case_ : str = "" , snake_case_ : Any=50 , snake_case_ : Union[str, Any]="*" ) -> str:
'''simple docstring'''
if not s:
return "\n" + width * char
UpperCAmelCase_ , UpperCAmelCase_ = divmod(width - len(snake_case_ ) - 2 , 2 )
return f"""{left * char} {s} {(left + extra) * char}"""
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt('Binary Tree Traversals'))
SCREAMING_SNAKE_CASE_: TreeNode =build_tree()
print(prompt('Pre Order Traversal'))
pre_order(node)
print(prompt() + '\n')
print(prompt('In Order Traversal'))
in_order(node)
print(prompt() + '\n')
print(prompt('Post Order Traversal'))
post_order(node)
print(prompt() + '\n')
print(prompt('Level Order Traversal'))
level_order(node)
print(prompt() + '\n')
print(prompt('Actual Level Order Traversal'))
level_order_actual(node)
print('*' * 50 + '\n')
print(prompt('Pre Order Traversal - Iteration Version'))
pre_order_iter(node)
print(prompt() + '\n')
print(prompt('In Order Traversal - Iteration Version'))
in_order_iter(node)
print(prompt() + '\n')
print(prompt('Post Order Traversal - Iteration Version'))
post_order_iter(node)
print(prompt())
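# Hedged non-interactive demo. It assumes the obfuscated definitions above
# stand for their original names (TreeNode with .data/.left/.right, pre_order,
# in_order_iter, ...), exactly as the driver code above already does.
def _demo_tree() -> TreeNode:
    root = TreeNode(1)
    root.left, root.right = TreeNode(2), TreeNode(3)
    root.left.left, root.left.right = TreeNode(4), TreeNode(5)
    return root

# pre_order(_demo_tree())      # prints: 1,2,4,5,3,
# in_order_iter(_demo_tree())  # prints: 4,2,5,1,3,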
'''simple docstring'''
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class __A ( UpperCamelCase__ ):
a__ : Optional[Any] = DistilBertTokenizer
a__ : Any = DistilBertTokenizerFast
a__ : str = True
@slow
def _lowercase (self : int ):
UpperCAmelCase_ = DistilBertTokenizer.from_pretrained("distilbert-base-uncased" )
UpperCAmelCase_ = tokenizer.encode("sequence builders" , add_special_tokens=__a )
UpperCAmelCase_ = tokenizer.encode("multi-sequence build" , add_special_tokens=__a )
UpperCAmelCase_ = tokenizer.build_inputs_with_special_tokens(__a )
UpperCAmelCase_ = tokenizer.build_inputs_with_special_tokens(__a , __a )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
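# Hedged note on the assertions above: they encode the BERT-style input layout
# that DistilBERT inherits, i.e.
#   single sequence: [CLS] tokens   [SEP]
#   sequence pair:   [CLS] tokens_a [SEP] tokens_b [SEP]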
'''simple docstring'''
from __future__ import annotations
def lowerCAmelCase_ ( snake_case_ : list , snake_case_ : int ) -> Optional[int]:
'''simple docstring'''
if len(snake_case_ ) <= 1 or n <= 1:
return
insert_next(snake_case_ , n - 1 )
rec_insertion_sort(snake_case_ , n - 1 )
def lowerCAmelCase_ ( snake_case_ : list , snake_case_ : int ) -> List[str]:
'''simple docstring'''
if index >= len(snake_case_ ) or collection[index - 1] <= collection[index]:
return
# Swaps adjacent elements since they are not in ascending order
UpperCAmelCase_ , UpperCAmelCase_ = (
collection[index],
collection[index - 1],
)
insert_next(snake_case_ , index + 1 )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_: Optional[int] =input('Enter integers separated by spaces: ')
SCREAMING_SNAKE_CASE_: list[int] =[int(num) for num in numbers.split()]
rec_insertion_sort(number_list, len(number_list))
print(number_list)
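# Hedged non-interactive example, under the same assumption as the driver
# above (the obfuscated definitions stand for rec_insertion_sort/insert_next):
#
#   sample = [5, 3, 8, 1]
#   rec_insertion_sort(sample, len(sample))
#   assert sample == [1, 3, 5, 8]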
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
SCREAMING_SNAKE_CASE_: int ={
'configuration_transfo_xl': ['TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TransfoXLConfig'],
'tokenization_transfo_xl': ['TransfoXLCorpus', 'TransfoXLTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_: Optional[Any] =[
'TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'AdaptiveEmbedding',
'TransfoXLForSequenceClassification',
'TransfoXLLMHeadModel',
'TransfoXLModel',
'TransfoXLPreTrainedModel',
'load_tf_weights_in_transfo_xl',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_: Union[str, Any] =[
'TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFAdaptiveEmbedding',
'TFTransfoXLForSequenceClassification',
'TFTransfoXLLMHeadModel',
'TFTransfoXLMainLayer',
'TFTransfoXLModel',
'TFTransfoXLPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE_: Tuple =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
SCREAMING_SNAKE_CASE_: Union[str, Any] =logging.get_logger(__name__)
class __A ( UpperCamelCase__ ):
def __init__(self : int , *__a : Dict , **__a : str ):
warnings.warn(
"The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use CLIPImageProcessor instead." , __a , )
super().__init__(*__a , **__a )
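# Hedged migration example for the deprecation above. Because the deprecated
# class is just a warning-emitting subclass of CLIPImageProcessor, the
# replacement behaves identically:
#
#   from transformers import CLIPImageProcessor
#   image_processor = CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch32")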
'''simple docstring'''
SCREAMING_SNAKE_CASE_: str =2_56
# Modulus to hash a string
SCREAMING_SNAKE_CASE_: int =1_00_00_03
def lowerCAmelCase_ ( snake_case_ : str , snake_case_ : str ) -> bool:
'''simple docstring'''
UpperCAmelCase_ = len(snake_case_ )
UpperCAmelCase_ = len(snake_case_ )
if p_len > t_len:
return False
UpperCAmelCase_ = 0
UpperCAmelCase_ = 0
UpperCAmelCase_ = 1
# Calculating the hash of pattern and substring of text
for i in range(snake_case_ ):
UpperCAmelCase_ = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
UpperCAmelCase_ = (ord(text[i] ) + text_hash * alphabet_size) % modulus
if i == p_len - 1:
continue
UpperCAmelCase_ = (modulus_power * alphabet_size) % modulus
for i in range(0 , t_len - p_len + 1 ):
if text_hash == p_hash and text[i : i + p_len] == pattern:
return True
if i == t_len - p_len:
continue
# Calculate the https://en.wikipedia.org/wiki/Rolling_hash
UpperCAmelCase_ = (
(text_hash - ord(text[i] ) * modulus_power) * alphabet_size
+ ord(text[i + p_len] )
) % modulus
return False
def lowerCAmelCase_ ( ) -> None:
'''simple docstring'''
UpperCAmelCase_ = "abc1abc12"
UpperCAmelCase_ = "alskfjaldsabc1abc1abc12k23adsfabcabc"
UpperCAmelCase_ = "alskfjaldsk23adsfabcabc"
assert rabin_karp(snake_case_ , snake_case_ ) and not rabin_karp(snake_case_ , snake_case_ )
# Test 2)
UpperCAmelCase_ = "ABABX"
UpperCAmelCase_ = "ABABZABABYABABX"
assert rabin_karp(snake_case_ , snake_case_ )
# Test 3)
UpperCAmelCase_ = "AAAB"
UpperCAmelCase_ = "ABAAAAAB"
assert rabin_karp(snake_case_ , snake_case_ )
# Test 4)
UpperCAmelCase_ = "abcdabcy"
UpperCAmelCase_ = "abcxabcdabxabcdabcdabcy"
assert rabin_karp(snake_case_ , snake_case_ )
# Test 5)
UpperCAmelCase_ = "Lü"
UpperCAmelCase_ = "Lüsai"
assert rabin_karp(snake_case_ , snake_case_ )
UpperCAmelCase_ = "Lue"
assert not rabin_karp(snake_case_ , snake_case_ )
print("Success." )
if __name__ == "__main__":
test_rabin_karp()
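# Hedged worked example of the rolling hash above. With alphabet_size = 256
# and modulus = 1000003, hashing "ab" and rolling one step to "bc" inside the
# text "abc":
#
#   hash("ab") = ord("b") + ord("a") * 256            = 98 + 97 * 256 = 24930
#   modulus_power = 256 ** (p_len - 1)                = 256
#   roll:  ((24930 - ord("a") * 256) * 256 + ord("c")) % 1000003
#        = (98 * 256 + 99) % 1000003                  = 25187
#   check: hash("bc") = ord("c") + ord("b") * 256     = 99 + 98 * 256 = 25187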
'''simple docstring'''
from __future__ import annotations
SCREAMING_SNAKE_CASE_: Any =[]
def lowerCAmelCase_ ( snake_case_ : list[list[int]] , snake_case_ : int , snake_case_ : int ) -> bool:
'''simple docstring'''
for i in range(len(snake_case_ ) ):
if board[row][i] == 1:
return False
for i in range(len(snake_case_ ) ):
if board[i][column] == 1:
return False
for i, j in zip(range(snake_case_ , -1 , -1 ) , range(snake_case_ , -1 , -1 ) ):
if board[i][j] == 1:
return False
for i, j in zip(range(snake_case_ , -1 , -1 ) , range(snake_case_ , len(snake_case_ ) ) ):
if board[i][j] == 1:
return False
return True
def lowerCAmelCase_ ( snake_case_ : list[list[int]] , snake_case_ : int ) -> bool:
'''simple docstring'''
if row >= len(snake_case_ ):
solution.append(snake_case_ )
printboard(snake_case_ )
print()
return True
for i in range(len(snake_case_ ) ):
if is_safe(snake_case_ , snake_case_ , snake_case_ ):
UpperCAmelCase_ = 1
solve(snake_case_ , row + 1 )
UpperCAmelCase_ = 0
return False
def lowerCAmelCase_ ( snake_case_ : list[list[int]] ) -> None:
'''Print the board, marking queens with "Q" and empty squares with ".".'''
for i in range(len(snake_case_ ) ):
for j in range(len(snake_case_ ) ):
if board[i][j] == 1:
print("Q" , end=" " )
else:
print("." , end=" " )
print()
# n=int(input("The no. of queens"))
SCREAMING_SNAKE_CASE_: Tuple =8
SCREAMING_SNAKE_CASE_: Optional[Any] =[[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print('The total no. of solutions are :', len(solution))
| 1 | '''simple docstring'''
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
SCREAMING_SNAKE_CASE_: Optional[int] =logging.get_logger(__name__)
@add_end_docstrings(
UpperCamelCase__ , r"""
top_k (`int`, defaults to 5):
The number of predictions to return.
targets (`str` or `List[str]`, *optional*):
When passed, the model will limit the scores to the passed targets instead of looking up in the whole
vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
token will be used (with a warning, and that might be slower).
""" , )
class __A ( UpperCamelCase__ ):
def _lowercase (self : str , __a : GenericTensor ):
if self.framework == "tf":
UpperCAmelCase_ = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
elif self.framework == "pt":
UpperCAmelCase_ = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=__a )
else:
raise ValueError("Unsupported framework" )
return masked_index
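# masked_index holds the positions of every mask token in input_ids;
# the check in the next method requires at least one mask per sample.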
def _lowercase (self : Tuple , __a : GenericTensor ):
UpperCAmelCase_ = self.get_masked_index(__a )
UpperCAmelCase_ = np.prod(masked_index.shape )
if numel < 1:
raise PipelineException(
"fill-mask" , self.model.base_model_prefix , f"""No mask_token ({self.tokenizer.mask_token}) found on the input""" , )
def _lowercase (self : List[Any] , __a : GenericTensor ):
if isinstance(__a , __a ):
for model_input in model_inputs:
self._ensure_exactly_one_mask_token(model_input["input_ids"][0] )
else:
for input_ids in model_inputs["input_ids"]:
self._ensure_exactly_one_mask_token(__a )
def _lowercase (self : Tuple , __a : Dict , __a : List[str]=None , **__a : Any ):
if return_tensors is None:
UpperCAmelCase_ = self.framework
UpperCAmelCase_ = self.tokenizer(__a , return_tensors=__a )
self.ensure_exactly_one_mask_token(__a )
return model_inputs
def _lowercase (self : str , __a : Optional[int] ):
UpperCAmelCase_ = self.model(**__a )
UpperCAmelCase_ = model_inputs["input_ids"]
return model_outputs
def _lowercase (self : List[str] , __a : Tuple , __a : int=5 , __a : Dict=None ):
# Cap top_k if there are targets
if target_ids is not None and target_ids.shape[0] < top_k:
UpperCAmelCase_ = target_ids.shape[0]
UpperCAmelCase_ = model_outputs["input_ids"][0]
UpperCAmelCase_ = model_outputs["logits"]
if self.framework == "tf":
UpperCAmelCase_ = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
UpperCAmelCase_ = outputs.numpy()
UpperCAmelCase_ = outputs[0, masked_index, :]
UpperCAmelCase_ = stable_softmax(__a , axis=-1 )
if target_ids is not None:
UpperCAmelCase_ = tf.gather_nd(tf.squeeze(__a , 0 ) , target_ids.reshape(-1 , 1 ) )
UpperCAmelCase_ = tf.expand_dims(__a , 0 )
UpperCAmelCase_ = tf.math.top_k(__a , k=__a )
UpperCAmelCase_ , UpperCAmelCase_ = topk.values.numpy(), topk.indices.numpy()
else:
UpperCAmelCase_ = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=__a ).squeeze(-1 )
# Fill mask pipeline supports only one ${mask_token} per sample
UpperCAmelCase_ = outputs[0, masked_index, :]
UpperCAmelCase_ = logits.softmax(dim=-1 )
if target_ids is not None:
UpperCAmelCase_ = probs[..., target_ids]
UpperCAmelCase_ , UpperCAmelCase_ = probs.topk(__a )
UpperCAmelCase_ = []
UpperCAmelCase_ = values.shape[0] == 1
for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ):
UpperCAmelCase_ = []
for v, p in zip(_values , _predictions ):
# Copy is important since we're going to modify this array in place
UpperCAmelCase_ = input_ids.numpy().copy()
if target_ids is not None:
UpperCAmelCase_ = target_ids[p].tolist()
UpperCAmelCase_ = p
# Filter padding out:
UpperCAmelCase_ = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
# Originally we skip special tokens to give readable output.
# For multi masks though, the other [MASK] would be removed otherwise
# making the output look odd, so we add them back
UpperCAmelCase_ = self.tokenizer.decode(__a , skip_special_tokens=__a )
UpperCAmelCase_ = {"score": v, "token": p, "token_str": self.tokenizer.decode([p] ), "sequence": sequence}
row.append(__a )
result.append(__a )
if single_mask:
return result[0]
return result
def _lowercase (self : Dict , __a : List[Any] , __a : List[str]=None ):
if isinstance(__a , __a ):
UpperCAmelCase_ = [targets]
try:
UpperCAmelCase_ = self.tokenizer.get_vocab()
except Exception:
UpperCAmelCase_ = {}
UpperCAmelCase_ = []
for target in targets:
UpperCAmelCase_ = vocab.get(__a , __a )
if id_ is None:
UpperCAmelCase_ = self.tokenizer(
__a , add_special_tokens=__a , return_attention_mask=__a , return_token_type_ids=__a , max_length=1 , truncation=__a , )["input_ids"]
if len(__a ) == 0:
logger.warning(
f"""The specified target token `{target}` does not exist in the model vocabulary. """
"We cannot replace it with anything meaningful, ignoring it" )
continue
UpperCAmelCase_ = input_ids[0]
# XXX: This fallback tokenization path is slow; the warning lets
# users fix their input to get faster performance.
logger.warning(
f"""The specified target token `{target}` does not exist in the model vocabulary. """
f"""Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.""" )
target_ids.append(id_ )
UpperCAmelCase_ = list(set(__a ) )
if len(__a ) == 0:
raise ValueError("At least one target must be provided when passed." )
UpperCAmelCase_ = np.array(__a )
return target_ids
def _lowercase (self : Tuple , __a : Dict=None , __a : List[str]=None ):
UpperCAmelCase_ = {}
if targets is not None:
UpperCAmelCase_ = self.get_target_ids(__a , __a )
UpperCAmelCase_ = target_ids
if top_k is not None:
UpperCAmelCase_ = top_k
if self.tokenizer.mask_token_id is None:
raise PipelineException(
"fill-mask" , self.model.base_model_prefix , "The tokenizer does not define a `mask_token`." )
return {}, {}, postprocess_params
def __call__(self : Union[str, Any] , __a : str , *__a : Any , **__a : Tuple ):
UpperCAmelCase_ = super().__call__(__a , **__a )
if isinstance(__a , __a ) and len(__a ) == 1:
return outputs[0]
return outputs
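# Hedged usage sketch (not part of the original file; the model id is illustrative):
# from transformers import pipeline
# fill = pipeline("fill-mask", model="distilroberta-base")
# fill("Paris is the <mask> of France.", top_k=3, targets=["capital", "city"])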
| 1 | 1 |
'''simple docstring'''
def lowerCAmelCase_ ( ) -> int:
'''With only 1p coins there is exactly one way to make any amount.'''
return 1
def lowerCAmelCase_ ( snake_case_ : int ) -> int:
'''Ways to make x pence from 1p and 2p coins.'''
return 0 if x < 0 else two_pence(x - 2 ) + one_pence()
def lowerCAmelCase_ ( snake_case_ : int ) -> int:
'''Ways to make x pence from coins up to 5p.'''
return 0 if x < 0 else five_pence(x - 5 ) + two_pence(snake_case_ )
def lowerCAmelCase_ ( snake_case_ : int ) -> int:
'''Ways to make x pence from coins up to 10p.'''
return 0 if x < 0 else ten_pence(x - 10 ) + five_pence(snake_case_ )
def lowerCAmelCase_ ( snake_case_ : int ) -> int:
'''Ways to make x pence from coins up to 20p.'''
return 0 if x < 0 else twenty_pence(x - 20 ) + ten_pence(snake_case_ )
def lowerCAmelCase_ ( snake_case_ : int ) -> int:
'''Ways to make x pence from coins up to 50p.'''
return 0 if x < 0 else fifty_pence(x - 50 ) + twenty_pence(snake_case_ )
def lowerCAmelCase_ ( snake_case_ : int ) -> int:
'''Ways to make x pence from coins up to 1 pound.'''
return 0 if x < 0 else one_pound(x - 1_00 ) + fifty_pence(snake_case_ )
def lowerCAmelCase_ ( snake_case_ : int ) -> int:
'''Ways to make x pence from coins up to 2 pounds.'''
return 0 if x < 0 else two_pound(x - 2_00 ) + one_pound(snake_case_ )
def lowerCAmelCase_ ( snake_case_ : int = 2_00 ) -> int:
'''Project Euler 31: count the ways to make the amount (default 200p = 2 pounds) from UK coins.'''
return two_pound(snake_case_ )
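# Each helper counts combinations using coins up to its own denomination,
# so two_pound(200) counts every way to form 2 pounds.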
if __name__ == "__main__":
print(solution(int(input().strip())))
| 1 | '''simple docstring'''
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE_: str =logging.getLogger(__name__)
@dataclass(frozen=UpperCamelCase__ )
class __A :
a__ : str
a__ : str
a__ : Optional[str] = None
a__ : Optional[str] = None
a__ : Optional[str] = None
@dataclass(frozen=UpperCamelCase__ )
class __A :
a__ : List[int]
a__ : Optional[List[int]] = None
a__ : Optional[List[int]] = None
a__ : Optional[Union[int, float]] = None
a__ : Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class __A ( UpperCamelCase__ ):
a__ : List[InputFeatures]
def __init__(self : Any , __a : str , __a : PreTrainedTokenizer , __a : str , __a : Optional[int] = None , __a : Dict=False , __a : bool = False , ):
UpperCAmelCase_ = hans_processors[task]()
UpperCAmelCase_ = os.path.join(
__a , "cached_{}_{}_{}_{}".format(
"dev" if evaluate else "train" , tokenizer.__class__.__name__ , str(__a ) , __a , ) , )
UpperCAmelCase_ = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
UpperCAmelCase_ , UpperCAmelCase_ = label_list[2], label_list[1]
UpperCAmelCase_ = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
UpperCAmelCase_ = cached_features_file + ".lock"
with FileLock(__a ):
if os.path.exists(__a ) and not overwrite_cache:
logger.info(f"""Loading features from cached file {cached_features_file}""" )
UpperCAmelCase_ = torch.load(__a )
else:
logger.info(f"""Creating features from dataset file at {data_dir}""" )
UpperCAmelCase_ = (
processor.get_dev_examples(__a ) if evaluate else processor.get_train_examples(__a )
)
logger.info("Training examples: %s" , len(__a ) )
UpperCAmelCase_ = hans_convert_examples_to_features(__a , __a , __a , __a )
logger.info("Saving features into cached file %s" , __a )
torch.save(self.features , __a )
def __len__(self : List[Any] ):
return len(self.features )
def __getitem__(self : Any , __a : Optional[Any] ):
return self.features[i]
def _lowercase (self : Union[str, Any] ):
return self.label_list
if is_tf_available():
import tensorflow as tf
class __A :
a__ : List[InputFeatures]
def __init__(self : Union[str, Any] , __a : str , __a : PreTrainedTokenizer , __a : str , __a : Optional[int] = 128 , __a : Any=False , __a : bool = False , ):
UpperCAmelCase_ = hans_processors[task]()
UpperCAmelCase_ = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
UpperCAmelCase_ , UpperCAmelCase_ = label_list[2], label_list[1]
UpperCAmelCase_ = label_list
UpperCAmelCase_ = processor.get_dev_examples(__a ) if evaluate else processor.get_train_examples(__a )
UpperCAmelCase_ = hans_convert_examples_to_features(__a , __a , __a , __a )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc="convert examples to features" ):
if ex_index % 10000 == 0:
logger.info("Writing example %d of %d" % (ex_index, len(__a )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
UpperCAmelCase_ = tf.data.Dataset.from_generator(
__a , (
{
"example_id": tf.intaa,
"input_ids": tf.intaa,
"attention_mask": tf.intaa,
"token_type_ids": tf.intaa,
},
tf.intaa,
) , (
{
"example_id": tf.TensorShape([] ),
"input_ids": tf.TensorShape([None, None] ),
"attention_mask": tf.TensorShape([None, None] ),
"token_type_ids": tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) , )
def _lowercase (self : int ):
return self.dataset
def __len__(self : Any ):
return len(self.features )
def __getitem__(self : int , __a : Union[str, Any] ):
return self.features[i]
def _lowercase (self : int ):
return self.label_list
class __A ( UpperCamelCase__ ):
def _lowercase (self : List[Any] , __a : Dict ):
return self._create_examples(self._read_tsv(os.path.join(__a , "heuristics_train_set.txt" ) ) , "train" )
def _lowercase (self : Any , __a : List[Any] ):
return self._create_examples(self._read_tsv(os.path.join(__a , "heuristics_evaluation_set.txt" ) ) , "dev" )
def _lowercase (self : Any ):
return ["contradiction", "entailment", "neutral"]
def _lowercase (self : Union[str, Any] , __a : Optional[int] , __a : Union[str, Any] ):
UpperCAmelCase_ = []
for i, line in enumerate(__a ):
if i == 0:
continue
UpperCAmelCase_ = "%s-%s" % (set_type, line[0])
UpperCAmelCase_ = line[5]
UpperCAmelCase_ = line[6]
UpperCAmelCase_ = line[7][2:] if line[7].startswith("ex" ) else line[7]
UpperCAmelCase_ = line[0]
examples.append(InputExample(guid=__a , text_a=__a , text_b=__a , label=__a , pairID=__a ) )
return examples
def lowerCAmelCase_ ( snake_case_ : List[InputExample] , snake_case_ : List[str] , snake_case_ : int , snake_case_ : PreTrainedTokenizer , ) -> Optional[Any]:
'''Tokenize HANS examples and wrap them into InputFeatures with label and pairID.'''
UpperCAmelCase_ = {label: i for i, label in enumerate(snake_case_ )}
UpperCAmelCase_ = []
for ex_index, example in tqdm.tqdm(enumerate(snake_case_ ) , desc="convert examples to features" ):
if ex_index % 1_00_00 == 0:
logger.info("Writing example %d" % (ex_index) )
UpperCAmelCase_ = tokenizer(
example.text_a , example.text_b , add_special_tokens=snake_case_ , max_length=snake_case_ , padding="max_length" , truncation=snake_case_ , return_overflowing_tokens=snake_case_ , )
UpperCAmelCase_ = label_map[example.label] if example.label in label_map else 0
UpperCAmelCase_ = int(example.pairID )
features.append(InputFeatures(**snake_case_ , label=snake_case_ , pairID=snake_case_ ) )
for i, example in enumerate(examples[:5] ):
logger.info("*** Example ***" )
logger.info(f"""guid: {example}""" )
logger.info(f"""features: {features[i]}""" )
return features
SCREAMING_SNAKE_CASE_: int ={
'hans': 3,
}
SCREAMING_SNAKE_CASE_: Any ={
'hans': HansProcessor,
}
| 1 | 1 |
| 1 | '''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_: Dict =logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_: Tuple ={}
class __A ( UpperCamelCase__ ):
a__ : int = """llama"""
a__ : Any = ["""past_key_values"""]
def __init__(self : List[str] , __a : List[str]=32000 , __a : Tuple=4096 , __a : List[Any]=11008 , __a : Dict=32 , __a : Tuple=32 , __a : Any=None , __a : Any="silu" , __a : List[Any]=2048 , __a : List[Any]=0.02 , __a : str=1E-6 , __a : Optional[Any]=True , __a : Union[str, Any]=0 , __a : Any=1 , __a : Dict=2 , __a : Dict=1 , __a : str=False , __a : str=None , **__a : Optional[Any] , ):
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
# for backward compatibility
if num_key_value_heads is None:
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = num_key_value_heads
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = rms_norm_eps
UpperCAmelCase_ = pretraining_tp
UpperCAmelCase_ = use_cache
UpperCAmelCase_ = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , tie_word_embeddings=__a , **__a , )
def _lowercase (self : List[str] ):
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , dict ) or len(self.rope_scaling ) != 2:
raise ValueError(
"`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
f"""got {self.rope_scaling}""" )
UpperCAmelCase_ = self.rope_scaling.get("type" , None )
UpperCAmelCase_ = self.rope_scaling.get("factor" , None )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f"""`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" )
if rope_scaling_factor is None or not isinstance(rope_scaling_factor , float ) or rope_scaling_factor <= 1.0:
raise ValueError(f"""`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}""" )
| 1 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
SCREAMING_SNAKE_CASE_: Tuple ={
'configuration_maskformer': ['MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MaskFormerConfig'],
'configuration_maskformer_swin': ['MaskFormerSwinConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_: int =['MaskFormerFeatureExtractor']
SCREAMING_SNAKE_CASE_: Any =['MaskFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_: List[str] =[
'MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'MaskFormerForInstanceSegmentation',
'MaskFormerModel',
'MaskFormerPreTrainedModel',
]
SCREAMING_SNAKE_CASE_: Dict =[
'MaskFormerSwinBackbone',
'MaskFormerSwinModel',
'MaskFormerSwinPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE_: str =_LazyModule(__name__, globals()['__file__'], _import_structure)
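# _LazyModule defers the heavy torch/vision imports above until one of the
# listed attributes is first accessed, keeping the top-level import cheap.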
| 1 | '''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class __A ( unittest.TestCase ):
def _lowercase (self : Tuple ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _lowercase (self : str ):
UpperCAmelCase_ = 1
UpperCAmelCase_ = 3
UpperCAmelCase_ = (32, 32)
UpperCAmelCase_ = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__a )
return image
@property
def _lowercase (self : int ):
torch.manual_seed(0 )
UpperCAmelCase_ = UNetaDConditionModel(
block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=__a , only_cross_attention=(True, True, False) , num_class_embeds=100 , )
return model
@property
def _lowercase (self : Any ):
torch.manual_seed(0 )
UpperCAmelCase_ = AutoencoderKL(
block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
return model
@property
def _lowercase (self : Optional[Any] ):
torch.manual_seed(0 )
UpperCAmelCase_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=512 , )
return CLIPTextModel(__a )
def _lowercase (self : Any ):
UpperCAmelCase_ = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ = self.dummy_cond_unet_upscale
UpperCAmelCase_ = DDPMScheduler()
UpperCAmelCase_ = DDIMScheduler(prediction_type="v_prediction" )
UpperCAmelCase_ = self.dummy_vae
UpperCAmelCase_ = self.dummy_text_encoder
UpperCAmelCase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
UpperCAmelCase_ = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCAmelCase_ = Image.fromarray(np.uinta(__a ) ).convert("RGB" ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
UpperCAmelCase_ = StableDiffusionUpscalePipeline(
unet=__a , low_res_scheduler=__a , scheduler=__a , vae=__a , text_encoder=__a , tokenizer=__a , max_noise_level=350 , )
UpperCAmelCase_ = sd_pipe.to(__a )
sd_pipe.set_progress_bar_config(disable=__a )
UpperCAmelCase_ = "A painting of a squirrel eating a burger"
UpperCAmelCase_ = torch.Generator(device=__a ).manual_seed(0 )
UpperCAmelCase_ = sd_pipe(
[prompt] , image=__a , generator=__a , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , )
UpperCAmelCase_ = output.images
UpperCAmelCase_ = torch.Generator(device=__a ).manual_seed(0 )
UpperCAmelCase_ = sd_pipe(
[prompt] , image=__a , generator=__a , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , return_dict=__a , )[0]
UpperCAmelCase_ = image[0, -3:, -3:, -1]
UpperCAmelCase_ = image_from_tuple[0, -3:, -3:, -1]
UpperCAmelCase_ = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
UpperCAmelCase_ = np.array([0.31_13, 0.39_10, 0.42_72, 0.48_59, 0.50_61, 0.46_52, 0.53_62, 0.57_15, 0.56_61] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _lowercase (self : Optional[int] ):
UpperCAmelCase_ = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ = self.dummy_cond_unet_upscale
UpperCAmelCase_ = DDPMScheduler()
UpperCAmelCase_ = DDIMScheduler(prediction_type="v_prediction" )
UpperCAmelCase_ = self.dummy_vae
UpperCAmelCase_ = self.dummy_text_encoder
UpperCAmelCase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
UpperCAmelCase_ = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCAmelCase_ = Image.fromarray(np.uinta(__a ) ).convert("RGB" ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
UpperCAmelCase_ = StableDiffusionUpscalePipeline(
unet=__a , low_res_scheduler=__a , scheduler=__a , vae=__a , text_encoder=__a , tokenizer=__a , max_noise_level=350 , )
UpperCAmelCase_ = sd_pipe.to(__a )
sd_pipe.set_progress_bar_config(disable=__a )
UpperCAmelCase_ = "A painting of a squirrel eating a burger"
UpperCAmelCase_ = sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , )
UpperCAmelCase_ = output.images
assert image.shape[0] == 2
UpperCAmelCase_ = torch.Generator(device=__a ).manual_seed(0 )
UpperCAmelCase_ = sd_pipe(
[prompt] , image=__a , generator=__a , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , )
UpperCAmelCase_ = output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def _lowercase (self : str ):
UpperCAmelCase_ = self.dummy_cond_unet_upscale
UpperCAmelCase_ = DDPMScheduler()
UpperCAmelCase_ = DDIMScheduler(prediction_type="v_prediction" )
UpperCAmelCase_ = self.dummy_vae
UpperCAmelCase_ = self.dummy_text_encoder
UpperCAmelCase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
UpperCAmelCase_ = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCAmelCase_ = Image.fromarray(np.uinta(__a ) ).convert("RGB" ).resize((64, 64) )
# put models in fp16, except vae as it overflows in fp16
UpperCAmelCase_ = unet.half()
UpperCAmelCase_ = text_encoder.half()
# make sure here that pndm scheduler skips prk
UpperCAmelCase_ = StableDiffusionUpscalePipeline(
unet=__a , low_res_scheduler=__a , scheduler=__a , vae=__a , text_encoder=__a , tokenizer=__a , max_noise_level=350 , )
UpperCAmelCase_ = sd_pipe.to(__a )
sd_pipe.set_progress_bar_config(disable=__a )
UpperCAmelCase_ = "A painting of a squirrel eating a burger"
UpperCAmelCase_ = torch.manual_seed(0 )
UpperCAmelCase_ = sd_pipe(
[prompt] , image=__a , generator=__a , num_inference_steps=2 , output_type="np" , ).images
UpperCAmelCase_ = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class __A ( unittest.TestCase ):
def _lowercase (self : List[str] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowercase (self : List[Any] ):
UpperCAmelCase_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-upscale/low_res_cat.png" )
UpperCAmelCase_ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
"/upsampled_cat.npy" )
UpperCAmelCase_ = "stabilityai/stable-diffusion-x4-upscaler"
UpperCAmelCase_ = StableDiffusionUpscalePipeline.from_pretrained(__a )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
pipe.enable_attention_slicing()
UpperCAmelCase_ = "a cat sitting on a park bench"
UpperCAmelCase_ = torch.manual_seed(0 )
UpperCAmelCase_ = pipe(
prompt=__a , image=__a , generator=__a , output_type="np" , )
UpperCAmelCase_ = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 1E-3
def _lowercase (self : Tuple ):
UpperCAmelCase_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-upscale/low_res_cat.png" )
UpperCAmelCase_ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
"/upsampled_cat_fp16.npy" )
UpperCAmelCase_ = "stabilityai/stable-diffusion-x4-upscaler"
UpperCAmelCase_ = StableDiffusionUpscalePipeline.from_pretrained(
__a , torch_dtype=torch.floataa , )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
pipe.enable_attention_slicing()
UpperCAmelCase_ = "a cat sitting on a park bench"
UpperCAmelCase_ = torch.manual_seed(0 )
UpperCAmelCase_ = pipe(
prompt=__a , image=__a , generator=__a , output_type="np" , )
UpperCAmelCase_ = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def _lowercase (self : List[Any] ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
UpperCAmelCase_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-upscale/low_res_cat.png" )
UpperCAmelCase_ = "stabilityai/stable-diffusion-x4-upscaler"
UpperCAmelCase_ = StableDiffusionUpscalePipeline.from_pretrained(
__a , torch_dtype=torch.floataa , )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
UpperCAmelCase_ = "a cat sitting on a park bench"
UpperCAmelCase_ = torch.manual_seed(0 )
UpperCAmelCase_ = pipe(
prompt=__a , image=__a , generator=__a , num_inference_steps=5 , output_type="np" , )
UpperCAmelCase_ = torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 10**9
| 1 | 1 |
'''simple docstring'''
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
SCREAMING_SNAKE_CASE_: Tuple ={
'sample_size': 32,
'in_channels': 3,
'out_channels': 3,
'layers_per_block': 2,
'num_class_embeds': 10_00,
'block_out_channels': [32, 64],
'attention_head_dim': 8,
'down_block_types': [
'ResnetDownsampleBlock2D',
'AttnDownBlock2D',
],
'up_block_types': [
'AttnUpBlock2D',
'ResnetUpsampleBlock2D',
],
'resnet_time_scale_shift': 'scale_shift',
'upsample_type': 'resnet',
'downsample_type': 'resnet',
}
SCREAMING_SNAKE_CASE_: Union[str, Any] ={
'sample_size': 64,
'in_channels': 3,
'out_channels': 3,
'layers_per_block': 3,
'num_class_embeds': 10_00,
'block_out_channels': [1_92, 1_92 * 2, 1_92 * 3, 1_92 * 4],
'attention_head_dim': 64,
'down_block_types': [
'ResnetDownsampleBlock2D',
'AttnDownBlock2D',
'AttnDownBlock2D',
'AttnDownBlock2D',
],
'up_block_types': [
'AttnUpBlock2D',
'AttnUpBlock2D',
'AttnUpBlock2D',
'ResnetUpsampleBlock2D',
],
'resnet_time_scale_shift': 'scale_shift',
'upsample_type': 'resnet',
'downsample_type': 'resnet',
}
SCREAMING_SNAKE_CASE_: Tuple ={
'sample_size': 2_56,
'in_channels': 3,
'out_channels': 3,
'layers_per_block': 2,
'num_class_embeds': None,
'block_out_channels': [2_56, 2_56, 2_56 * 2, 2_56 * 2, 2_56 * 4, 2_56 * 4],
'attention_head_dim': 64,
'down_block_types': [
'ResnetDownsampleBlock2D',
'ResnetDownsampleBlock2D',
'ResnetDownsampleBlock2D',
'AttnDownBlock2D',
'AttnDownBlock2D',
'AttnDownBlock2D',
],
'up_block_types': [
'AttnUpBlock2D',
'AttnUpBlock2D',
'AttnUpBlock2D',
'ResnetUpsampleBlock2D',
'ResnetUpsampleBlock2D',
'ResnetUpsampleBlock2D',
],
'resnet_time_scale_shift': 'default',
'upsample_type': 'resnet',
'downsample_type': 'resnet',
}
SCREAMING_SNAKE_CASE_: Any ={
'num_train_timesteps': 40,
'sigma_min': 0.002,
'sigma_max': 80.0,
}
SCREAMING_SNAKE_CASE_: List[str] ={
'num_train_timesteps': 2_01,
'sigma_min': 0.002,
'sigma_max': 80.0,
}
SCREAMING_SNAKE_CASE_: List[Any] ={
'num_train_timesteps': 1_51,
'sigma_min': 0.002,
'sigma_max': 80.0,
}
def lowerCAmelCase_ ( snake_case_ : Optional[int] ) -> Optional[Any]:
'''Parse a truthy/falsy string (or a bool) into a bool, for argparse.'''
if isinstance(snake_case_ , snake_case_ ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError("boolean value expected" )
def lowerCAmelCase_ ( snake_case_ : Optional[Any] , snake_case_ : Optional[Any] , snake_case_ : Any , snake_case_ : Optional[int] , snake_case_ : Dict=False ) -> Optional[int]:
'''Copy one ResNet block's in/emb/out (and optional skip) weights into diffusers naming.'''
UpperCAmelCase_ = checkpoint[f"""{old_prefix}.in_layers.0.weight"""]
UpperCAmelCase_ = checkpoint[f"""{old_prefix}.in_layers.0.bias"""]
UpperCAmelCase_ = checkpoint[f"""{old_prefix}.in_layers.2.weight"""]
UpperCAmelCase_ = checkpoint[f"""{old_prefix}.in_layers.2.bias"""]
UpperCAmelCase_ = checkpoint[f"""{old_prefix}.emb_layers.1.weight"""]
UpperCAmelCase_ = checkpoint[f"""{old_prefix}.emb_layers.1.bias"""]
UpperCAmelCase_ = checkpoint[f"""{old_prefix}.out_layers.0.weight"""]
UpperCAmelCase_ = checkpoint[f"""{old_prefix}.out_layers.0.bias"""]
UpperCAmelCase_ = checkpoint[f"""{old_prefix}.out_layers.3.weight"""]
UpperCAmelCase_ = checkpoint[f"""{old_prefix}.out_layers.3.bias"""]
if has_skip:
UpperCAmelCase_ = checkpoint[f"""{old_prefix}.skip_connection.weight"""]
UpperCAmelCase_ = checkpoint[f"""{old_prefix}.skip_connection.bias"""]
return new_checkpoint
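# The original attention blocks store q, k and v as one fused 1x1-conv weight;
# the converter below chunks it into three tensors and squeezes away the conv
# dimensions to obtain plain linear projection weights.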
def lowerCAmelCase_ ( snake_case_ : List[Any] , snake_case_ : Optional[int] , snake_case_ : int , snake_case_ : Any , snake_case_ : Any=None ) -> List[str]:
'''Split the fused qkv conv weight into separate q/k/v projections with linear shapes.'''
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = checkpoint[f"""{old_prefix}.qkv.weight"""].chunk(3 , dim=0 )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = checkpoint[f"""{old_prefix}.qkv.bias"""].chunk(3 , dim=0 )
UpperCAmelCase_ = checkpoint[f"""{old_prefix}.norm.weight"""]
UpperCAmelCase_ = checkpoint[f"""{old_prefix}.norm.bias"""]
UpperCAmelCase_ = weight_q.squeeze(-1 ).squeeze(-1 )
UpperCAmelCase_ = bias_q.squeeze(-1 ).squeeze(-1 )
UpperCAmelCase_ = weight_k.squeeze(-1 ).squeeze(-1 )
UpperCAmelCase_ = bias_k.squeeze(-1 ).squeeze(-1 )
UpperCAmelCase_ = weight_v.squeeze(-1 ).squeeze(-1 )
UpperCAmelCase_ = bias_v.squeeze(-1 ).squeeze(-1 )
UpperCAmelCase_ = (
checkpoint[f"""{old_prefix}.proj_out.weight"""].squeeze(-1 ).squeeze(-1 )
)
UpperCAmelCase_ = checkpoint[f"""{old_prefix}.proj_out.bias"""].squeeze(-1 ).squeeze(-1 )
return new_checkpoint
def lowerCAmelCase_ ( snake_case_ : str , snake_case_ : int ) -> Dict:
'''Convert an original consistency-model checkpoint into a diffusers UNet state dict.'''
UpperCAmelCase_ = torch.load(snake_case_ , map_location="cpu" )
UpperCAmelCase_ = {}
UpperCAmelCase_ = checkpoint["time_embed.0.weight"]
UpperCAmelCase_ = checkpoint["time_embed.0.bias"]
UpperCAmelCase_ = checkpoint["time_embed.2.weight"]
UpperCAmelCase_ = checkpoint["time_embed.2.bias"]
if unet_config["num_class_embeds"] is not None:
UpperCAmelCase_ = checkpoint["label_emb.weight"]
UpperCAmelCase_ = checkpoint["input_blocks.0.0.weight"]
UpperCAmelCase_ = checkpoint["input_blocks.0.0.bias"]
UpperCAmelCase_ = unet_config["down_block_types"]
UpperCAmelCase_ = unet_config["layers_per_block"]
UpperCAmelCase_ = unet_config["attention_head_dim"]
UpperCAmelCase_ = unet_config["block_out_channels"]
UpperCAmelCase_ = 1
UpperCAmelCase_ = channels_list[0]
for i, layer_type in enumerate(snake_case_ ):
UpperCAmelCase_ = channels_list[i]
UpperCAmelCase_ = current_channels != prev_channels
if layer_type == "ResnetDownsampleBlock2D":
for j in range(snake_case_ ):
UpperCAmelCase_ = f"""down_blocks.{i}.resnets.{j}"""
UpperCAmelCase_ = f"""input_blocks.{current_layer}.0"""
UpperCAmelCase_ = True if j == 0 and downsample_block_has_skip else False
UpperCAmelCase_ = convert_resnet(snake_case_ , snake_case_ , snake_case_ , snake_case_ , has_skip=snake_case_ )
current_layer += 1
elif layer_type == "AttnDownBlock2D":
for j in range(snake_case_ ):
UpperCAmelCase_ = f"""down_blocks.{i}.resnets.{j}"""
UpperCAmelCase_ = f"""input_blocks.{current_layer}.0"""
UpperCAmelCase_ = True if j == 0 and downsample_block_has_skip else False
UpperCAmelCase_ = convert_resnet(snake_case_ , snake_case_ , snake_case_ , snake_case_ , has_skip=snake_case_ )
UpperCAmelCase_ = f"""down_blocks.{i}.attentions.{j}"""
UpperCAmelCase_ = f"""input_blocks.{current_layer}.1"""
UpperCAmelCase_ = convert_attention(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
current_layer += 1
if i != len(snake_case_ ) - 1:
UpperCAmelCase_ = f"""down_blocks.{i}.downsamplers.0"""
UpperCAmelCase_ = f"""input_blocks.{current_layer}.0"""
UpperCAmelCase_ = convert_resnet(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
current_layer += 1
UpperCAmelCase_ = current_channels
# hardcoded the mid-block for now
UpperCAmelCase_ = "mid_block.resnets.0"
UpperCAmelCase_ = "middle_block.0"
UpperCAmelCase_ = convert_resnet(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
UpperCAmelCase_ = "mid_block.attentions.0"
UpperCAmelCase_ = "middle_block.1"
UpperCAmelCase_ = convert_attention(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
UpperCAmelCase_ = "mid_block.resnets.1"
UpperCAmelCase_ = "middle_block.2"
UpperCAmelCase_ = convert_resnet(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
UpperCAmelCase_ = 0
UpperCAmelCase_ = unet_config["up_block_types"]
for i, layer_type in enumerate(snake_case_ ):
if layer_type == "ResnetUpsampleBlock2D":
for j in range(layers_per_block + 1 ):
UpperCAmelCase_ = f"""up_blocks.{i}.resnets.{j}"""
UpperCAmelCase_ = f"""output_blocks.{current_layer}.0"""
UpperCAmelCase_ = convert_resnet(snake_case_ , snake_case_ , snake_case_ , snake_case_ , has_skip=snake_case_ )
current_layer += 1
if i != len(snake_case_ ) - 1:
UpperCAmelCase_ = f"""up_blocks.{i}.upsamplers.0"""
UpperCAmelCase_ = f"""output_blocks.{current_layer-1}.1"""
UpperCAmelCase_ = convert_resnet(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
elif layer_type == "AttnUpBlock2D":
for j in range(layers_per_block + 1 ):
UpperCAmelCase_ = f"""up_blocks.{i}.resnets.{j}"""
UpperCAmelCase_ = f"""output_blocks.{current_layer}.0"""
UpperCAmelCase_ = convert_resnet(snake_case_ , snake_case_ , snake_case_ , snake_case_ , has_skip=snake_case_ )
UpperCAmelCase_ = f"""up_blocks.{i}.attentions.{j}"""
UpperCAmelCase_ = f"""output_blocks.{current_layer}.1"""
UpperCAmelCase_ = convert_attention(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
current_layer += 1
if i != len(snake_case_ ) - 1:
UpperCAmelCase_ = f"""up_blocks.{i}.upsamplers.0"""
UpperCAmelCase_ = f"""output_blocks.{current_layer-1}.2"""
UpperCAmelCase_ = convert_resnet(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
UpperCAmelCase_ = checkpoint["out.0.weight"]
UpperCAmelCase_ = checkpoint["out.0.bias"]
UpperCAmelCase_ = checkpoint["out.2.weight"]
UpperCAmelCase_ = checkpoint["out.2.bias"]
return new_checkpoint
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_: Tuple =argparse.ArgumentParser()
parser.add_argument('--unet_path', default=None, type=str, required=True, help='Path to the unet.pt to convert.')
parser.add_argument(
'--dump_path', default=None, type=str, required=True, help='Path to output the converted UNet model.'
)
parser.add_argument('--class_cond', default=True, type=str, help='Whether the model is class-conditional.')
SCREAMING_SNAKE_CASE_: Union[str, Any] =parser.parse_args()
SCREAMING_SNAKE_CASE_: Optional[Any] =strabool(args.class_cond)
SCREAMING_SNAKE_CASE_: List[Any] =os.path.basename(args.unet_path)
print(f"Checkpoint: {ckpt_name}")
# Get U-Net config
if "imagenet64" in ckpt_name:
SCREAMING_SNAKE_CASE_: int =IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
SCREAMING_SNAKE_CASE_: Tuple =LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
SCREAMING_SNAKE_CASE_: Any =TEST_UNET_CONFIG
else:
raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")
if not args.class_cond:
SCREAMING_SNAKE_CASE_: Union[str, Any] =None
SCREAMING_SNAKE_CASE_: Any =con_pt_to_diffuser(args.unet_path, unet_config)
SCREAMING_SNAKE_CASE_: str =UNetaDModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
SCREAMING_SNAKE_CASE_: str =CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
SCREAMING_SNAKE_CASE_: List[Any] =CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
SCREAMING_SNAKE_CASE_: Optional[Any] =CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")
SCREAMING_SNAKE_CASE_: Union[str, Any] =CMStochasticIterativeScheduler(**scheduler_config)
SCREAMING_SNAKE_CASE_: Union[str, Any] =ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
| 1 | '''simple docstring'''
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class __A ( UpperCamelCase__ ):
def __init__(self : int , __a : Distribution , __a : Dict=None , __a : int=None , __a : Any=0 ):
UpperCAmelCase_ = 1.0 if scale is None else scale
UpperCAmelCase_ = 0.0 if loc is None else loc
super().__init__(__a , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=__a )] )
@property
def _lowercase (self : Union[str, Any] ):
return self.base_dist.mean * self.scale + self.loc
@property
def _lowercase (self : List[Any] ):
return self.base_dist.variance * self.scale**2
@property
def _lowercase (self : List[Any] ):
return self.variance.sqrt()
class __A ( nn.Module ):
def __init__(self : Optional[int] , __a : int , __a : Dict[str, int] , __a : Callable[..., Tuple[torch.Tensor]] , **__a : List[str] ):
super().__init__(**__a )
UpperCAmelCase_ = args_dim
UpperCAmelCase_ = nn.ModuleList([nn.Linear(__a , __a ) for dim in args_dim.values()] )
UpperCAmelCase_ = domain_map
def _lowercase (self : List[str] , __a : torch.Tensor ):
UpperCAmelCase_ = [proj(__a ) for proj in self.proj]
return self.domain_map(*__a )
class __A ( nn.Module ):
def __init__(self : Union[str, Any] , __a : List[str] ):
super().__init__()
UpperCAmelCase_ = function
def _lowercase (self : Optional[int] , __a : List[str] , *__a : Optional[int] ):
return self.function(__a , *__a )
class __A :
a__ : type
a__ : int
a__ : Dict[str, int]
def __init__(self : List[Any] , __a : int = 1 ):
UpperCAmelCase_ = dim
UpperCAmelCase_ = {k: dim * self.args_dim[k] for k in self.args_dim}
def _lowercase (self : Any , __a : Any ):
if self.dim == 1:
return self.distribution_class(*__a )
else:
return Independent(self.distribution_class(*__a ) , 1 )
def _lowercase (self : List[str] , __a : Union[str, Any] , __a : Optional[torch.Tensor] = None , __a : Optional[torch.Tensor] = None , ):
UpperCAmelCase_ = self._base_distribution(__a )
if loc is None and scale is None:
return distr
else:
return AffineTransformed(__a , loc=__a , scale=__a , event_dim=self.event_dim )
@property
def _lowercase (self : Any ):
return () if self.dim == 1 else (self.dim,)
@property
def _lowercase (self : Dict ):
return len(self.event_shape )
@property
def _lowercase (self : Tuple ):
return 0.0
def _lowercase (self : List[str] , __a : int ):
return ParameterProjection(
in_features=__a , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , )
def _lowercase (self : Optional[int] , *__a : torch.Tensor ):
raise NotImplementedError()
@staticmethod
def _lowercase (__a : torch.Tensor ):
# Smooth map onto the positive reals: (x + sqrt(x**2 + 4)) / 2, a "squareplus".
return (__a + torch.sqrt(torch.square(__a ) + 4.0 )) / 2.0
class __A ( UpperCamelCase__ ):
a__ : Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
a__ : type = StudentT
@classmethod
def _lowercase (cls : Union[str, Any] , __a : torch.Tensor , __a : torch.Tensor , __a : torch.Tensor ):
UpperCAmelCase_ = cls.squareplus(__a ).clamp_min(torch.finfo(scale.dtype ).eps )
UpperCAmelCase_ = 2.0 + cls.squareplus(__a )
return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 )
class __A ( UpperCamelCase__ ):
a__ : Dict[str, int] = {"loc": 1, "scale": 1}
a__ : type = Normal
@classmethod
def _lowercase (cls : Tuple , __a : torch.Tensor , __a : torch.Tensor ):
UpperCAmelCase_ = cls.squareplus(__a ).clamp_min(torch.finfo(scale.dtype ).eps )
return loc.squeeze(-1 ), scale.squeeze(-1 )
class __A ( UpperCamelCase__ ):
a__ : Dict[str, int] = {"total_count": 1, "logits": 1}
a__ : type = NegativeBinomial
@classmethod
def _lowercase (cls : Optional[Any] , __a : torch.Tensor , __a : torch.Tensor ):
UpperCAmelCase_ = cls.squareplus(__a )
return total_count.squeeze(-1 ), logits.squeeze(-1 )
def _lowercase (self : List[str] , __a : str ):
UpperCAmelCase_ , UpperCAmelCase_ = distr_args
if self.dim == 1:
return self.distribution_class(total_count=__a , logits=__a )
else:
return Independent(self.distribution_class(total_count=__a , logits=__a ) , 1 )
def _lowercase (self : Optional[Any] , __a : int , __a : Optional[torch.Tensor] = None , __a : Optional[torch.Tensor] = None ):
UpperCAmelCase_ , UpperCAmelCase_ = distr_args
if scale is not None:
# See scaling property of Gamma.
logits += scale.log()
return self._base_distribution((total_count, logits) )
| 1 | 1 |
'''simple docstring'''
import pprint
import requests
SCREAMING_SNAKE_CASE_: int ='https://zenquotes.io/api'
def lowerCAmelCase_ ( ) -> list:
'''Fetch today's quotes from the ZenQuotes API.'''
return requests.get(API_ENDPOINT_URL + "/today" ).json()
def lowerCAmelCase_ ( ) -> list:
'''Fetch random quotes from the ZenQuotes API.'''
return requests.get(API_ENDPOINT_URL + "/random" ).json()
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_: Optional[Any] =random_quotes()
pprint.pprint(response)
| 1 | '''simple docstring'''
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
SCREAMING_SNAKE_CASE_: Optional[Any] ='\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n'
SCREAMING_SNAKE_CASE_: Union[str, Any] ='\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.\n'
SCREAMING_SNAKE_CASE_: List[Any] =r'\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting "1/2" to "\\frac{1}{2}")\n\nExamples:\n >>> metric = datasets.load_metric("competition_math")\n >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])\n >>> print(results)\n {\'accuracy\': 1.0}\n'
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __A ( datasets.Metric ):
def _lowercase (self : Optional[Any] ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" ),
"references": datasets.Value("string" ),
} ) , homepage="https://github.com/hendrycks/math" , codebase_urls=["https://github.com/hendrycks/math"] , )
def _lowercase (self : Tuple , __a : Optional[int] , __a : List[Any] ):
UpperCAmelCase_ = 0.0
for i, j in zip(__a , __a ):
n_correct += 1.0 if math_equivalence.is_equiv(__a , __a ) else 0.0
UpperCAmelCase_ = n_correct / len(__a )
return {
"accuracy": accuracy,
}
| 1 | 1 |
'''simple docstring'''
def lowerCAmelCase_ ( snake_case_ : float , snake_case_ : float , snake_case_ : int ) -> float:
'''Compute the fixed monthly payment (EMI) for a loan.'''
if principal <= 0:
raise Exception("Principal borrowed must be > 0" )
if rate_per_annum < 0:
raise Exception("Rate of interest must be >= 0" )
if years_to_repay <= 0 or not isinstance(snake_case_ , snake_case_ ):
raise Exception("Years to repay must be an integer > 0" )
# Yearly rate is divided by 12 to get monthly rate
UpperCAmelCase_ = rate_per_annum / 12
# Years to repay is multiplied by 12 to get number of payments as payment is monthly
UpperCAmelCase_ = years_to_repay * 12
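# EMI formula: P * r * (1 + r)**n / ((1 + r)**n - 1),
# with principal P, monthly rate r and n monthly payments.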
return (
principal
* rate_per_month
* (1 + rate_per_month) ** number_of_payments
/ ((1 + rate_per_month) ** number_of_payments - 1)
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 1 | '''simple docstring'''
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def lowerCAmelCase_ ( snake_case_ : Dict , snake_case_ : Union[str, Any] , snake_case_ : Optional[Any] , snake_case_ : List[Any] ) -> List[Any]:
'''Assert the two models' gradients are in sync exactly when a step has been taken.'''
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), f"""Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"""
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), f"""Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"""
def lowerCAmelCase_ ( snake_case_ : Any , snake_case_ : Tuple , snake_case_ : Any , snake_case_ : List[Any] , snake_case_ : str=True ) -> Optional[Any]:
'''One forward/backward pass on an MSE regression loss.'''
model.train()
UpperCAmelCase_ = model(snake_case_ )
UpperCAmelCase_ = F.mse_loss(snake_case_ , target.to(output.device ) )
if not do_backward:
loss /= accelerator.gradient_accumulation_steps
loss.backward()
else:
accelerator.backward(snake_case_ )
def lowerCAmelCase_ ( snake_case_ : Optional[Any] , snake_case_ : Any=False ) -> Dict:
'''Build a regression model, a prepared copy and a dataloader (optionally optimizers and schedulers).'''
set_seed(42 )
UpperCAmelCase_ = RegressionModel()
UpperCAmelCase_ = deepcopy(snake_case_ )
UpperCAmelCase_ = RegressionDataset(length=80 )
UpperCAmelCase_ = DataLoader(snake_case_ , batch_size=16 )
model.to(accelerator.device )
if sched:
UpperCAmelCase_ = AdamW(params=model.parameters() , lr=1E-3 )
UpperCAmelCase_ = AdamW(params=ddp_model.parameters() , lr=1E-3 )
UpperCAmelCase_ = LambdaLR(snake_case_ , lr_lambda=lambda snake_case_ : epoch**0.65 )
UpperCAmelCase_ = LambdaLR(snake_case_ , lr_lambda=lambda snake_case_ : epoch**0.65 )
# Make a copy of `model`
if sched:
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = accelerator.prepare(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
else:
UpperCAmelCase_ , UpperCAmelCase_ = accelerator.prepare(snake_case_ , snake_case_ )
if sched:
return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
return model, ddp_model, dataloader
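# In the no-op case below, gradients of the plain model and the prepared copy
# must match after every step, whether or not no_sync wrapped the backward.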
def lowerCAmelCase_ ( snake_case_ : Any ) -> int:
'''With `no_sync` acting as a no-op, model and ddp_model grads must always match.'''
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = get_training_setup(snake_case_ )
# Use a single batch
UpperCAmelCase_ , UpperCAmelCase_ = next(iter(snake_case_ ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
UpperCAmelCase_ , UpperCAmelCase_ = accelerator.gather((ddp_input, ddp_target) )
UpperCAmelCase_ , UpperCAmelCase_ = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(snake_case_ ):
step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
else:
# Sync grads
step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad , ddp_param.grad ), f"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
# Shuffle ddp_input on each iteration
torch.manual_seed(13_37 + iteration )
UpperCAmelCase_ = ddp_input[torch.randperm(len(snake_case_ ) )]
def lowerCAmelCase_ ( snake_case_ : Tuple ) -> str:
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = get_training_setup(snake_case_ )
# Use a single batch
UpperCAmelCase_ , UpperCAmelCase_ = next(iter(snake_case_ ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
UpperCAmelCase_ , UpperCAmelCase_ = accelerator.gather((ddp_input, ddp_target) )
UpperCAmelCase_ , UpperCAmelCase_ = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(snake_case_ ):
step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
else:
# Sync grads
step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), f"""Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"""
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), f"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
# Shuffle ddp_input on each iteration
torch.manual_seed(13_37 + iteration )
UpperCAmelCase_ = ddp_input[torch.randperm(len(snake_case_ ) )]
def lowerCAmelCase_ ( snake_case_ : Optional[int]=False , snake_case_ : str=False ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = Accelerator(
split_batches=snake_case_ , dispatch_batches=snake_case_ , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = get_training_setup(snake_case_ )
for iteration, batch in enumerate(snake_case_ ):
UpperCAmelCase_ , UpperCAmelCase_ = batch.values()
# Gather the distributed inputs and targs for the base model
UpperCAmelCase_ , UpperCAmelCase_ = accelerator.gather((ddp_input, ddp_target) )
UpperCAmelCase_ , UpperCAmelCase_ = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# Do "gradient accumulation" (noop)
with accelerator.accumulate(snake_case_ ):
step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(snake_case_ ) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), f"""Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), f"""Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"""
# Shuffle ddp_input on each iteration
torch.manual_seed(13_37 + iteration )
UpperCAmelCase_ = ddp_input[torch.randperm(len(snake_case_ ) )]
GradientState._reset_state()
def lowerCAmelCase_ ( snake_case_ : Optional[Any]=False , snake_case_ : Tuple=False ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = Accelerator(
split_batches=snake_case_ , dispatch_batches=snake_case_ , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = get_training_setup(snake_case_ , snake_case_ )
for iteration, batch in enumerate(snake_case_ ):
UpperCAmelCase_ , UpperCAmelCase_ = batch.values()
# Gather the distributed inputs and targs for the base model
UpperCAmelCase_ , UpperCAmelCase_ = accelerator.gather((ddp_input, ddp_target) )
UpperCAmelCase_ , UpperCAmelCase_ = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
model.train()
ddp_model.train()
step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(snake_case_ )):
if split_batches:
sched.step()
else:
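                # Without `split_batches`, the prepared scheduler is stepped once
                # per process for each optimizer step, so the baseline scheduler
                # must be stepped `num_processes` times to stay aligned.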
for _ in range(accelerator.num_processes ):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(snake_case_ ):
step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), f"""Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n"""
UpperCAmelCase_ = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(snake_case_ ))
if accelerator.num_processes > 1:
check_model_parameters(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# Shuffle ddp_input on each iteration
torch.manual_seed(13_37 + iteration )
GradientState._reset_state()
def lowerCAmelCase_ ( ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ = Accelerator()
UpperCAmelCase_ = RegressionDataset(length=80 )
UpperCAmelCase_ = DataLoader(snake_case_ , batch_size=16 )
UpperCAmelCase_ = RegressionDataset(length=96 )
UpperCAmelCase_ = DataLoader(snake_case_ , batch_size=16 )
UpperCAmelCase_ , UpperCAmelCase_ = accelerator.prepare(snake_case_ , snake_case_ )
assert accelerator.gradient_state.active_dataloader is None
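    # While iterating, the gradient state must track the innermost dataloader;
    # a nested loop swaps it in and restores the outer one when it finishes.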
for iteration, _ in enumerate(snake_case_ ):
assert id(accelerator.gradient_state.active_dataloader ) == id(snake_case_ )
if iteration < len(snake_case_ ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
for batch_num, _ in enumerate(snake_case_ ):
assert id(accelerator.gradient_state.active_dataloader ) == id(snake_case_ )
if batch_num < len(snake_case_ ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def lowerCAmelCase_ ( ) -> str:
'''simple docstring'''
UpperCAmelCase_ = Accelerator()
UpperCAmelCase_ = accelerator.state
if state.local_process_index == 0:
print("**Test `accumulate` gradient accumulation with dataloader break**" )
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print("**Test NOOP `no_sync` context manager**" )
test_noop_sync(snake_case_ )
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print("**Test Distributed `no_sync` context manager**" )
test_distributed_sync(snake_case_ )
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation, " , f"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""" , )
test_gradient_accumulation(snake_case_ , snake_case_ )
# Currently will break on torch 2.0 +, need to investigate why
if is_torch_version("<" , "2.0" ) or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation with optimizer and scheduler, " , "`split_batches=False`, `dispatch_batches=False`**" , )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation with optimizer and scheduler, " , f"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""" , )
test_gradient_accumulation_with_opt_and_scheduler(snake_case_ , snake_case_ )
def lowerCAmelCase_ ( snake_case_ : Dict ) -> int:
'''simple docstring'''
main()
if __name__ == "__main__":
main()
| 1 | 1 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class __A ( unittest.TestCase ):
def _lowercase (self : Dict ):
UpperCAmelCase_ = "laion/clap-htsat-unfused"
UpperCAmelCase_ = tempfile.mkdtemp()
def _lowercase (self : Dict , **__a : List[Any] ):
return RobertaTokenizer.from_pretrained(self.checkpoint , **__a )
def _lowercase (self : str , **__a : Union[str, Any] ):
return ClapFeatureExtractor.from_pretrained(self.checkpoint , **__a )
def _lowercase (self : str ):
shutil.rmtree(self.tmpdirname )
def _lowercase (self : Optional[Any] ):
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = self.get_feature_extractor()
UpperCAmelCase_ = ClapProcessor(tokenizer=__a , feature_extractor=__a )
processor.save_pretrained(self.tmpdirname )
UpperCAmelCase_ = ClapProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , __a )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , __a )
def _lowercase (self : Optional[Any] ):
UpperCAmelCase_ = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() )
processor.save_pretrained(self.tmpdirname )
UpperCAmelCase_ = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
UpperCAmelCase_ = self.get_feature_extractor(do_normalize=__a , padding_value=1.0 )
UpperCAmelCase_ = ClapProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=__a , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __a )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.feature_extractor , __a )
def _lowercase (self : Optional[Any] ):
UpperCAmelCase_ = self.get_feature_extractor()
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = ClapProcessor(tokenizer=__a , feature_extractor=__a )
UpperCAmelCase_ = floats_list((3, 1000) )
UpperCAmelCase_ = feature_extractor(__a , return_tensors="np" )
UpperCAmelCase_ = processor(audios=__a , return_tensors="np" )
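        # The processor should be a thin wrapper: its audio features must match
        # the standalone feature extractor's output.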
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _lowercase (self : Optional[Any] ):
UpperCAmelCase_ = self.get_feature_extractor()
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = ClapProcessor(tokenizer=__a , feature_extractor=__a )
UpperCAmelCase_ = "This is a test string"
UpperCAmelCase_ = processor(text=__a )
UpperCAmelCase_ = tokenizer(__a )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _lowercase (self : int ):
UpperCAmelCase_ = self.get_feature_extractor()
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = ClapProcessor(tokenizer=__a , feature_extractor=__a )
UpperCAmelCase_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCAmelCase_ = processor.batch_decode(__a )
UpperCAmelCase_ = tokenizer.batch_decode(__a )
self.assertListEqual(__a , __a )
def _lowercase (self : Union[str, Any] ):
UpperCAmelCase_ = self.get_feature_extractor()
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = ClapProcessor(tokenizer=__a , feature_extractor=__a )
self.assertListEqual(
processor.model_input_names[2:] , feature_extractor.model_input_names , msg="`processor` and `feature_extractor` model input names do not match" , )
| 1 | '''simple docstring'''
def lowerCAmelCase_ ( snake_case_ : int , snake_case_ : int ) -> int:
'''simple docstring'''
return x if y == 0 else greatest_common_divisor(snake_case_ , x % y )
def lowerCAmelCase_ ( snake_case_ : int , snake_case_ : int ) -> int:
'''simple docstring'''
return (x * y) // greatest_common_divisor(snake_case_ , snake_case_ )
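# For example, greatest_common_divisor(12, 8) == 4 and lcm(12, 8) == 24.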
def lowerCAmelCase_ ( snake_case_ : int = 20 ) -> int:
'''simple docstring'''
UpperCAmelCase_ = 1
for i in range(1 , n + 1 ):
UpperCAmelCase_ = lcm(snake_case_ , snake_case_ )
return g
if __name__ == "__main__":
print(f"{solution() = }")
| 1 | 1 |
'''simple docstring'''
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNet2DModel, VQModel
def lowerCAmelCase_ ( snake_case_ : Tuple , snake_case_ : List[Any]=1 ) -> Optional[int]:
'''simple docstring'''
if n_shave_prefix_segments >= 0:
return ".".join(path.split("." )[n_shave_prefix_segments:] )
else:
return ".".join(path.split("." )[:n_shave_prefix_segments] )
def lowerCAmelCase_ ( snake_case_ : Dict , snake_case_ : int=0 ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ = []
for old_item in old_list:
UpperCAmelCase_ = old_item.replace("in_layers.0" , "norm1" )
UpperCAmelCase_ = new_item.replace("in_layers.2" , "conv1" )
UpperCAmelCase_ = new_item.replace("out_layers.0" , "norm2" )
UpperCAmelCase_ = new_item.replace("out_layers.3" , "conv2" )
UpperCAmelCase_ = new_item.replace("emb_layers.1" , "time_emb_proj" )
UpperCAmelCase_ = new_item.replace("skip_connection" , "conv_shortcut" )
UpperCAmelCase_ = shave_segments(snake_case_ , n_shave_prefix_segments=snake_case_ )
mapping.append({"old": old_item, "new": new_item} )
return mapping
def lowerCAmelCase_ ( snake_case_ : Optional[Any] , snake_case_ : Optional[int]=0 ) -> int:
'''simple docstring'''
UpperCAmelCase_ = []
for old_item in old_list:
UpperCAmelCase_ = old_item
UpperCAmelCase_ = new_item.replace("norm.weight" , "group_norm.weight" )
UpperCAmelCase_ = new_item.replace("norm.bias" , "group_norm.bias" )
UpperCAmelCase_ = new_item.replace("proj_out.weight" , "proj_attn.weight" )
UpperCAmelCase_ = new_item.replace("proj_out.bias" , "proj_attn.bias" )
UpperCAmelCase_ = shave_segments(snake_case_ , n_shave_prefix_segments=snake_case_ )
mapping.append({"old": old_item, "new": new_item} )
return mapping
def lowerCAmelCase_ ( snake_case_ : Any , snake_case_ : Any , snake_case_ : str , snake_case_ : List[Any]=None , snake_case_ : Optional[int]=None , snake_case_ : str=None ) -> List[Any]:
'''simple docstring'''
assert isinstance(snake_case_ , snake_case_ ), "Paths should be a list of dicts containing 'old' and 'new' keys."
# Splits the attention layers into three variables.
if attention_paths_to_split is not None:
for path, path_map in attention_paths_to_split.items():
UpperCAmelCase_ = old_checkpoint[path]
UpperCAmelCase_ = old_tensor.shape[0] // 3
UpperCAmelCase_ = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)
UpperCAmelCase_ = old_tensor.shape[0] // config["num_head_channels"] // 3
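            # Split the fused qkv projection into equal query/key/value chunks
            # per attention head, then reshape each back into a standalone tensor.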
UpperCAmelCase_ = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = old_tensor.split(channels // num_heads , dim=1 )
UpperCAmelCase_ = query.reshape(snake_case_ )
UpperCAmelCase_ = key.reshape(snake_case_ )
UpperCAmelCase_ = value.reshape(snake_case_ )
for path in paths:
UpperCAmelCase_ = path["new"]
# These have already been assigned
if attention_paths_to_split is not None and new_path in attention_paths_to_split:
continue
# Global renaming happens here
UpperCAmelCase_ = new_path.replace("middle_block.0" , "mid_block.resnets.0" )
UpperCAmelCase_ = new_path.replace("middle_block.1" , "mid_block.attentions.0" )
UpperCAmelCase_ = new_path.replace("middle_block.2" , "mid_block.resnets.1" )
if additional_replacements is not None:
for replacement in additional_replacements:
UpperCAmelCase_ = new_path.replace(replacement["old"] , replacement["new"] )
# proj_attn.weight has to be converted from conv 1D to linear
if "proj_attn.weight" in new_path:
UpperCAmelCase_ = old_checkpoint[path["old"]][:, :, 0]
else:
UpperCAmelCase_ = old_checkpoint[path["old"]]
def lowerCAmelCase_ ( snake_case_ : List[str] , snake_case_ : Optional[int] ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ = {}
UpperCAmelCase_ = checkpoint["time_embed.0.weight"]
UpperCAmelCase_ = checkpoint["time_embed.0.bias"]
UpperCAmelCase_ = checkpoint["time_embed.2.weight"]
UpperCAmelCase_ = checkpoint["time_embed.2.bias"]
UpperCAmelCase_ = checkpoint["input_blocks.0.0.weight"]
UpperCAmelCase_ = checkpoint["input_blocks.0.0.bias"]
UpperCAmelCase_ = checkpoint["out.0.weight"]
UpperCAmelCase_ = checkpoint["out.0.bias"]
UpperCAmelCase_ = checkpoint["out.2.weight"]
UpperCAmelCase_ = checkpoint["out.2.bias"]
# Retrieves the keys for the input blocks only
UpperCAmelCase_ = len({".".join(layer.split("." )[:2] ) for layer in checkpoint if "input_blocks" in layer} )
UpperCAmelCase_ = {
layer_id: [key for key in checkpoint if f"""input_blocks.{layer_id}""" in key]
for layer_id in range(snake_case_ )
}
# Retrieves the keys for the middle blocks only
UpperCAmelCase_ = len({".".join(layer.split("." )[:2] ) for layer in checkpoint if "middle_block" in layer} )
UpperCAmelCase_ = {
layer_id: [key for key in checkpoint if f"""middle_block.{layer_id}""" in key]
for layer_id in range(snake_case_ )
}
# Retrieves the keys for the output blocks only
UpperCAmelCase_ = len({".".join(layer.split("." )[:2] ) for layer in checkpoint if "output_blocks" in layer} )
UpperCAmelCase_ = {
layer_id: [key for key in checkpoint if f"""output_blocks.{layer_id}""" in key]
for layer_id in range(snake_case_ )
}
for i in range(1 , snake_case_ ):
UpperCAmelCase_ = (i - 1) // (config["num_res_blocks"] + 1)
UpperCAmelCase_ = (i - 1) % (config["num_res_blocks"] + 1)
UpperCAmelCase_ = [key for key in input_blocks[i] if f"""input_blocks.{i}.0""" in key]
UpperCAmelCase_ = [key for key in input_blocks[i] if f"""input_blocks.{i}.1""" in key]
if f"""input_blocks.{i}.0.op.weight""" in checkpoint:
UpperCAmelCase_ = checkpoint[
f"""input_blocks.{i}.0.op.weight"""
]
UpperCAmelCase_ = checkpoint[
f"""input_blocks.{i}.0.op.bias"""
]
continue
UpperCAmelCase_ = renew_resnet_paths(snake_case_ )
UpperCAmelCase_ = {"old": f"""input_blocks.{i}.0""", "new": f"""down_blocks.{block_id}.resnets.{layer_in_block_id}"""}
UpperCAmelCase_ = {"old": "resnets.2.op", "new": "downsamplers.0.op"}
assign_to_checkpoint(
snake_case_ , snake_case_ , snake_case_ , additional_replacements=[meta_path, resnet_op] , config=snake_case_ )
if len(snake_case_ ):
UpperCAmelCase_ = renew_attention_paths(snake_case_ )
UpperCAmelCase_ = {
"old": f"""input_blocks.{i}.1""",
"new": f"""down_blocks.{block_id}.attentions.{layer_in_block_id}""",
}
UpperCAmelCase_ = {
f"""input_blocks.{i}.1.qkv.bias""": {
"key": f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""",
"query": f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""",
"value": f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""",
},
f"""input_blocks.{i}.1.qkv.weight""": {
"key": f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""",
"query": f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""",
"value": f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""",
},
}
assign_to_checkpoint(
snake_case_ , snake_case_ , snake_case_ , additional_replacements=[meta_path] , attention_paths_to_split=snake_case_ , config=snake_case_ , )
UpperCAmelCase_ = middle_blocks[0]
UpperCAmelCase_ = middle_blocks[1]
UpperCAmelCase_ = middle_blocks[2]
UpperCAmelCase_ = renew_resnet_paths(snake_case_ )
assign_to_checkpoint(snake_case_ , snake_case_ , snake_case_ , config=snake_case_ )
UpperCAmelCase_ = renew_resnet_paths(snake_case_ )
assign_to_checkpoint(snake_case_ , snake_case_ , snake_case_ , config=snake_case_ )
UpperCAmelCase_ = renew_attention_paths(snake_case_ )
UpperCAmelCase_ = {
"middle_block.1.qkv.bias": {
"key": "mid_block.attentions.0.key.bias",
"query": "mid_block.attentions.0.query.bias",
"value": "mid_block.attentions.0.value.bias",
},
"middle_block.1.qkv.weight": {
"key": "mid_block.attentions.0.key.weight",
"query": "mid_block.attentions.0.query.weight",
"value": "mid_block.attentions.0.value.weight",
},
}
assign_to_checkpoint(
snake_case_ , snake_case_ , snake_case_ , attention_paths_to_split=snake_case_ , config=snake_case_ )
for i in range(snake_case_ ):
UpperCAmelCase_ = i // (config["num_res_blocks"] + 1)
UpperCAmelCase_ = i % (config["num_res_blocks"] + 1)
UpperCAmelCase_ = [shave_segments(snake_case_ , 2 ) for name in output_blocks[i]]
UpperCAmelCase_ = {}
for layer in output_block_layers:
UpperCAmelCase_ , UpperCAmelCase_ = layer.split("." )[0], shave_segments(snake_case_ , 1 )
if layer_id in output_block_list:
output_block_list[layer_id].append(snake_case_ )
else:
UpperCAmelCase_ = [layer_name]
if len(snake_case_ ) > 1:
UpperCAmelCase_ = [key for key in output_blocks[i] if f"""output_blocks.{i}.0""" in key]
UpperCAmelCase_ = [key for key in output_blocks[i] if f"""output_blocks.{i}.1""" in key]
UpperCAmelCase_ = renew_resnet_paths(snake_case_ )
UpperCAmelCase_ = renew_resnet_paths(snake_case_ )
UpperCAmelCase_ = {"old": f"""output_blocks.{i}.0""", "new": f"""up_blocks.{block_id}.resnets.{layer_in_block_id}"""}
assign_to_checkpoint(snake_case_ , snake_case_ , snake_case_ , additional_replacements=[meta_path] , config=snake_case_ )
if ["conv.weight", "conv.bias"] in output_block_list.values():
UpperCAmelCase_ = list(output_block_list.values() ).index(["conv.weight", "conv.bias"] )
UpperCAmelCase_ = checkpoint[
f"""output_blocks.{i}.{index}.conv.weight"""
]
UpperCAmelCase_ = checkpoint[
f"""output_blocks.{i}.{index}.conv.bias"""
]
# Clear attentions as they have been attributed above.
if len(snake_case_ ) == 2:
UpperCAmelCase_ = []
if len(snake_case_ ):
UpperCAmelCase_ = renew_attention_paths(snake_case_ )
UpperCAmelCase_ = {
"old": f"""output_blocks.{i}.1""",
"new": f"""up_blocks.{block_id}.attentions.{layer_in_block_id}""",
}
UpperCAmelCase_ = {
f"""output_blocks.{i}.1.qkv.bias""": {
"key": f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""",
"query": f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""",
"value": f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""",
},
f"""output_blocks.{i}.1.qkv.weight""": {
"key": f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""",
"query": f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""",
"value": f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""",
},
}
assign_to_checkpoint(
snake_case_ , snake_case_ , snake_case_ , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any("qkv" in key for key in attentions ) else None , config=snake_case_ , )
else:
UpperCAmelCase_ = renew_resnet_paths(snake_case_ , n_shave_prefix_segments=1 )
for path in resnet_0_paths:
UpperCAmelCase_ = ".".join(["output_blocks", str(snake_case_ ), path["old"]] )
UpperCAmelCase_ = ".".join(["up_blocks", str(snake_case_ ), "resnets", str(snake_case_ ), path["new"]] )
UpperCAmelCase_ = checkpoint[old_path]
return new_checkpoint
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_: Tuple =argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the architecture.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
SCREAMING_SNAKE_CASE_: int =parser.parse_args()
SCREAMING_SNAKE_CASE_: List[str] =torch.load(args.checkpoint_path)
with open(args.config_file) as f:
SCREAMING_SNAKE_CASE_: Dict =json.loads(f.read())
SCREAMING_SNAKE_CASE_: int =convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
    SCREAMING_SNAKE_CASE_: Union[str, Any] =UNet2DModel(**config)
model.load_state_dict(converted_checkpoint)
try:
SCREAMING_SNAKE_CASE_: Optional[Any] =DDPMScheduler.from_config('/'.join(args.checkpoint_path.split('/')[:-1]))
SCREAMING_SNAKE_CASE_: Optional[Any] =VQModel.from_pretrained('/'.join(args.checkpoint_path.split('/')[:-1]))
SCREAMING_SNAKE_CASE_: Any =LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
| 1 | '''simple docstring'''
import os
from math import log10
def lowerCAmelCase_ ( snake_case_ : str = "base_exp.txt" ) -> int:
'''simple docstring'''
UpperCAmelCase_ = 0
UpperCAmelCase_ = 0
for i, line in enumerate(open(os.path.join(os.path.dirname(snake_case_ ) , snake_case_ ) ) ):
UpperCAmelCase_ , UpperCAmelCase_ = list(map(snake_case_ , line.split("," ) ) )
        if x * log10(snake_case_ ) > largest:
            UpperCAmelCase_ = x * log10(snake_case_ )
UpperCAmelCase_ = i + 1
return result
if __name__ == "__main__":
print(solution())
| 1 | 1 |
'''simple docstring'''
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
SCREAMING_SNAKE_CASE_: Union[str, Any] =logging.getLogger(__name__)
def lowerCAmelCase_ ( ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = argparse.ArgumentParser(
description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset." )
parser.add_argument(
"--dataset_name" , type=snake_case_ , default="wikitext" , help="Name of the training. Explore datasets at: hf.co/datasets." , )
parser.add_argument(
"--dataset_config" , type=snake_case_ , default="wikitext-103-raw-v1" , help="Configuration name of the dataset." )
parser.add_argument(
"--tokenizer_name_or_path" , type=snake_case_ , default="sayakpaul/unigram-tokenizer-wikitext" , help="Tokenizer identifier. Can be a local filepath or a Hub identifier." , )
parser.add_argument(
"--shard_size" , type=snake_case_ , default=10_00 , help="Number of entries to go in a single shard." , )
parser.add_argument("--split" , type=snake_case_ , default="train" , choices=["train", "test", "validation"] )
parser.add_argument(
"--limit" , default=snake_case_ , type=snake_case_ , help="Limit the number of shards (used for debugging)." , )
parser.add_argument(
"--max_length" , type=snake_case_ , default=5_12 , help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
" sequence length that is a multiple of 8." , )
parser.add_argument(
"--output_dir" , default="tf-tpu" , type=snake_case_ , help="Output directory where the TFRecord shards will be saved. If the"
" path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
" shards will be directly saved to a Google Cloud Storage bucket." , )
UpperCAmelCase_ = parser.parse_args()
return args
def lowerCAmelCase_ ( snake_case_ : Dict ) -> str:
'''simple docstring'''
def fn(snake_case_ : Optional[Any] ):
return tokenizer(examples["text"] )
return fn
def lowerCAmelCase_ ( snake_case_ : List[str] ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = []
for i in range(len(tokenized_data["input_ids"] ) ):
UpperCAmelCase_ = {
"input_ids": tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data["input_ids"][i] ) ),
"attention_mask": tf.train.Feature(
intaa_list=tf.train.IntaaList(value=tokenized_data["attention_mask"][i] ) ),
}
UpperCAmelCase_ = tf.train.Features(feature=snake_case_ )
UpperCAmelCase_ = tf.train.Example(features=snake_case_ )
UpperCAmelCase_ = example.SerializeToString()
records.append(snake_case_ )
return records
def lowerCAmelCase_ ( snake_case_ : List[str] ) -> str:
'''simple docstring'''
UpperCAmelCase_ = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split )
if args.limit is not None:
UpperCAmelCase_ = min(len(snake_case_ ) , args.limit )
UpperCAmelCase_ = dataset.select(range(snake_case_ ) )
print(f"""Limiting the dataset to {args.limit} entries.""" )
UpperCAmelCase_ = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
# Handle output directory creation.
# For serializing into a Google Cloud Storage Bucket, one needs to first
# create a bucket.
if "gs" not in args.output_dir:
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
UpperCAmelCase_ = os.path.join(args.output_dir , args.split )
if not os.path.exists(snake_case_ ):
os.makedirs(snake_case_ )
else:
UpperCAmelCase_ = os.path.join(args.output_dir , args.split )
# Tokenize the whole dataset at once.
UpperCAmelCase_ = tokenize_function(snake_case_ )
UpperCAmelCase_ = dataset.map(snake_case_ , batched=snake_case_ , num_proc=4 , remove_columns=["text"] )
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
def group_texts(snake_case_ : str ):
# Concatenate all texts.
UpperCAmelCase_ = {k: sum(examples[k] , [] ) for k in examples.keys()}
UpperCAmelCase_ = len(concatenated_examples[list(examples.keys() )[0]] )
# We drop the small remainder, though you could add padding instead if the model supports it
# In this, as in all things, we advise you to follow your heart 🫀
UpperCAmelCase_ = (total_length // args.max_length) * args.max_length
# Split by chunks of max_len.
UpperCAmelCase_ = {
k: [t[i : i + args.max_length] for i in range(0 , snake_case_ , args.max_length )]
for k, t in concatenated_examples.items()
}
return result
UpperCAmelCase_ = dataset_tokenized.map(snake_case_ , batched=snake_case_ , batch_size=10_00 , num_proc=4 )
UpperCAmelCase_ = 0
UpperCAmelCase_ = 0
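    # Walk the grouped dataset in windows of `shard_size`; each window is
    # serialized into one TFRecord file named after its index and record count.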
for shard in range(0 , len(snake_case_ ) , args.shard_size ):
UpperCAmelCase_ = grouped_dataset[shard : shard + args.shard_size]
UpperCAmelCase_ = len(dataset_snapshot["input_ids"] )
UpperCAmelCase_ = os.path.join(snake_case_ , f"""dataset-{shard_count}-{records_containing}.tfrecord""" )
UpperCAmelCase_ = get_serialized_examples(snake_case_ )
with tf.io.TFRecordWriter(snake_case_ ) as out_file:
for i in range(len(snake_case_ ) ):
UpperCAmelCase_ = serialized_examples[i]
out_file.write(snake_case_ )
print("Wrote file {} containing {} records".format(snake_case_ , snake_case_ ) )
shard_count += 1
total_records += records_containing
with open(f"""split-{args.split}-records-count.txt""" , "w" ) as f:
print(f"""Total {args.split} records: {total_records}""" , file=snake_case_ )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_: List[str] =parse_args()
main(args)
| 1 | '''simple docstring'''
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def lowerCAmelCase_ ( snake_case_ : List[Any] , snake_case_ : int ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ = checkpoint
UpperCAmelCase_ = {}
UpperCAmelCase_ = vae_state_dict["encoder.conv_in.weight"]
UpperCAmelCase_ = vae_state_dict["encoder.conv_in.bias"]
UpperCAmelCase_ = vae_state_dict["encoder.conv_out.weight"]
UpperCAmelCase_ = vae_state_dict["encoder.conv_out.bias"]
UpperCAmelCase_ = vae_state_dict["encoder.norm_out.weight"]
UpperCAmelCase_ = vae_state_dict["encoder.norm_out.bias"]
UpperCAmelCase_ = vae_state_dict["decoder.conv_in.weight"]
UpperCAmelCase_ = vae_state_dict["decoder.conv_in.bias"]
UpperCAmelCase_ = vae_state_dict["decoder.conv_out.weight"]
UpperCAmelCase_ = vae_state_dict["decoder.conv_out.bias"]
UpperCAmelCase_ = vae_state_dict["decoder.norm_out.weight"]
UpperCAmelCase_ = vae_state_dict["decoder.norm_out.bias"]
UpperCAmelCase_ = vae_state_dict["quant_conv.weight"]
UpperCAmelCase_ = vae_state_dict["quant_conv.bias"]
UpperCAmelCase_ = vae_state_dict["post_quant_conv.weight"]
UpperCAmelCase_ = vae_state_dict["post_quant_conv.bias"]
# Retrieves the keys for the encoder down blocks only
UpperCAmelCase_ = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "encoder.down" in layer} )
UpperCAmelCase_ = {
layer_id: [key for key in vae_state_dict if f"""down.{layer_id}""" in key] for layer_id in range(snake_case_ )
}
# Retrieves the keys for the decoder up blocks only
UpperCAmelCase_ = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "decoder.up" in layer} )
UpperCAmelCase_ = {
layer_id: [key for key in vae_state_dict if f"""up.{layer_id}""" in key] for layer_id in range(snake_case_ )
}
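    # Encoder side: remap each original `down.{i}` block onto diffusers'
    # `down_blocks.{i}`, pulling any downsampler conv weights out first.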
for i in range(snake_case_ ):
UpperCAmelCase_ = [key for key in down_blocks[i] if f"""down.{i}""" in key and f"""down.{i}.downsample""" not in key]
if f"""encoder.down.{i}.downsample.conv.weight""" in vae_state_dict:
UpperCAmelCase_ = vae_state_dict.pop(
f"""encoder.down.{i}.downsample.conv.weight""" )
UpperCAmelCase_ = vae_state_dict.pop(
f"""encoder.down.{i}.downsample.conv.bias""" )
UpperCAmelCase_ = renew_vae_resnet_paths(snake_case_ )
UpperCAmelCase_ = {"old": f"""down.{i}.block""", "new": f"""down_blocks.{i}.resnets"""}
assign_to_checkpoint(snake_case_ , snake_case_ , snake_case_ , additional_replacements=[meta_path] , config=snake_case_ )
UpperCAmelCase_ = [key for key in vae_state_dict if "encoder.mid.block" in key]
UpperCAmelCase_ = 2
for i in range(1 , num_mid_res_blocks + 1 ):
UpperCAmelCase_ = [key for key in mid_resnets if f"""encoder.mid.block_{i}""" in key]
UpperCAmelCase_ = renew_vae_resnet_paths(snake_case_ )
UpperCAmelCase_ = {"old": f"""mid.block_{i}""", "new": f"""mid_block.resnets.{i - 1}"""}
assign_to_checkpoint(snake_case_ , snake_case_ , snake_case_ , additional_replacements=[meta_path] , config=snake_case_ )
UpperCAmelCase_ = [key for key in vae_state_dict if "encoder.mid.attn" in key]
UpperCAmelCase_ = renew_vae_attention_paths(snake_case_ )
UpperCAmelCase_ = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
assign_to_checkpoint(snake_case_ , snake_case_ , snake_case_ , additional_replacements=[meta_path] , config=snake_case_ )
conv_attn_to_linear(snake_case_ )
for i in range(snake_case_ ):
UpperCAmelCase_ = num_up_blocks - 1 - i
UpperCAmelCase_ = [
key for key in up_blocks[block_id] if f"""up.{block_id}""" in key and f"""up.{block_id}.upsample""" not in key
]
if f"""decoder.up.{block_id}.upsample.conv.weight""" in vae_state_dict:
UpperCAmelCase_ = vae_state_dict[
f"""decoder.up.{block_id}.upsample.conv.weight"""
]
UpperCAmelCase_ = vae_state_dict[
f"""decoder.up.{block_id}.upsample.conv.bias"""
]
UpperCAmelCase_ = renew_vae_resnet_paths(snake_case_ )
UpperCAmelCase_ = {"old": f"""up.{block_id}.block""", "new": f"""up_blocks.{i}.resnets"""}
assign_to_checkpoint(snake_case_ , snake_case_ , snake_case_ , additional_replacements=[meta_path] , config=snake_case_ )
UpperCAmelCase_ = [key for key in vae_state_dict if "decoder.mid.block" in key]
UpperCAmelCase_ = 2
for i in range(1 , num_mid_res_blocks + 1 ):
UpperCAmelCase_ = [key for key in mid_resnets if f"""decoder.mid.block_{i}""" in key]
UpperCAmelCase_ = renew_vae_resnet_paths(snake_case_ )
UpperCAmelCase_ = {"old": f"""mid.block_{i}""", "new": f"""mid_block.resnets.{i - 1}"""}
assign_to_checkpoint(snake_case_ , snake_case_ , snake_case_ , additional_replacements=[meta_path] , config=snake_case_ )
UpperCAmelCase_ = [key for key in vae_state_dict if "decoder.mid.attn" in key]
UpperCAmelCase_ = renew_vae_attention_paths(snake_case_ )
UpperCAmelCase_ = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
assign_to_checkpoint(snake_case_ , snake_case_ , snake_case_ , additional_replacements=[meta_path] , config=snake_case_ )
conv_attn_to_linear(snake_case_ )
return new_checkpoint
def lowerCAmelCase_ ( snake_case_ : str , snake_case_ : str , ) -> Dict:
'''simple docstring'''
    UpperCAmelCase_ = requests.get(
        "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml" )
UpperCAmelCase_ = io.BytesIO(r.content )
UpperCAmelCase_ = OmegaConf.load(snake_case_ )
UpperCAmelCase_ = 5_12
UpperCAmelCase_ = "cuda" if torch.cuda.is_available() else "cpu"
if checkpoint_path.endswith("safetensors" ):
from safetensors import safe_open
UpperCAmelCase_ = {}
with safe_open(snake_case_ , framework="pt" , device="cpu" ) as f:
for key in f.keys():
UpperCAmelCase_ = f.get_tensor(snake_case_ )
else:
UpperCAmelCase_ = torch.load(snake_case_ , map_location=snake_case_ )["state_dict"]
# Convert the VAE model.
UpperCAmelCase_ = create_vae_diffusers_config(snake_case_ , image_size=snake_case_ )
UpperCAmelCase_ = custom_convert_ldm_vae_checkpoint(snake_case_ , snake_case_ )
UpperCAmelCase_ = AutoencoderKL(**snake_case_ )
vae.load_state_dict(snake_case_ )
vae.save_pretrained(snake_case_ )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_: Optional[int] =argparse.ArgumentParser()
parser.add_argument('--vae_pt_path', default=None, type=str, required=True, help='Path to the VAE.pt to convert.')
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the VAE.pt to convert.')
SCREAMING_SNAKE_CASE_: str =parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
| 1 | 1 |
'''simple docstring'''
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
SCREAMING_SNAKE_CASE_: str =(
string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
SCREAMING_SNAKE_CASE_: list[int] =[ord(letter) for letter in string.ascii_lowercase]
SCREAMING_SNAKE_CASE_: set[int] ={ord(char) for char in VALID_CHARS}
SCREAMING_SNAKE_CASE_: list[str] =["the", "be", "to", "of", "and", "in", "that", "have"]
def lowerCAmelCase_ ( snake_case_ : list[int] , snake_case_ : tuple[int, ...] ) -> str | None:
'''simple docstring'''
UpperCAmelCase_ = ""
UpperCAmelCase_ = 42
UpperCAmelCase_ = 42
UpperCAmelCase_ = 42
for keychar, cipherchar in zip(cycle(snake_case_ ) , snake_case_ ):
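        # XOR is its own inverse, so ciphertext ^ key recovers the plain byte.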
UpperCAmelCase_ = cipherchar ^ keychar
if decodedchar not in VALID_INTS:
return None
decoded += chr(snake_case_ )
return decoded
def lowerCAmelCase_ ( snake_case_ : list[int] ) -> list[str]:
'''simple docstring'''
UpperCAmelCase_ = []
for key in product(snake_case_ , repeat=3 ):
UpperCAmelCase_ = try_key(snake_case_ , snake_case_ )
if encoded is not None:
possibles.append(snake_case_ )
return possibles
def lowerCAmelCase_ ( snake_case_ : list[str] , snake_case_ : str ) -> list[str]:
'''simple docstring'''
return [possible for possible in possibles if common_word in possible.lower()]
def lowerCAmelCase_ ( snake_case_ : str = "p059_cipher.txt" ) -> int:
'''simple docstring'''
UpperCAmelCase_ = 42
UpperCAmelCase_ = 42
UpperCAmelCase_ = 42
UpperCAmelCase_ = 42
    UpperCAmelCase_ = Path(__file__).parent.joinpath(snake_case_ ).read_text(encoding="utf-8" )
UpperCAmelCase_ = [int(snake_case_ ) for number in data.strip().split("," )]
UpperCAmelCase_ = filter_valid_chars(snake_case_ )
for common_word in COMMON_WORDS:
UpperCAmelCase_ = filter_common_word(snake_case_ , snake_case_ )
if len(snake_case_ ) == 1:
break
UpperCAmelCase_ = possibles[0]
return sum(ord(snake_case_ ) for char in decoded_text )
if __name__ == "__main__":
print(f"{solution() = }")
| 1 | '''simple docstring'''
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class __A ( unittest.TestCase ):
def __init__(self : str , __a : Optional[Any] , __a : Optional[Any]=13 , __a : int=30 , __a : Union[str, Any]=2 , __a : Dict=3 , __a : List[Any]=True , __a : Optional[Any]=True , __a : List[Any]=32 , __a : Any=5 , __a : str=4 , __a : Optional[int]=37 , __a : Optional[int]="gelu" , __a : List[str]=0.1 , __a : Tuple=0.1 , __a : List[str]=10 , __a : Optional[int]=0.02 , ):
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = image_size
UpperCAmelCase_ = patch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = is_training
UpperCAmelCase_ = use_labels
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = type_sequence_label_size
UpperCAmelCase_ = initializer_range
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
UpperCAmelCase_ = (image_size // patch_size) ** 2
UpperCAmelCase_ = num_patches + 1
def _lowercase (self : Any ):
UpperCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ = ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__a , initializer_range=self.initializer_range , )
return config, pixel_values
def _lowercase (self : Dict , __a : Any , __a : List[Any] ):
UpperCAmelCase_ = FlaxViTModel(config=__a )
UpperCAmelCase_ = model(__a )
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
UpperCAmelCase_ = (self.image_size, self.image_size)
UpperCAmelCase_ = (self.patch_size, self.patch_size)
UpperCAmelCase_ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size) )
def _lowercase (self : Tuple , __a : str , __a : Any ):
UpperCAmelCase_ = self.type_sequence_label_size
UpperCAmelCase_ = FlaxViTForImageClassification(config=__a )
UpperCAmelCase_ = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCAmelCase_ = 1
UpperCAmelCase_ = FlaxViTForImageClassification(__a )
UpperCAmelCase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase_ = model(__a )
def _lowercase (self : Optional[Any] ):
UpperCAmelCase_ = self.prepare_config_and_inputs()
        UpperCAmelCase_ , UpperCAmelCase_ = config_and_inputs
UpperCAmelCase_ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_flax
class __A ( UpperCamelCase__ , unittest.TestCase ):
a__ : Tuple = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()
def _lowercase (self : Any ):
UpperCAmelCase_ = FlaxViTModelTester(self )
UpperCAmelCase_ = ConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37 )
def _lowercase (self : Tuple ):
self.config_tester.run_common_tests()
def _lowercase (self : str ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def _lowercase (self : str ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a )
def _lowercase (self : Tuple ):
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(__a )
UpperCAmelCase_ = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ = [*signature.parameters.keys()]
UpperCAmelCase_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __a )
def _lowercase (self : Optional[Any] ):
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCAmelCase_ = self._prepare_for_class(__a , __a )
UpperCAmelCase_ = model_class(__a )
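                # Compare jitted and eager outputs shape-for-shape to catch tracing bugs.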
@jax.jit
def model_jitted(__a : Tuple , **__a : List[Any] ):
return model(pixel_values=__a , **__a )
with self.subTest("JIT Enabled" ):
UpperCAmelCase_ = model_jitted(**__a ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
UpperCAmelCase_ = model_jitted(**__a ).to_tuple()
self.assertEqual(len(__a ) , len(__a ) )
for jitted_output, output in zip(__a , __a ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def _lowercase (self : Tuple ):
for model_class_name in self.all_model_classes:
UpperCAmelCase_ = model_class_name.from_pretrained("google/vit-base-patch16-224" )
UpperCAmelCase_ = model(np.ones((1, 3, 224, 224) ) )
self.assertIsNotNone(__a )
| 1 | 1 |
'''simple docstring'''
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_: Tuple =logging.get_logger(__name__)
def lowerCAmelCase_ ( snake_case_ : str , snake_case_ : str ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = RobertaPreLayerNormConfig.from_pretrained(
snake_case_ , architectures=["RobertaPreLayerNormForMaskedLM"] )
# convert state_dict
UpperCAmelCase_ = torch.load(hf_hub_download(repo_id=snake_case_ , filename="pytorch_model.bin" ) )
UpperCAmelCase_ = {}
for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
if tensor_key.startswith("roberta." ):
UpperCAmelCase_ = "roberta_prelayernorm." + tensor_key[len("roberta." ) :]
# The original implementation contains weights which are not used, remove them from the state_dict
if tensor_key.endswith(".self.LayerNorm.weight" ) or tensor_key.endswith(".self.LayerNorm.bias" ):
continue
UpperCAmelCase_ = tensor_value
UpperCAmelCase_ = RobertaPreLayerNormForMaskedLM.from_pretrained(
pretrained_model_name_or_path=snake_case_ , config=snake_case_ , state_dict=snake_case_ )
model.save_pretrained(snake_case_ )
# convert tokenizer
UpperCAmelCase_ = AutoTokenizer.from_pretrained(snake_case_ )
tokenizer.save_pretrained(snake_case_ )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_: str =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint-repo',
default=None,
type=str,
required=True,
help='Path the official PyTorch dump, e.g. \'andreasmadsen/efficient_mlm_m0.40\'.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
SCREAMING_SNAKE_CASE_: Optional[int] =parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
| 1 | '''simple docstring'''
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class __A ( UpperCamelCase__ ):
def _lowercase (self : Optional[Any] ):
UpperCAmelCase_ = tempfile.mkdtemp()
UpperCAmelCase_ = 5
# Realm tok
UpperCAmelCase_ = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"test",
"question",
"this",
"is",
"the",
"first",
"second",
"third",
"fourth",
"fifth",
"record",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
UpperCAmelCase_ = os.path.join(self.tmpdirname , "realm_tokenizer" )
os.makedirs(__a , exist_ok=__a )
UpperCAmelCase_ = os.path.join(__a , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
UpperCAmelCase_ = os.path.join(self.tmpdirname , "realm_block_records" )
os.makedirs(__a , exist_ok=__a )
def _lowercase (self : Optional[Any] ):
return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , "realm_tokenizer" ) )
def _lowercase (self : Any ):
shutil.rmtree(self.tmpdirname )
def _lowercase (self : List[Any] ):
UpperCAmelCase_ = RealmConfig(num_block_records=self.num_block_records )
return config
def _lowercase (self : List[str] ):
UpperCAmelCase_ = Dataset.from_dict(
{
"id": ["0", "1"],
"question": ["foo", "bar"],
"answers": [["Foo", "Bar"], ["Bar"]],
} )
return dataset
def _lowercase (self : Any ):
UpperCAmelCase_ = np.array(
[
B"This is the first record",
B"This is the second record",
B"This is the third record",
B"This is the fourth record",
B"This is the fifth record",
B"This is a longer longer longer record",
] , dtype=__a , )
return block_records
def _lowercase (self : Union[str, Any] ):
UpperCAmelCase_ = RealmRetriever(
block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
return retriever
def _lowercase (self : int ):
UpperCAmelCase_ = self.get_config()
UpperCAmelCase_ = self.get_dummy_retriever()
UpperCAmelCase_ = retriever.tokenizer
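        # Indices into `block_records` standing in for the retriever's top hits.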
UpperCAmelCase_ = np.array([0, 3] , dtype="long" )
UpperCAmelCase_ = tokenizer(["Test question"] ).input_ids
UpperCAmelCase_ = tokenizer(
["the fourth"] , add_special_tokens=__a , return_token_type_ids=__a , return_attention_mask=__a , ).input_ids
UpperCAmelCase_ = config.reader_seq_len
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = retriever(
__a , __a , answer_ids=__a , max_length=__a , return_tensors="np" )
self.assertEqual(len(__a ) , 2 )
self.assertEqual(len(__a ) , 2 )
self.assertEqual(len(__a ) , 2 )
self.assertEqual(concat_inputs.input_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) )
self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "first", "record", "[SEP]"] , )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "fourth", "record", "[SEP]"] , )
def _lowercase (self : List[Any] ):
UpperCAmelCase_ = self.get_config()
UpperCAmelCase_ = self.get_dummy_retriever()
UpperCAmelCase_ = retriever.tokenizer
UpperCAmelCase_ = np.array([0, 3, 5] , dtype="long" )
UpperCAmelCase_ = tokenizer(["Test question"] ).input_ids
UpperCAmelCase_ = tokenizer(
["the fourth", "longer longer"] , add_special_tokens=__a , return_token_type_ids=__a , return_attention_mask=__a , ).input_ids
UpperCAmelCase_ = config.reader_seq_len
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = retriever(
__a , __a , answer_ids=__a , max_length=__a , return_tensors="np" )
self.assertEqual([False, True, True] , __a )
self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , __a )
self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , __a )
def _lowercase (self : Optional[Any] ):
UpperCAmelCase_ = self.get_dummy_retriever()
retriever.save_pretrained(os.path.join(self.tmpdirname , "realm_block_records" ) )
# Test local path
UpperCAmelCase_ = retriever.from_pretrained(os.path.join(self.tmpdirname , "realm_block_records" ) )
self.assertEqual(retriever.block_records[0] , B"This is the first record" )
# Test mocked remote path
with patch("transformers.models.realm.retrieval_realm.hf_hub_download" ) as mock_hf_hub_download:
UpperCAmelCase_ = os.path.join(
os.path.join(self.tmpdirname , "realm_block_records" ) , _REALM_BLOCK_RECORDS_FILENAME )
UpperCAmelCase_ = RealmRetriever.from_pretrained("google/realm-cc-news-pretrained-openqa" )
self.assertEqual(retriever.block_records[0] , B"This is the first record" )
| 1 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
SCREAMING_SNAKE_CASE_: Union[str, Any] ={
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/config.json',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/config.json',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/config.json',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/config.json',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/config.json',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/config.json',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json',
}
class __A ( UpperCamelCase__ ):
a__ : Optional[int] = """albert"""
def __init__(self : Optional[int] , __a : Optional[int]=30000 , __a : Optional[Any]=128 , __a : Any=4096 , __a : Tuple=12 , __a : int=1 , __a : str=64 , __a : Optional[int]=16384 , __a : Optional[Any]=1 , __a : Dict="gelu_new" , __a : Dict=0 , __a : Dict=0 , __a : int=512 , __a : Optional[Any]=2 , __a : Tuple=0.02 , __a : Any=1E-12 , __a : List[Any]=0.1 , __a : Optional[Any]="absolute" , __a : Union[str, Any]=0 , __a : Union[str, Any]=2 , __a : str=3 , **__a : Union[str, Any] , ):
super().__init__(pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , **__a )
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = embedding_size
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_hidden_groups
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = inner_group_num
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = type_vocab_size
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = layer_norm_eps
UpperCAmelCase_ = classifier_dropout_prob
UpperCAmelCase_ = position_embedding_type
class __A ( UpperCamelCase__ ):
@property
def _lowercase (self : Tuple ):
if self.task == "multiple-choice":
UpperCAmelCase_ = {0: "batch", 1: "choice", 2: "sequence"}
else:
UpperCAmelCase_ = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("token_type_ids", dynamic_axis),
] )
| 1 | '''simple docstring'''
from math import log
from scipy.constants import Boltzmann, physical_constants
SCREAMING_SNAKE_CASE_: Optional[int] =3_00 # TEMPERATURE (unit = K)
def lowerCAmelCase_ ( snake_case_ : float , snake_case_ : float , snake_case_ : float , ) -> float:
'''simple docstring'''
if donor_conc <= 0:
raise ValueError("Donor concentration should be positive" )
elif acceptor_conc <= 0:
raise ValueError("Acceptor concentration should be positive" )
elif intrinsic_conc <= 0:
raise ValueError("Intrinsic concentration should be positive" )
elif donor_conc <= intrinsic_conc:
raise ValueError(
"Donor concentration should be greater than intrinsic concentration" )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
"Acceptor concentration should be greater than intrinsic concentration" )
else:
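        # Built-in potential: V_bi = (kT / q) * ln(N_D * N_A / n_i**2), in volts.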
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 1 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_: List[Any] =logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_: List[str] ={
'funnel-transformer/small': 'https://huggingface.co/funnel-transformer/small/resolve/main/config.json',
'funnel-transformer/small-base': 'https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json',
'funnel-transformer/medium': 'https://huggingface.co/funnel-transformer/medium/resolve/main/config.json',
'funnel-transformer/medium-base': 'https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json',
'funnel-transformer/intermediate': (
'https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json'
),
'funnel-transformer/intermediate-base': (
'https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json'
),
'funnel-transformer/large': 'https://huggingface.co/funnel-transformer/large/resolve/main/config.json',
'funnel-transformer/large-base': 'https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json',
'funnel-transformer/xlarge': 'https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json',
'funnel-transformer/xlarge-base': 'https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json',
}
class __A ( PretrainedConfig ):
    model_type = "funnel"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
    }
    def __init__(self , vocab_size=30522 , block_sizes=[4, 4, 4] , block_repeats=None , num_decoder_layers=2 , d_model=768 , n_head=12 , d_head=64 , d_inner=3072 , hidden_act="gelu_new" , hidden_dropout=0.1 , attention_dropout=0.1 , activation_dropout=0.0 , initializer_range=0.1 , initializer_std=None , layer_norm_eps=1E-9 , pooling_type="mean" , attention_type="relative_shift" , separate_cls=True , truncate_seq=True , pool_q_only=True , **kwargs , ):
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.block_repeats = [1] * len(block_sizes ) if block_repeats is None else block_repeats
        assert len(block_sizes ) == len(
            self.block_repeats ), "`block_sizes` and `block_repeats` should have the same length."
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.initializer_range = initializer_range
        self.initializer_std = initializer_std
        self.layer_norm_eps = layer_norm_eps
        assert pooling_type in [
            "mean",
            "max",
        ], f"""Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported."""
        self.pooling_type = pooling_type
        assert attention_type in [
            "relative_shift",
            "factorized",
        ], f"""Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported."""
        self.attention_type = attention_type
        self.separate_cls = separate_cls
        self.truncate_seq = truncate_seq
        self.pool_q_only = pool_q_only
        super().__init__(**kwargs )
    @property
    def num_hidden_layers(self ):
        return sum(self.block_sizes )
    @num_hidden_layers.setter
    def num_hidden_layers(self , value ):
        raise NotImplementedError(
            "This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`." )
    @property
    def num_blocks(self ):
        return len(self.block_sizes )
    @num_blocks.setter
    def num_blocks(self , value ):
        raise NotImplementedError("This model does not support the setting of `num_blocks`. Please set `block_sizes`." )
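# Hedged usage sketch (illustrative; assumes `transformers.FunnelConfig` matches the class above):
# from transformers import FunnelConfig
# config = FunnelConfig(block_sizes=[4, 4, 4])
# config.num_hidden_layers  # -> 12, derived as sum(block_sizes)
# config.num_blocks         # -> 3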
| 1 | '''simple docstring'''
import math
def main() -> None:
    '''simple docstring'''
    message = input("Enter message: " )
    key = int(input(f"""Enter key [2-{len(message ) - 1}]: """ ) )
    mode = input("Encryption/Decryption [e/d]: " )
    if mode.lower().startswith("e" ):
        text = encrypt_message(key , message )
    elif mode.lower().startswith("d" ):
        text = decrypt_message(key , message )
    # Append pipe symbol (vertical bar) to identify spaces at the end.
    print(f"""Output:\n{text + "|"}""" )
def encrypt_message( key : int , message : str ) -> str:
    '''simple docstring'''
    cipher_text = [""] * key
    for col in range(key ):
        pointer = col
        while pointer < len(message ):
            cipher_text[col] += message[pointer]
            pointer += key
    return "".join(cipher_text )
def decrypt_message( key : int , message : str ) -> str:
    '''simple docstring'''
    num_cols = math.ceil(len(message ) / key )
    num_rows = key
    num_shaded_boxes = (num_cols * num_rows) - len(message )
    plain_text = [""] * num_cols
    col = 0
    row = 0
    for symbol in message:
        plain_text[col] += symbol
        col += 1
        if (
            (col == num_cols)
            or (col == num_cols - 1)
            and (row >= num_rows - num_shaded_boxes)
        ):
            col = 0
            row += 1
    return "".join(plain_text )
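# Hedged worked example (added for illustration): a key-4 round trip.
# >>> encrypt_message(4, "Hello World")
# 'Hore llWdlo'
# >>> decrypt_message(4, "Hore llWdlo")
# 'Hello World'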
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 1 | 1 |
'''simple docstring'''
from math import log
from scipy.constants import Boltzmann, physical_constants
SCREAMING_SNAKE_CASE_: Optional[int] =3_00 # TEMPERATURE (unit = K)
def lowerCAmelCase_ ( snake_case_ : float , snake_case_ : float , snake_case_ : float , ) -> float:
'''simple docstring'''
if donor_conc <= 0:
raise ValueError("Donor concentration should be positive" )
elif acceptor_conc <= 0:
raise ValueError("Acceptor concentration should be positive" )
elif intrinsic_conc <= 0:
raise ValueError("Intrinsic concentration should be positive" )
elif donor_conc <= intrinsic_conc:
raise ValueError(
"Donor concentration should be greater than intrinsic concentration" )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
"Acceptor concentration should be greater than intrinsic concentration" )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 1 | '''simple docstring'''
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
logger =logging.getLogger()
stream_handler =logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class __A ( TestCasePlus ):
    def _create_dummy_data(self , data_dir ):
        os.makedirs(data_dir , exist_ok=True )
        contents = {"source": "What is love ?", "target": "life"}
        n_lines = {"train": 12, "val": 2, "test": 2}
        for split in ["train", "test", "val"]:
            for field in ["source", "target"]:
                content = "\n".join([contents[field]] * n_lines[split] )
                with open(os.path.join(data_dir , f"""{split}.{field}""" ) , "w" ) as f:
                    f.write(content )
    def _run_finetune(self , gpus : int , distributed_retriever : str = "pytorch" ):
        tmp_dir = self.get_auto_remove_tmp_dir()
        output_dir = os.path.join(tmp_dir , "output" )
        data_dir = os.path.join(tmp_dir , "data" )
        self._create_dummy_data(data_dir=data_dir )
        testargs = f"""
--data_dir {data_dir} \
--output_dir {output_dir} \
--model_name_or_path facebook/rag-sequence-base \
--model_type rag_sequence \
--do_train \
--do_predict \
--n_val -1 \
--val_check_interval 1.0 \
--train_batch_size 2 \
--eval_batch_size 1 \
--max_source_length 25 \
--max_target_length 25 \
--val_max_target_length 25 \
--test_max_target_length 25 \
--label_smoothing 0.1 \
--dropout 0.1 \
--attention_dropout 0.1 \
--weight_decay 0.001 \
--adam_epsilon 1e-08 \
--max_grad_norm 0.1 \
--lr_scheduler polynomial \
--learning_rate 3e-04 \
--num_train_epochs 1 \
--warmup_steps 4 \
--gradient_accumulation_steps 1 \
--distributed-port 8787 \
--use_dummy_dataset 1 \
--distributed_retriever {distributed_retriever} \
""".split()
if gpus > 0:
testargs.append(f"""--gpus={gpus}""" )
if is_apex_available():
testargs.append("--fp16" )
else:
testargs.append("--gpus=0" )
testargs.append("--distributed_backend=ddp_cpu" )
testargs.append("--num_processes=2" )
        cmd = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs
        execute_subprocess_async(cmd , env=self.get_env() )
        metrics_save_path = os.path.join(output_dir , "metrics.json" )
        with open(metrics_save_path ) as f:
            result = json.load(f )
        return result
    @require_torch_gpu
    def test_finetune_gpu(self ):
        result = self._run_finetune(gpus=1 )
        self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 )
    @require_torch_multi_gpu
    def test_finetune_multigpu(self ):
        result = self._run_finetune(gpus=2 )
        self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 )
    @require_torch_gpu
    @require_ray
    def test_finetune_gpu_ray_retrieval(self ):
        result = self._run_finetune(gpus=1 , distributed_retriever="ray" )
        self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 )
    @require_torch_multi_gpu
    @require_ray
    def test_finetune_multigpu_ray_retrieval(self ):
        result = self._run_finetune(gpus=1 , distributed_retriever="ray" )
        self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 )
| 1 | 1 |
'''simple docstring'''
import numpy as np
SQUARE =[
['a', 'b', 'c', 'd', 'e'],
['f', 'g', 'h', 'i', 'k'],
['l', 'm', 'n', 'o', 'p'],
['q', 'r', 's', 't', 'u'],
['v', 'w', 'x', 'y', 'z'],
]
class __A :
    def __init__(self ):
        self.SQUARE = np.array(SQUARE )
    def letter_to_numbers(self , letter : str ):
        index1 , index2 = np.where(letter == self.SQUARE )
        indexes = np.concatenate([index1 + 1, index2 + 1] )
        return indexes
    def numbers_to_letter(self , index1 : int , index2 : int ):
        letter = self.SQUARE[index1 - 1, index2 - 1]
        return letter
    def encode(self , message : str ):
        message = message.lower()
        message = message.replace(" " , "" )
        message = message.replace("j" , "i" )
        first_step = np.empty((2, len(message )) )
        for letter_index in range(len(message ) ):
            numbers = self.letter_to_numbers(message[letter_index] )
            first_step[0, letter_index] = numbers[0]
            first_step[1, letter_index] = numbers[1]
        second_step = first_step.reshape(2 * len(message ) )
        encoded_message = ""
        for numbers_index in range(len(message ) ):
            index1 = int(second_step[numbers_index * 2] )
            index2 = int(second_step[(numbers_index * 2) + 1] )
            letter = self.numbers_to_letter(index1 , index2 )
            encoded_message = encoded_message + letter
        return encoded_message
    def decode(self , message : str ):
        message = message.lower()
        message.replace(" " , "" )
        first_step = np.empty(2 * len(message ) )
        for letter_index in range(len(message ) ):
            numbers = self.letter_to_numbers(message[letter_index] )
            first_step[letter_index * 2] = numbers[0]
            first_step[letter_index * 2 + 1] = numbers[1]
        second_step = first_step.reshape((2, len(message )) )
        decoded_message = ""
        for numbers_index in range(len(message ) ):
            index1 = int(second_step[0, numbers_index] )
            index2 = int(second_step[1, numbers_index] )
            letter = self.numbers_to_letter(index1 , index2 )
            decoded_message = decoded_message + letter
        return decoded_message
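# Hedged usage sketch (added for illustration; the message is made up): a round trip
# through the 5x5 Polybius square defined above.
if __name__ == "__main__":
    cipher = __A()
    encoded = cipher.encode("testmessage")
    assert cipher.decode(encoded) == "testmessage"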
| 1 | '''simple docstring'''
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
process_lock =Lock()
def oe_process( position , value , l_send , r_send , lr_cv , rr_cv , result_pipe ):
    '''simple docstring'''
    global process_lock
    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0 , 10 ):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value )
            process_lock.release()
            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()
            # take the lower value since you are on the left
            value = min(value , temp )
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value )
            process_lock.release()
            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()
            # take the higher value since you are on the right
            value = max(value , temp )
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value )
def odd_even_transposition( arr : list ) -> list:
    '''simple docstring'''
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe() )
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
    temp_lr = temp_rs
    temp_ls = temp_rr
    for i in range(1 , len(arr ) - 1 ):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
        temp_lr = temp_rs
        temp_ls = temp_rr
    process_array_.append(
        Process(
            target=oe_process , args=(
                len(arr ) - 1,
                arr[len(arr ) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr ) - 1],
            ) , ) )
    # start the processes
    for p in process_array_:
        p.start()
    # wait for the processes to end and write their values to the list
    for p in range(0 , len(result_pipe ) ):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr
def main() -> None:
    '''simple docstring'''
    arr = list(range(10 , 0 , -1 ) )
    print("Initial List" )
    print(*arr )
    arr = odd_even_transposition(arr )
    print("Sorted List\n" )
    print(*arr )
if __name__ == "__main__":
main()
| 1 | 1 |
'''simple docstring'''
from __future__ import annotations
def allocation_num( number_of_bytes : int , partitions : int ) -> list[str]:
    '''simple docstring'''
    if partitions <= 0:
        raise ValueError("partitions must be a positive number!" )
    if partitions > number_of_bytes:
        raise ValueError("partitions can not > number_of_bytes!" )
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions ):
        start_bytes = i * bytes_per_partition + 1
        end_bytes = (
            number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        )
        allocation_list.append(f"""{start_bytes}-{end_bytes}""" )
    return allocation_list
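# Hedged worked example (added for illustration):
# >>> allocation_num(100, 4)
# ['1-25', '26-50', '51-75', '76-100']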
if __name__ == "__main__":
import doctest
doctest.testmod()
| 1 | '''simple docstring'''
def binary_or( a : int , b : int ) -> str:
    '''simple docstring'''
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive" )
    a_binary = str(bin(a ) )[2:] # remove the leading "0b"
    b_binary = str(bin(b ) )[2:]
    max_len = max(len(a_binary ) , len(b_binary ) )
    return "0b" + "".join(
        str(int("1" in (char_a, char_b) ) )
        for char_a, char_b in zip(a_binary.zfill(max_len ) , b_binary.zfill(max_len ) ) )
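# Hedged worked example: 10 is 0b1010 and 4 is 0b0100, so their OR is 0b1110 (14).
# >>> binary_or(10, 4)
# '0b1110'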
if __name__ == "__main__":
import doctest
doctest.testmod()
| 1 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import (
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForAudioFrameClassification,
    Wav2Vec2ForSequenceClassification,
    Wav2Vec2ForXVector,
    logging,
)
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_: Optional[int] =logging.get_logger(__name__)
def convert_classification( base_model_name , hf_config , downstream_dict ):
    '''simple docstring'''
    model = Wav2Vec2ForSequenceClassification.from_pretrained(base_model_name , config=hf_config )
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model
def convert_diarization( base_model_name , hf_config , downstream_dict ):
    '''simple docstring'''
    model = Wav2Vec2ForAudioFrameClassification.from_pretrained(base_model_name , config=hf_config )
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model
def convert_xvector( base_model_name , hf_config , downstream_dict ):
    '''simple docstring'''
    model = Wav2Vec2ForXVector.from_pretrained(base_model_name , config=hf_config )
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"""model.framelevel_feature_extractor.module.{i}.kernel.weight"""
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"""model.framelevel_feature_extractor.module.{i}.kernel.bias"""]
    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model
@torch.no_grad()
def convert_saprl_checkpoint( base_model_name , config_path , checkpoint_path , model_dump_path ):
    '''simple docstring'''
    checkpoint = torch.load(checkpoint_path , map_location="cpu" )
    downstream_dict = checkpoint["Downstream"]
    hf_config = Wav2Vec2Config.from_pretrained(config_path )
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name , return_attention_mask=True , do_normalize=False )
    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification" ):
        hf_model = convert_classification(base_model_name , hf_config , downstream_dict )
    elif arch.endswith("ForAudioFrameClassification" ):
        hf_model = convert_diarization(base_model_name , hf_config , downstream_dict )
    elif arch.endswith("ForXVector" ):
        hf_model = convert_xvector(base_model_name , hf_config , downstream_dict )
    else:
        raise NotImplementedError(f"""S3PRL weights conversion is not supported for {arch}""" )
    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]
    hf_feature_extractor.save_pretrained(model_dump_path )
    hf_model.save_pretrained(model_dump_path )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_: int =argparse.ArgumentParser()
parser.add_argument(
'--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.'
)
parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.')
parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.')
SCREAMING_SNAKE_CASE_: List[str] =parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 1 | '''simple docstring'''
from __future__ import annotations
def slowsort( sequence : list , start : int | None = None , end : int | None = None ) -> None:
    '''simple docstring'''
    if start is None:
        start = 0
    if end is None:
        end = len(sequence ) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    slowsort(sequence , start , mid )
    slowsort(sequence , mid + 1 , end )
    if sequence[end] < sequence[mid]:
        sequence[end] , sequence[mid] = sequence[mid] , sequence[end]
    slowsort(sequence , start , end - 1 )
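# Hedged usage example (added for illustration): slowsort sorts in place.
# >>> seq = [5, 2, 4, 1]
# >>> slowsort(seq)
# >>> seq
# [1, 2, 4, 5]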
if __name__ == "__main__":
from doctest import testmod
testmod()
| 1 | 1 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
SCREAMING_SNAKE_CASE_: Dict =logging.get_logger(__name__)
VOCAB_FILES_NAMES ={'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP ={
'tokenizer_file': {
'bigscience/tokenizer': 'https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json',
'bigscience/bloom-560m': 'https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json',
'bigscience/bloom-1b1': 'https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json',
'bigscience/bloom-1b7': 'https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json',
'bigscience/bloom-3b': 'https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json',
'bigscience/bloom-7b1': 'https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json',
'bigscience/bloom': 'https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json',
},
}
class __A ( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = None
    def __init__(self , vocab_file=None , merges_file=None , tokenizer_file=None , unk_token="<unk>" , bos_token="<s>" , eos_token="</s>" , pad_token="<pad>" , add_prefix_space=False , clean_up_tokenization_spaces=False , **kwargs , ):
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , pad_token=pad_token , add_prefix_space=add_prefix_space , clean_up_tokenization_spaces=clean_up_tokenization_spaces , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get("add_prefix_space" , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop("type" ) )
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
    def _batch_encode_plus(self , *args , **kwargs ) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words" , False )
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"""
                " pretokenized inputs." )
        return super()._batch_encode_plus(*args , **kwargs )
    def _encode_plus(self , *args , **kwargs ) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words" , False )
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"""
                " pretokenized inputs." )
        return super()._encode_plus(*args , **kwargs )
    def save_vocabulary(self , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
    def _build_conversation_input_ids(self , conversation : "Conversation" ) -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text , add_special_tokens=False ) + [self.eos_token_id] )
        if len(input_ids ) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
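# Hedged usage sketch (added for illustration; downloads from the Hub, checkpoint
# name assumed to match the released BLOOM tokenizers):
# from transformers import BloomTokenizerFast
# tok = BloomTokenizerFast.from_pretrained("bigscience/bloom-560m")
# tok("Hello world")["input_ids"]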
| 1 | '''simple docstring'''
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class __A ( BertTokenizationTest ):
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True
    @slow
    def test_sequence_builders(self ):
        tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased" )
        text = tokenizer.encode("sequence builders" , add_special_tokens=False )
        text_a = tokenizer.encode("multi-sequence build" , add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
| 1 | 1 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class __A ( unittest.TestCase ):
    def tearDown(self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
    def dummy_image(self ):
UpperCAmelCase_ = 1
UpperCAmelCase_ = 3
UpperCAmelCase_ = (32, 32)
UpperCAmelCase_ = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__a )
return image
@property
    def dummy_cond_unet_upscale(self ):
torch.manual_seed(0 )
UpperCAmelCase_ = UNetaDConditionModel(
block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=__a , only_cross_attention=(True, True, False) , num_class_embeds=100 , )
return model
@property
    def dummy_vae(self ):
torch.manual_seed(0 )
UpperCAmelCase_ = AutoencoderKL(
block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
return model
@property
    def dummy_text_encoder(self ):
torch.manual_seed(0 )
UpperCAmelCase_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=512 , )
return CLIPTextModel(__a )
    def test_stable_diffusion_upscale(self ):
UpperCAmelCase_ = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ = self.dummy_cond_unet_upscale
UpperCAmelCase_ = DDPMScheduler()
UpperCAmelCase_ = DDIMScheduler(prediction_type="v_prediction" )
UpperCAmelCase_ = self.dummy_vae
UpperCAmelCase_ = self.dummy_text_encoder
UpperCAmelCase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
UpperCAmelCase_ = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCAmelCase_ = Image.fromarray(np.uinta(__a ) ).convert("RGB" ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
UpperCAmelCase_ = StableDiffusionUpscalePipeline(
unet=__a , low_res_scheduler=__a , scheduler=__a , vae=__a , text_encoder=__a , tokenizer=__a , max_noise_level=350 , )
UpperCAmelCase_ = sd_pipe.to(__a )
sd_pipe.set_progress_bar_config(disable=__a )
UpperCAmelCase_ = "A painting of a squirrel eating a burger"
UpperCAmelCase_ = torch.Generator(device=__a ).manual_seed(0 )
UpperCAmelCase_ = sd_pipe(
[prompt] , image=__a , generator=__a , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , )
UpperCAmelCase_ = output.images
UpperCAmelCase_ = torch.Generator(device=__a ).manual_seed(0 )
UpperCAmelCase_ = sd_pipe(
[prompt] , image=__a , generator=__a , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , return_dict=__a , )[0]
UpperCAmelCase_ = image[0, -3:, -3:, -1]
UpperCAmelCase_ = image_from_tuple[0, -3:, -3:, -1]
UpperCAmelCase_ = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
UpperCAmelCase_ = np.array([0.31_13, 0.39_10, 0.42_72, 0.48_59, 0.50_61, 0.46_52, 0.53_62, 0.57_15, 0.56_61] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
    def test_stable_diffusion_upscale_batch(self ):
UpperCAmelCase_ = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ = self.dummy_cond_unet_upscale
UpperCAmelCase_ = DDPMScheduler()
UpperCAmelCase_ = DDIMScheduler(prediction_type="v_prediction" )
UpperCAmelCase_ = self.dummy_vae
UpperCAmelCase_ = self.dummy_text_encoder
UpperCAmelCase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
UpperCAmelCase_ = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCAmelCase_ = Image.fromarray(np.uinta(__a ) ).convert("RGB" ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
UpperCAmelCase_ = StableDiffusionUpscalePipeline(
unet=__a , low_res_scheduler=__a , scheduler=__a , vae=__a , text_encoder=__a , tokenizer=__a , max_noise_level=350 , )
UpperCAmelCase_ = sd_pipe.to(__a )
sd_pipe.set_progress_bar_config(disable=__a )
UpperCAmelCase_ = "A painting of a squirrel eating a burger"
UpperCAmelCase_ = sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , )
UpperCAmelCase_ = output.images
assert image.shape[0] == 2
UpperCAmelCase_ = torch.Generator(device=__a ).manual_seed(0 )
UpperCAmelCase_ = sd_pipe(
[prompt] , image=__a , generator=__a , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , )
UpperCAmelCase_ = output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
    def test_stable_diffusion_upscale_fp16(self ):
UpperCAmelCase_ = self.dummy_cond_unet_upscale
UpperCAmelCase_ = DDPMScheduler()
UpperCAmelCase_ = DDIMScheduler(prediction_type="v_prediction" )
UpperCAmelCase_ = self.dummy_vae
UpperCAmelCase_ = self.dummy_text_encoder
UpperCAmelCase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
UpperCAmelCase_ = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCAmelCase_ = Image.fromarray(np.uinta(__a ) ).convert("RGB" ).resize((64, 64) )
# put models in fp16, except vae as it overflows in fp16
UpperCAmelCase_ = unet.half()
UpperCAmelCase_ = text_encoder.half()
# make sure here that pndm scheduler skips prk
UpperCAmelCase_ = StableDiffusionUpscalePipeline(
unet=__a , low_res_scheduler=__a , scheduler=__a , vae=__a , text_encoder=__a , tokenizer=__a , max_noise_level=350 , )
UpperCAmelCase_ = sd_pipe.to(__a )
sd_pipe.set_progress_bar_config(disable=__a )
UpperCAmelCase_ = "A painting of a squirrel eating a burger"
UpperCAmelCase_ = torch.manual_seed(0 )
UpperCAmelCase_ = sd_pipe(
[prompt] , image=__a , generator=__a , num_inference_steps=2 , output_type="np" , ).images
UpperCAmelCase_ = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class __A ( unittest.TestCase ):
    def tearDown(self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_stable_diffusion_upscale_pipeline(self ):
UpperCAmelCase_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-upscale/low_res_cat.png" )
UpperCAmelCase_ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
"/upsampled_cat.npy" )
UpperCAmelCase_ = "stabilityai/stable-diffusion-x4-upscaler"
UpperCAmelCase_ = StableDiffusionUpscalePipeline.from_pretrained(__a )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
pipe.enable_attention_slicing()
UpperCAmelCase_ = "a cat sitting on a park bench"
UpperCAmelCase_ = torch.manual_seed(0 )
UpperCAmelCase_ = pipe(
prompt=__a , image=__a , generator=__a , output_type="np" , )
UpperCAmelCase_ = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 1E-3
    def test_stable_diffusion_upscale_pipeline_fp16(self ):
UpperCAmelCase_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-upscale/low_res_cat.png" )
UpperCAmelCase_ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
"/upsampled_cat_fp16.npy" )
UpperCAmelCase_ = "stabilityai/stable-diffusion-x4-upscaler"
UpperCAmelCase_ = StableDiffusionUpscalePipeline.from_pretrained(
__a , torch_dtype=torch.floataa , )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
pipe.enable_attention_slicing()
UpperCAmelCase_ = "a cat sitting on a park bench"
UpperCAmelCase_ = torch.manual_seed(0 )
UpperCAmelCase_ = pipe(
prompt=__a , image=__a , generator=__a , output_type="np" , )
UpperCAmelCase_ = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5E-1
    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
UpperCAmelCase_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-upscale/low_res_cat.png" )
UpperCAmelCase_ = "stabilityai/stable-diffusion-x4-upscaler"
UpperCAmelCase_ = StableDiffusionUpscalePipeline.from_pretrained(
__a , torch_dtype=torch.floataa , )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
UpperCAmelCase_ = "a cat sitting on a park bench"
UpperCAmelCase_ = torch.manual_seed(0 )
UpperCAmelCase_ = pipe(
prompt=__a , image=__a , generator=__a , num_inference_steps=5 , output_type="np" , )
UpperCAmelCase_ = torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 10**9
| 1 | '''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger =logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys =[]
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f"transformer.encoder.layers.{i}.self_attn.out_proj.weight", f"encoder.layers.{i}.self_attn.out_proj.weight")
)
rename_keys.append(
(f"transformer.encoder.layers.{i}.self_attn.out_proj.bias", f"encoder.layers.{i}.self_attn.out_proj.bias")
)
rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"encoder.layers.{i}.fc1.weight"))
rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"encoder.layers.{i}.fc1.bias"))
rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"encoder.layers.{i}.fc2.weight"))
rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"encoder.layers.{i}.fc2.bias"))
rename_keys.append(
(f"transformer.encoder.layers.{i}.norm1.weight", f"encoder.layers.{i}.self_attn_layer_norm.weight")
)
rename_keys.append((f"transformer.encoder.layers.{i}.norm1.bias", f"encoder.layers.{i}.self_attn_layer_norm.bias"))
rename_keys.append((f"transformer.encoder.layers.{i}.norm2.weight", f"encoder.layers.{i}.final_layer_norm.weight"))
rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"encoder.layers.{i}.final_layer_norm.bias"))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", f"decoder.layers.{i}.self_attn.out_proj.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"decoder.layers.{i}.self_attn.out_proj.bias")
)
rename_keys.append(
(
f"transformer.decoder.layers.{i}.cross_attn.out_proj.weight",
f"decoder.layers.{i}.encoder_attn.out_proj.weight",
)
)
rename_keys.append(
(
f"transformer.decoder.layers.{i}.cross_attn.out_proj.bias",
f"decoder.layers.{i}.encoder_attn.out_proj.bias",
)
)
rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"decoder.layers.{i}.fc1.weight"))
rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"decoder.layers.{i}.fc1.bias"))
rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"decoder.layers.{i}.fc2.weight"))
rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"decoder.layers.{i}.fc2.bias"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.norm1.weight", f"decoder.layers.{i}.self_attn_layer_norm.weight")
)
rename_keys.append((f"transformer.decoder.layers.{i}.norm1.bias", f"decoder.layers.{i}.self_attn_layer_norm.bias"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.norm2.weight", f"decoder.layers.{i}.encoder_attn_layer_norm.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.norm2.bias", f"decoder.layers.{i}.encoder_attn_layer_norm.bias")
)
rename_keys.append((f"transformer.decoder.layers.{i}.norm3.weight", f"decoder.layers.{i}.final_layer_norm.weight"))
rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"decoder.layers.{i}.final_layer_norm.bias"))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f"transformer.decoder.layers.{i}.sa_qcontent_proj.weight", f"decoder.layers.{i}.sa_qcontent_proj.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.sa_kcontent_proj.weight", f"decoder.layers.{i}.sa_kcontent_proj.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.sa_qpos_proj.weight", f"decoder.layers.{i}.sa_qpos_proj.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.sa_kpos_proj.weight", f"decoder.layers.{i}.sa_kpos_proj.weight")
)
rename_keys.append((f"transformer.decoder.layers.{i}.sa_v_proj.weight", f"decoder.layers.{i}.sa_v_proj.weight"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.ca_qcontent_proj.weight", f"decoder.layers.{i}.ca_qcontent_proj.weight")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.ca_kcontent_proj.weight", f"decoder.layers.{i}.ca_kcontent_proj.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.ca_kpos_proj.weight", f"decoder.layers.{i}.ca_kpos_proj.weight")
)
rename_keys.append((f"transformer.decoder.layers.{i}.ca_v_proj.weight", f"decoder.layers.{i}.ca_v_proj.weight"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight", f"decoder.layers.{i}.ca_qpos_sine_proj.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.sa_qcontent_proj.bias", f"decoder.layers.{i}.sa_qcontent_proj.bias")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.sa_kcontent_proj.bias", f"decoder.layers.{i}.sa_kcontent_proj.bias")
)
rename_keys.append((f"transformer.decoder.layers.{i}.sa_qpos_proj.bias", f"decoder.layers.{i}.sa_qpos_proj.bias"))
rename_keys.append((f"transformer.decoder.layers.{i}.sa_kpos_proj.bias", f"decoder.layers.{i}.sa_kpos_proj.bias"))
rename_keys.append((f"transformer.decoder.layers.{i}.sa_v_proj.bias", f"decoder.layers.{i}.sa_v_proj.bias"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.ca_qcontent_proj.bias", f"decoder.layers.{i}.ca_qcontent_proj.bias")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.ca_kcontent_proj.bias", f"decoder.layers.{i}.ca_kcontent_proj.bias")
)
rename_keys.append((f"transformer.decoder.layers.{i}.ca_kpos_proj.bias", f"decoder.layers.{i}.ca_kpos_proj.bias"))
rename_keys.append((f"transformer.decoder.layers.{i}.ca_v_proj.bias", f"decoder.layers.{i}.ca_v_proj.bias"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias", f"decoder.layers.{i}.ca_qpos_sine_proj.bias")
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
('transformer.decoder.ref_point_head.layers.0.weight', 'decoder.ref_point_head.layers.0.weight'),
('transformer.decoder.ref_point_head.layers.0.bias', 'decoder.ref_point_head.layers.0.bias'),
('transformer.decoder.ref_point_head.layers.1.weight', 'decoder.ref_point_head.layers.1.weight'),
('transformer.decoder.ref_point_head.layers.1.bias', 'decoder.ref_point_head.layers.1.bias'),
('transformer.decoder.query_scale.layers.0.weight', 'decoder.query_scale.layers.0.weight'),
('transformer.decoder.query_scale.layers.0.bias', 'decoder.query_scale.layers.0.bias'),
('transformer.decoder.query_scale.layers.1.weight', 'decoder.query_scale.layers.1.weight'),
('transformer.decoder.query_scale.layers.1.bias', 'decoder.query_scale.layers.1.bias'),
('transformer.decoder.layers.0.ca_qpos_proj.weight', 'decoder.layers.0.ca_qpos_proj.weight'),
('transformer.decoder.layers.0.ca_qpos_proj.bias', 'decoder.layers.0.ca_qpos_proj.bias'),
]
)
def rename_key( state_dict , old , new ):
    '''simple docstring'''
    val = state_dict.pop(old )
    state_dict[new] = val
def rename_backbone_keys( state_dict ):
    '''simple docstring'''
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body" , "backbone.conv_encoder.model" )
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v( state_dict , is_panoptic=False ):
    '''simple docstring'''
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."
    # first: transformer encoder
    for i in range(6 ):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" )
        in_proj_bias = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"""encoder.layers.{i}.self_attn.q_proj.weight"""] = in_proj_weight[:2_56, :]
        state_dict[f"""encoder.layers.{i}.self_attn.q_proj.bias"""] = in_proj_bias[:2_56]
        state_dict[f"""encoder.layers.{i}.self_attn.k_proj.weight"""] = in_proj_weight[2_56:5_12, :]
        state_dict[f"""encoder.layers.{i}.self_attn.k_proj.bias"""] = in_proj_bias[2_56:5_12]
        state_dict[f"""encoder.layers.{i}.self_attn.v_proj.weight"""] = in_proj_weight[-2_56:, :]
        state_dict[f"""encoder.layers.{i}.self_attn.v_proj.bias"""] = in_proj_bias[-2_56:]
def prepare_img():
    '''simple docstring'''
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint( model_name , pytorch_dump_folder_path ):
    '''simple docstring'''
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = "resnet101"
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 2_50
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
        id2label = {int(k ): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # load image processor
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    image_processor = ConditionalDetrImageProcessor(format=format )
    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img , return_tensors="pt" )
    pixel_values = encoding["pixel_values"]
    logger.info(f"""Converting model {model_name}...""" )
    # load original model from torch hub
    conditional_detr = torch.hub.load("DeppMeng/ConditionalDETR" , model_name , pretrained=True ).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = "conditional_detr." + src
        rename_key(state_dict , src , dest )
    state_dict = rename_backbone_keys(state_dict )
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict , is_panoptic=is_panoptic )
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr" )
                and not key.startswith("class_labels_classifier" )
                and not key.startswith("bbox_predictor" )
            ):
                val = state_dict.pop(key )
                state_dict["conditional_detr.model" + key[16:]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key )
                state_dict["conditional_detr." + key] = val
            elif key.startswith("bbox_attention" ) or key.startswith("mask_head" ):
                continue
            else:
                val = state_dict.pop(key )
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ):
                val = state_dict.pop(key )
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config ) if is_panoptic else ConditionalDetrForObjectDetection(config )
    model.load_state_dict(state_dict )
    model.eval()
    model.push_to_hub(repo_id=model_name , organization="DepuMeng" , commit_message="Add model" )
    # verify our conversion
    original_outputs = conditional_detr(pixel_values )
    outputs = model(pixel_values )
    assert torch.allclose(outputs.logits , original_outputs["pred_logits"] , atol=1E-4 )
    assert torch.allclose(outputs.pred_boxes , original_outputs["pred_boxes"] , atol=1E-4 )
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks , original_outputs["pred_masks"] , atol=1E-4 )
    # Save model and image processor
    logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_: List[str] =argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='conditional_detr_resnet50',
type=str,
help='Name of the CONDITIONAL_DETR model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
SCREAMING_SNAKE_CASE_: int =parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 1 | 1 |
'''simple docstring'''
import itertools
import math
def is_prime( number : int ) -> bool:
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def prime_generator():
    '''simple docstring'''
    num = 2
    while True:
        if is_prime(num ):
            yield num
        num += 1
def solution( nth : int = 1_00_01 ) -> int:
    '''simple docstring'''
    return next(itertools.islice(prime_generator() , nth - 1 , nth ) )
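# Hedged worked example: the first five primes are 2, 3, 5, 7, 11.
# >>> solution(5)
# 11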
if __name__ == "__main__":
print(f"{solution() = }")
| 1 | '''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
SCREAMING_SNAKE_CASE_: Union[str, Any] =logging.get_logger(__name__)
class __A ( CLIPImageProcessor ):
    def __init__(self , *args , **kwargs ):
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead." , FutureWarning , )
        super().__init__(*args , **kwargs )
| 1 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure ={
'configuration_rembert': ['REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RemBertConfig', 'RemBertOnnxConfig']
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_rembert'] = ['RemBertTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_rembert_fast'] = ['RemBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_rembert'] = [
'REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RemBertForCausalLM',
'RemBertForMaskedLM',
'RemBertForMultipleChoice',
'RemBertForQuestionAnswering',
'RemBertForSequenceClassification',
'RemBertForTokenClassification',
'RemBertLayer',
'RemBertModel',
'RemBertPreTrainedModel',
'load_tf_weights_in_rembert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_rembert'] = [
'TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRemBertForCausalLM',
'TFRemBertForMaskedLM',
'TFRemBertForMultipleChoice',
'TFRemBertForQuestionAnswering',
'TFRemBertForSequenceClassification',
'TFRemBertForTokenClassification',
'TFRemBertLayer',
'TFRemBertModel',
'TFRemBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 1 | '''simple docstring'''
from __future__ import annotations
import queue
class TreeNode:
    def __init__(self , data : int ):
        self.data = data
        self.left = None
        self.right = None
def build_tree() -> TreeNode:
    '''simple docstring'''
    print("\n********Press N to stop entering at any point of time********\n" )
    check = input("Enter the value of the root node: " ).strip().lower()
    q = queue.Queue()
    tree_node = TreeNode(int(check ) )
    q.put(tree_node )
    while not q.empty():
        node_found = q.get()
        msg = f"""Enter the left node of {node_found.data}: """
        check = input(msg ).strip().lower() or "n"
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check ) )
        node_found.left = left_node
        q.put(left_node )
        msg = f"""Enter the right node of {node_found.data}: """
        check = input(msg ).strip().lower() or "n"
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check ) )
        node_found.right = right_node
        q.put(right_node )
raise
def pre_order( node : TreeNode ) -> None:
    '''simple docstring'''
    if not isinstance(node , TreeNode ) or not node:
return
print(node.data , end="," )
pre_order(node.left )
pre_order(node.right )
def in_order( node : TreeNode ) -> None:
    '''simple docstring'''
    if not isinstance(node , TreeNode ) or not node:
return
in_order(node.left )
print(node.data , end="," )
in_order(node.right )
def post_order( node : TreeNode ) -> None:
    '''simple docstring'''
    if not isinstance(node , TreeNode ) or not node:
return
post_order(node.left )
post_order(node.right )
print(node.data , end="," )
def level_order( node : TreeNode ) -> None:
    '''simple docstring'''
    if not isinstance(node , TreeNode ) or not node:
        return
    q = queue.Queue()
    q.put(node )
    while not q.empty():
        node_dequeued = q.get()
print(node_dequeued.data , end="," )
if node_dequeued.left:
q.put(node_dequeued.left )
if node_dequeued.right:
q.put(node_dequeued.right )
def level_order_actual( node : TreeNode ) -> None:
    '''simple docstring'''
    if not isinstance(node , TreeNode ) or not node:
        return
    q = queue.Queue()
    q.put(node )
    while not q.empty():
        list_ = []
        while not q.empty():
            node_dequeued = q.get()
            print(node_dequeued.data , end="," )
            if node_dequeued.left:
                list_.append(node_dequeued.left )
            if node_dequeued.right:
                list_.append(node_dequeued.right )
        print()
        for node in list_:
            q.put(node )
def pre_order_iter( node : TreeNode ) -> None:
    '''simple docstring'''
    if not isinstance(node , TreeNode ) or not node:
        return
    stack = []
    n = node
    while n or stack:
        while n: # start from root node, find its left child
            print(n.data , end="," )
            stack.append(n )
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right
def in_order_iter( node : TreeNode ) -> None:
    '''simple docstring'''
    if not isinstance(node , TreeNode ) or not node:
        return
    stack = []
    n = node
    while n or stack:
        while n:
            stack.append(n )
            n = n.left
        n = stack.pop()
        print(n.data , end="," )
        n = n.right
def post_order_iter( node : TreeNode ) -> None:
    '''simple docstring'''
    if not isinstance(node , TreeNode ) or not node:
        return
    stack1 , stack2 = [], []
    n = node
    stack1.append(n )
    while stack1: # to find the reversed order of post order, store it in stack2
        n = stack1.pop()
        if n.left:
            stack1.append(n.left )
        if n.right:
            stack1.append(n.right )
        stack2.append(n )
    while stack2: # pop up from stack2 will be the post order
        print(stack2.pop().data , end="," )
def prompt( s : str = "" , width : int = 50 , char : str = "*" ) -> str:
    '''simple docstring'''
    if not s:
        return "\n" + width * char
    left , extra = divmod(width - len(s ) - 2 , 2 )
return f"""{left * char} {s} {(left + extra) * char}"""
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt('Binary Tree Traversals'))
SCREAMING_SNAKE_CASE_: TreeNode =build_tree()
print(prompt('Pre Order Traversal'))
pre_order(node)
print(prompt() + '\n')
print(prompt('In Order Traversal'))
in_order(node)
print(prompt() + '\n')
print(prompt('Post Order Traversal'))
post_order(node)
print(prompt() + '\n')
print(prompt('Level Order Traversal'))
level_order(node)
print(prompt() + '\n')
print(prompt('Actual Level Order Traversal'))
level_order_actual(node)
print('*' * 50 + '\n')
print(prompt('Pre Order Traversal - Iteration Version'))
pre_order_iter(node)
print(prompt() + '\n')
print(prompt('In Order Traversal - Iteration Version'))
in_order_iter(node)
print(prompt() + '\n')
print(prompt('Post Order Traversal - Iteration Version'))
post_order_iter(node)
print(prompt())
| 1 | 1 |
'''Construct and plot the Koch snowflake fractal with numpy and matplotlib.'''
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
SCREAMING_SNAKE_CASE_: str =numpy.array([0, 0])
SCREAMING_SNAKE_CASE_: str =numpy.array([0.5, 0.8660254])
SCREAMING_SNAKE_CASE_: Optional[int] =numpy.array([1, 0])
SCREAMING_SNAKE_CASE_: List[Any] =[VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def lowerCAmelCase_ ( snake_case_ : list[numpy.ndarray] , snake_case_ : int ) -> list[numpy.ndarray]:
    '''Apply ``iteration_step`` to the vector list the given number of times.'''
UpperCAmelCase_ = initial_vectors
for _ in range(snake_case_ ):
UpperCAmelCase_ = iteration_step(snake_case_ )
return vectors
def lowerCAmelCase_ ( snake_case_ : list[numpy.ndarray] ) -> list[numpy.ndarray]:
    '''Replace each segment with four segments forming the Koch bump.'''
UpperCAmelCase_ = []
for i, start_vector in enumerate(vectors[:-1] ):
UpperCAmelCase_ = vectors[i + 1]
new_vectors.append(snake_case_ )
UpperCAmelCase_ = end_vector - start_vector
new_vectors.append(start_vector + difference_vector / 3 )
new_vectors.append(
start_vector + difference_vector / 3 + rotate(difference_vector / 3 , 60 ) )
new_vectors.append(start_vector + difference_vector * 2 / 3 )
new_vectors.append(vectors[-1] )
return new_vectors
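# Each segment is replaced by four shorter ones (the middle third is bent into
# two sides of an equilateral triangle), so starting from the 3-segment
# triangle, n iterations yield 3 * 4**n segments, i.e. 3 * 4**n + 1 points.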
def lowerCAmelCase_ ( snake_case_ : numpy.ndarray , snake_case_ : float ) -> numpy.ndarray:
    '''Rotate a 2D vector by the given angle in degrees.'''
UpperCAmelCase_ = numpy.radians(snake_case_ )
UpperCAmelCase_ , UpperCAmelCase_ = numpy.cos(snake_case_ ), numpy.sin(snake_case_ )
UpperCAmelCase_ = numpy.array(((c, -s), (s, c)) )
return numpy.dot(snake_case_ , snake_case_ )
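# With M = [[c, -s], [s, c]] this is the standard counter-clockwise rotation
# matrix; e.g. applying it to the unit x-vector at 90 degrees gives, up to
# floating point error, the unit y-vector (0, 1).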
def lowerCAmelCase_ ( snake_case_ : list[numpy.ndarray] ) -> None:
    '''Plot the polyline described by the vectors with matplotlib.'''
UpperCAmelCase_ = plt.gca()
axes.set_aspect("equal" )
# matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
# y-coordinates as inputs, which are constructed from the vector-list using
# zip()
UpperCAmelCase_ , UpperCAmelCase_ = zip(*snake_case_ )
plt.plot(snake_case_ , snake_case_ )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
SCREAMING_SNAKE_CASE_: List[str] =iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
| 1 | '''Fill-mask pipeline: predict the most likely tokens at masked positions.'''
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
SCREAMING_SNAKE_CASE_: Optional[int] =logging.get_logger(__name__)
@add_end_docstrings(
UpperCamelCase__ , r"""
top_k (`int`, defaults to 5):
The number of predictions to return.
targets (`str` or `List[str]`, *optional*):
When passed, the model will limit the scores to the passed targets instead of looking up in the whole
vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
token will be used (with a warning, and that might be slower).
""" , )
class __A ( UpperCamelCase__ ):
def _lowercase (self : str , __a : GenericTensor ):
if self.framework == "tf":
UpperCAmelCase_ = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
elif self.framework == "pt":
UpperCAmelCase_ = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=__a )
else:
raise ValueError("Unsupported framework" )
return masked_index
def _lowercase (self : Tuple , __a : GenericTensor ):
UpperCAmelCase_ = self.get_masked_index(__a )
UpperCAmelCase_ = np.prod(masked_index.shape )
if numel < 1:
raise PipelineException(
"fill-mask" , self.model.base_model_prefix , f"""No mask_token ({self.tokenizer.mask_token}) found on the input""" , )
def _lowercase (self : List[Any] , __a : GenericTensor ):
if isinstance(__a , __a ):
for model_input in model_inputs:
self._ensure_exactly_one_mask_token(model_input["input_ids"][0] )
else:
for input_ids in model_inputs["input_ids"]:
self._ensure_exactly_one_mask_token(__a )
def _lowercase (self : Tuple , __a : Dict , __a : List[str]=None , **__a : Any ):
if return_tensors is None:
UpperCAmelCase_ = self.framework
UpperCAmelCase_ = self.tokenizer(__a , return_tensors=__a )
self.ensure_exactly_one_mask_token(__a )
return model_inputs
def _lowercase (self : str , __a : Optional[int] ):
UpperCAmelCase_ = self.model(**__a )
UpperCAmelCase_ = model_inputs["input_ids"]
return model_outputs
def _lowercase (self : List[str] , __a : Tuple , __a : int=5 , __a : Dict=None ):
# Cap top_k if there are targets
if target_ids is not None and target_ids.shape[0] < top_k:
UpperCAmelCase_ = target_ids.shape[0]
UpperCAmelCase_ = model_outputs["input_ids"][0]
UpperCAmelCase_ = model_outputs["logits"]
if self.framework == "tf":
UpperCAmelCase_ = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
UpperCAmelCase_ = outputs.numpy()
UpperCAmelCase_ = outputs[0, masked_index, :]
UpperCAmelCase_ = stable_softmax(__a , axis=-1 )
if target_ids is not None:
UpperCAmelCase_ = tf.gather_nd(tf.squeeze(__a , 0 ) , target_ids.reshape(-1 , 1 ) )
UpperCAmelCase_ = tf.expand_dims(__a , 0 )
UpperCAmelCase_ = tf.math.top_k(__a , k=__a )
UpperCAmelCase_ , UpperCAmelCase_ = topk.values.numpy(), topk.indices.numpy()
else:
UpperCAmelCase_ = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=__a ).squeeze(-1 )
# Fill mask pipeline supports only one ${mask_token} per sample
UpperCAmelCase_ = outputs[0, masked_index, :]
UpperCAmelCase_ = logits.softmax(dim=-1 )
if target_ids is not None:
UpperCAmelCase_ = probs[..., target_ids]
UpperCAmelCase_ , UpperCAmelCase_ = probs.topk(__a )
UpperCAmelCase_ = []
UpperCAmelCase_ = values.shape[0] == 1
for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ):
UpperCAmelCase_ = []
for v, p in zip(_values , _predictions ):
# Copy is important since we're going to modify this array in place
UpperCAmelCase_ = input_ids.numpy().copy()
if target_ids is not None:
UpperCAmelCase_ = target_ids[p].tolist()
UpperCAmelCase_ = p
# Filter padding out:
UpperCAmelCase_ = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
# Originally we skip special tokens to give readable output.
# For multi masks though, the other [MASK] would be removed otherwise
# making the output look odd, so we add them back
UpperCAmelCase_ = self.tokenizer.decode(__a , skip_special_tokens=__a )
UpperCAmelCase_ = {"score": v, "token": p, "token_str": self.tokenizer.decode([p] ), "sequence": sequence}
row.append(__a )
result.append(__a )
if single_mask:
return result[0]
return result
def _lowercase (self : Dict , __a : List[Any] , __a : List[str]=None ):
if isinstance(__a , __a ):
UpperCAmelCase_ = [targets]
try:
UpperCAmelCase_ = self.tokenizer.get_vocab()
except Exception:
UpperCAmelCase_ = {}
UpperCAmelCase_ = []
for target in targets:
UpperCAmelCase_ = vocab.get(__a , __a )
if id_ is None:
UpperCAmelCase_ = self.tokenizer(
__a , add_special_tokens=__a , return_attention_mask=__a , return_token_type_ids=__a , max_length=1 , truncation=__a , )["input_ids"]
if len(__a ) == 0:
logger.warning(
f"""The specified target token `{target}` does not exist in the model vocabulary. """
"We cannot replace it with anything meaningful, ignoring it" )
continue
UpperCAmelCase_ = input_ids[0]
# XXX: If users encounter this pass
# it becomes pretty slow, so let's make sure
# The warning enables them to fix the input to
# get faster performance.
logger.warning(
f"""The specified target token `{target}` does not exist in the model vocabulary. """
f"""Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.""" )
target_ids.append(id_ )
UpperCAmelCase_ = list(set(__a ) )
if len(__a ) == 0:
raise ValueError("At least one target must be provided when passed." )
UpperCAmelCase_ = np.array(__a )
return target_ids
def _lowercase (self : Tuple , __a : Dict=None , __a : List[str]=None ):
UpperCAmelCase_ = {}
if targets is not None:
UpperCAmelCase_ = self.get_target_ids(__a , __a )
UpperCAmelCase_ = target_ids
if top_k is not None:
UpperCAmelCase_ = top_k
if self.tokenizer.mask_token_id is None:
raise PipelineException(
"fill-mask" , self.model.base_model_prefix , "The tokenizer does not define a `mask_token`." )
return {}, {}, postprocess_params
def __call__(self : Union[str, Any] , __a : str , *__a : Any , **__a : Tuple ):
UpperCAmelCase_ = super().__call__(__a , **__a )
if isinstance(__a , __a ) and len(__a ) == 1:
return outputs[0]
return outputs
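# A minimal usage sketch (editor illustration; the checkpoint name below is an
# example and is not pinned down by this file):
#   from transformers import pipeline
#   unmasker = pipeline("fill-mask", model="bert-base-uncased")
#   unmasker("Paris is the [MASK] of France.", top_k=3, targets=["capital"])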
| 1 | 1 |
'''Lazy import structure for the ALBERT model family (PyTorch, TF and Flax).'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE_: Dict ={
'configuration_albert': ['ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'AlbertConfig', 'AlbertOnnxConfig'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_: Tuple =['AlbertTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_: str =['AlbertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_: Optional[int] =[
'ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'AlbertForMaskedLM',
'AlbertForMultipleChoice',
'AlbertForPreTraining',
'AlbertForQuestionAnswering',
'AlbertForSequenceClassification',
'AlbertForTokenClassification',
'AlbertModel',
'AlbertPreTrainedModel',
'load_tf_weights_in_albert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_: List[str] =[
'TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFAlbertForMaskedLM',
'TFAlbertForMultipleChoice',
'TFAlbertForPreTraining',
'TFAlbertForQuestionAnswering',
'TFAlbertForSequenceClassification',
'TFAlbertForTokenClassification',
'TFAlbertMainLayer',
'TFAlbertModel',
'TFAlbertPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_: Optional[Any] =[
'FlaxAlbertForMaskedLM',
'FlaxAlbertForMultipleChoice',
'FlaxAlbertForPreTraining',
'FlaxAlbertForQuestionAnswering',
'FlaxAlbertForSequenceClassification',
'FlaxAlbertForTokenClassification',
'FlaxAlbertModel',
'FlaxAlbertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert import AlbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert_fast import AlbertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_albert import (
ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
AlbertPreTrainedModel,
load_tf_weights_in_albert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_albert import (
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAlbertForMaskedLM,
TFAlbertForMultipleChoice,
TFAlbertForPreTraining,
TFAlbertForQuestionAnswering,
TFAlbertForSequenceClassification,
TFAlbertForTokenClassification,
TFAlbertMainLayer,
TFAlbertModel,
TFAlbertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
FlaxAlbertPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE_: Tuple =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
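# The _LazyModule registered above resolves attributes such as ``AlbertModel``
# only on first access, so importing this package does not pull in the heavy
# torch/tensorflow/flax backends up front.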
| 1 | '''Data utilities for the HANS (Heuristic Analysis for NLI Systems) dataset.'''
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE_: str =logging.getLogger(__name__)
@dataclass(frozen=UpperCamelCase__ )
class __A :
a__ : str
a__ : str
a__ : Optional[str] = None
a__ : Optional[str] = None
a__ : Optional[str] = None
@dataclass(frozen=UpperCamelCase__ )
class __A :
a__ : List[int]
a__ : Optional[List[int]] = None
a__ : Optional[List[int]] = None
a__ : Optional[Union[int, float]] = None
a__ : Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class __A ( UpperCamelCase__ ):
a__ : List[InputFeatures]
def __init__(self : Any , __a : str , __a : PreTrainedTokenizer , __a : str , __a : Optional[int] = None , __a : Dict=False , __a : bool = False , ):
UpperCAmelCase_ = hans_processors[task]()
UpperCAmelCase_ = os.path.join(
__a , "cached_{}_{}_{}_{}".format(
"dev" if evaluate else "train" , tokenizer.__class__.__name__ , str(__a ) , __a , ) , )
UpperCAmelCase_ = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
UpperCAmelCase_ , UpperCAmelCase_ = label_list[2], label_list[1]
UpperCAmelCase_ = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
UpperCAmelCase_ = cached_features_file + ".lock"
with FileLock(__a ):
if os.path.exists(__a ) and not overwrite_cache:
logger.info(f"""Loading features from cached file {cached_features_file}""" )
UpperCAmelCase_ = torch.load(__a )
else:
logger.info(f"""Creating features from dataset file at {data_dir}""" )
UpperCAmelCase_ = (
processor.get_dev_examples(__a ) if evaluate else processor.get_train_examples(__a )
)
logger.info("Training examples: %s" , len(__a ) )
UpperCAmelCase_ = hans_convert_examples_to_features(__a , __a , __a , __a )
logger.info("Saving features into cached file %s" , __a )
torch.save(self.features , __a )
def __len__(self : List[Any] ):
return len(self.features )
def __getitem__(self : Any , __a : Optional[Any] ):
return self.features[i]
def _lowercase (self : Union[str, Any] ):
return self.label_list
if is_tf_available():
import tensorflow as tf
class __A :
a__ : List[InputFeatures]
def __init__(self : Union[str, Any] , __a : str , __a : PreTrainedTokenizer , __a : str , __a : Optional[int] = 128 , __a : Any=False , __a : bool = False , ):
UpperCAmelCase_ = hans_processors[task]()
UpperCAmelCase_ = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
UpperCAmelCase_ , UpperCAmelCase_ = label_list[2], label_list[1]
UpperCAmelCase_ = label_list
UpperCAmelCase_ = processor.get_dev_examples(__a ) if evaluate else processor.get_train_examples(__a )
UpperCAmelCase_ = hans_convert_examples_to_features(__a , __a , __a , __a )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc="convert examples to features" ):
if ex_index % 10000 == 0:
logger.info("Writing example %d of %d" % (ex_index, len(__a )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
UpperCAmelCase_ = tf.data.Dataset.from_generator(
__a , (
{
"example_id": tf.intaa,
"input_ids": tf.intaa,
"attention_mask": tf.intaa,
"token_type_ids": tf.intaa,
},
tf.intaa,
) , (
{
"example_id": tf.TensorShape([] ),
"input_ids": tf.TensorShape([None, None] ),
"attention_mask": tf.TensorShape([None, None] ),
"token_type_ids": tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) , )
def _lowercase (self : int ):
return self.dataset
def __len__(self : Any ):
return len(self.features )
def __getitem__(self : int , __a : Union[str, Any] ):
return self.features[i]
def _lowercase (self : int ):
return self.label_list
class __A ( UpperCamelCase__ ):
def _lowercase (self : List[Any] , __a : Dict ):
return self._create_examples(self._read_tsv(os.path.join(__a , "heuristics_train_set.txt" ) ) , "train" )
def _lowercase (self : Any , __a : List[Any] ):
return self._create_examples(self._read_tsv(os.path.join(__a , "heuristics_evaluation_set.txt" ) ) , "dev" )
def _lowercase (self : Any ):
return ["contradiction", "entailment", "neutral"]
def _lowercase (self : Union[str, Any] , __a : Optional[int] , __a : Union[str, Any] ):
UpperCAmelCase_ = []
for i, line in enumerate(__a ):
if i == 0:
continue
UpperCAmelCase_ = "%s-%s" % (set_type, line[0])
UpperCAmelCase_ = line[5]
UpperCAmelCase_ = line[6]
UpperCAmelCase_ = line[7][2:] if line[7].startswith("ex" ) else line[7]
UpperCAmelCase_ = line[0]
examples.append(InputExample(guid=__a , text_a=__a , text_b=__a , label=__a , pairID=__a ) )
return examples
def lowerCAmelCase_ ( snake_case_ : List[InputExample] , snake_case_ : List[str] , snake_case_ : int , snake_case_ : PreTrainedTokenizer , ) -> Optional[Any]:
    '''Tokenize ``InputExample``s into padded ``InputFeatures`` for HANS.'''
UpperCAmelCase_ = {label: i for i, label in enumerate(snake_case_ )}
UpperCAmelCase_ = []
for ex_index, example in tqdm.tqdm(enumerate(snake_case_ ) , desc="convert examples to features" ):
if ex_index % 1_00_00 == 0:
logger.info("Writing example %d" % (ex_index) )
UpperCAmelCase_ = tokenizer(
example.text_a , example.text_b , add_special_tokens=snake_case_ , max_length=snake_case_ , padding="max_length" , truncation=snake_case_ , return_overflowing_tokens=snake_case_ , )
UpperCAmelCase_ = label_map[example.label] if example.label in label_map else 0
UpperCAmelCase_ = int(example.pairID )
features.append(InputFeatures(**snake_case_ , label=snake_case_ , pairID=snake_case_ ) )
for i, example in enumerate(examples[:5] ):
logger.info("*** Example ***" )
logger.info(f"""guid: {example}""" )
logger.info(f"""features: {features[i]}""" )
return features
SCREAMING_SNAKE_CASE_: int ={
'hans': 3,
}
SCREAMING_SNAKE_CASE_: Any ={
'hans': HansProcessor,
}
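# These two registries map a task name to its label count and processor; the
# dataset classes above consume them via ``hans_processors[task]()``.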
| 1 | 1 |
'''Convert original Audio Spectrogram Transformer (AST) checkpoints to HF format.'''
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_: Optional[int] =logging.get_logger(__name__)
def lowerCAmelCase_ ( snake_case_ : Any ) -> int:
    '''Build an ``ASTConfig`` (patch strides and label maps) from the model name.'''
UpperCAmelCase_ = ASTConfig()
if "10-10" in model_name:
pass
elif "speech-commands" in model_name:
UpperCAmelCase_ = 1_28
elif "12-12" in model_name:
UpperCAmelCase_ = 12
UpperCAmelCase_ = 12
elif "14-14" in model_name:
UpperCAmelCase_ = 14
UpperCAmelCase_ = 14
elif "16-16" in model_name:
UpperCAmelCase_ = 16
UpperCAmelCase_ = 16
else:
raise ValueError("Model not supported" )
UpperCAmelCase_ = "huggingface/label-files"
if "speech-commands" in model_name:
UpperCAmelCase_ = 35
UpperCAmelCase_ = "speech-commands-v2-id2label.json"
else:
UpperCAmelCase_ = 5_27
UpperCAmelCase_ = "audioset-id2label.json"
UpperCAmelCase_ = json.load(open(hf_hub_download(snake_case_ , snake_case_ , repo_type="dataset" ) , "r" ) )
UpperCAmelCase_ = {int(snake_case_ ): v for k, v in idalabel.items()}
UpperCAmelCase_ = idalabel
UpperCAmelCase_ = {v: k for k, v in idalabel.items()}
return config
def lowerCAmelCase_ ( snake_case_ : List[str] ) -> Any:
    '''Map one original checkpoint key name onto its Hugging Face equivalent.'''
if "module.v" in name:
UpperCAmelCase_ = name.replace("module.v" , "audio_spectrogram_transformer" )
if "cls_token" in name:
UpperCAmelCase_ = name.replace("cls_token" , "embeddings.cls_token" )
if "dist_token" in name:
UpperCAmelCase_ = name.replace("dist_token" , "embeddings.distillation_token" )
if "pos_embed" in name:
UpperCAmelCase_ = name.replace("pos_embed" , "embeddings.position_embeddings" )
if "patch_embed.proj" in name:
UpperCAmelCase_ = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
# transformer blocks
if "blocks" in name:
UpperCAmelCase_ = name.replace("blocks" , "encoder.layer" )
if "attn.proj" in name:
UpperCAmelCase_ = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name:
UpperCAmelCase_ = name.replace("attn" , "attention.self" )
if "norm1" in name:
UpperCAmelCase_ = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
UpperCAmelCase_ = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
UpperCAmelCase_ = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
UpperCAmelCase_ = name.replace("mlp.fc2" , "output.dense" )
# final layernorm
if "audio_spectrogram_transformer.norm" in name:
UpperCAmelCase_ = name.replace("audio_spectrogram_transformer.norm" , "audio_spectrogram_transformer.layernorm" )
# classifier head
if "module.mlp_head.0" in name:
UpperCAmelCase_ = name.replace("module.mlp_head.0" , "classifier.layernorm" )
if "module.mlp_head.1" in name:
UpperCAmelCase_ = name.replace("module.mlp_head.1" , "classifier.dense" )
return name
def lowerCAmelCase_ ( snake_case_ : int , snake_case_ : Optional[Any] ) -> Union[str, Any]:
    '''Rename all checkpoint keys and split the fused qkv projection.'''
for key in orig_state_dict.copy().keys():
UpperCAmelCase_ = orig_state_dict.pop(snake_case_ )
if "qkv" in key:
UpperCAmelCase_ = key.split("." )
UpperCAmelCase_ = int(key_split[3] )
UpperCAmelCase_ = config.hidden_size
if "weight" in key:
UpperCAmelCase_ = val[:dim, :]
UpperCAmelCase_ = val[dim : dim * 2, :]
UpperCAmelCase_ = val[-dim:, :]
else:
UpperCAmelCase_ = val[:dim]
UpperCAmelCase_ = val[dim : dim * 2]
UpperCAmelCase_ = val[-dim:]
else:
UpperCAmelCase_ = val
return orig_state_dict
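# The fused attention weight has shape (3 * dim, dim) and is split row-wise
# into query / key / value above; for the weight case this is equivalent to:
#   q, k, v = val.chunk(3, dim=0)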
def lowerCAmelCase_ ( snake_case_ : Any ) -> str:
    '''Drop the original classification/distillation head keys before loading.'''
UpperCAmelCase_ = [
"module.v.head.weight",
"module.v.head.bias",
"module.v.head_dist.weight",
"module.v.head_dist.bias",
]
for k in ignore_keys:
state_dict.pop(snake_case_ , snake_case_ )
@torch.no_grad()
def lowerCAmelCase_ ( snake_case_ : int , snake_case_ : Any , snake_case_ : Optional[int]=False ) -> List[Any]:
    '''Download an AST checkpoint, convert it, verify logits and optionally save/push.'''
UpperCAmelCase_ = get_audio_spectrogram_transformer_config(snake_case_ )
UpperCAmelCase_ = {
"ast-finetuned-audioset-10-10-0.4593": (
"https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1"
),
"ast-finetuned-audioset-10-10-0.450": (
"https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1"
),
"ast-finetuned-audioset-10-10-0.448": (
"https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1"
),
"ast-finetuned-audioset-10-10-0.448-v2": (
"https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1"
),
"ast-finetuned-audioset-12-12-0.447": (
"https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1"
),
"ast-finetuned-audioset-14-14-0.443": (
"https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1"
),
"ast-finetuned-audioset-16-16-0.442": (
"https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1"
),
"ast-finetuned-speech-commands-v2": (
"https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1"
),
}
# load original state_dict
UpperCAmelCase_ = model_name_to_url[model_name]
UpperCAmelCase_ = torch.hub.load_state_dict_from_url(snake_case_ , map_location="cpu" )
# remove some keys
remove_keys(snake_case_ )
# rename some keys
UpperCAmelCase_ = convert_state_dict(snake_case_ , snake_case_ )
# load 🤗 model
UpperCAmelCase_ = ASTForAudioClassification(snake_case_ )
model.eval()
model.load_state_dict(snake_case_ )
# verify outputs on dummy input
# source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
UpperCAmelCase_ = -4.267_7393 if "speech-commands" not in model_name else -6.84_5978
UpperCAmelCase_ = 4.568_9974 if "speech-commands" not in model_name else 5.565_4526
UpperCAmelCase_ = 10_24 if "speech-commands" not in model_name else 1_28
UpperCAmelCase_ = ASTFeatureExtractor(mean=snake_case_ , std=snake_case_ , max_length=snake_case_ )
if "speech-commands" in model_name:
UpperCAmelCase_ = load_dataset("speech_commands" , "v0.02" , split="validation" )
UpperCAmelCase_ = dataset[0]["audio"]["array"]
else:
UpperCAmelCase_ = hf_hub_download(
repo_id="nielsr/audio-spectogram-transformer-checkpoint" , filename="sample_audio.flac" , repo_type="dataset" , )
UpperCAmelCase_ , UpperCAmelCase_ = torchaudio.load(snake_case_ )
UpperCAmelCase_ = waveform.squeeze().numpy()
UpperCAmelCase_ = feature_extractor(snake_case_ , sampling_rate=1_60_00 , return_tensors="pt" )
# forward pass
UpperCAmelCase_ = model(**snake_case_ )
UpperCAmelCase_ = outputs.logits
if model_name == "ast-finetuned-audioset-10-10-0.4593":
UpperCAmelCase_ = torch.tensor([-0.8760, -7.0042, -8.6602] )
elif model_name == "ast-finetuned-audioset-10-10-0.450":
UpperCAmelCase_ = torch.tensor([-1.1986, -7.0903, -8.2718] )
elif model_name == "ast-finetuned-audioset-10-10-0.448":
UpperCAmelCase_ = torch.tensor([-2.6128, -8.0080, -9.4344] )
elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
UpperCAmelCase_ = torch.tensor([-1.5080, -7.4534, -8.8917] )
elif model_name == "ast-finetuned-audioset-12-12-0.447":
UpperCAmelCase_ = torch.tensor([-0.5050, -6.5833, -8.0843] )
elif model_name == "ast-finetuned-audioset-14-14-0.443":
UpperCAmelCase_ = torch.tensor([-0.3826, -7.0336, -8.2413] )
elif model_name == "ast-finetuned-audioset-16-16-0.442":
UpperCAmelCase_ = torch.tensor([-1.2113, -6.9101, -8.3470] )
elif model_name == "ast-finetuned-speech-commands-v2":
UpperCAmelCase_ = torch.tensor([6.1589, -8.0566, -8.7984] )
else:
raise ValueError("Unknown model name" )
if not torch.allclose(logits[0, :3] , snake_case_ , atol=1E-4 ):
raise ValueError("Logits don't match" )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
Path(snake_case_ ).mkdir(exist_ok=snake_case_ )
print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(snake_case_ )
print(f"""Saving feature extractor to {pytorch_dump_folder_path}""" )
feature_extractor.save_pretrained(snake_case_ )
if push_to_hub:
print("Pushing model and feature extractor to the hub..." )
model.push_to_hub(f"""MIT/{model_name}""" )
feature_extractor.push_to_hub(f"""MIT/{model_name}""" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_: Tuple =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='ast-finetuned-audioset-10-10-0.4593',
type=str,
help='Name of the Audio Spectrogram Transformer model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
SCREAMING_SNAKE_CASE_: List[Any] =parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 1 | '''LLaMA model configuration.'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_: Dict =logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_: Tuple ={}
class __A ( UpperCamelCase__ ):
a__ : int = """llama"""
a__ : Any = ["""past_key_values"""]
def __init__(self : List[str] , __a : List[str]=32000 , __a : Tuple=4096 , __a : List[Any]=11008 , __a : Dict=32 , __a : Tuple=32 , __a : Any=None , __a : Any="silu" , __a : List[Any]=2048 , __a : List[Any]=0.02 , __a : str=1E-6 , __a : Optional[Any]=True , __a : Union[str, Any]=0 , __a : Any=1 , __a : Dict=2 , __a : Dict=1 , __a : str=False , __a : str=None , **__a : Optional[Any] , ):
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
# for backward compatibility
if num_key_value_heads is None:
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = num_key_value_heads
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = rms_norm_eps
UpperCAmelCase_ = pretraining_tp
UpperCAmelCase_ = use_cache
UpperCAmelCase_ = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , tie_word_embeddings=__a , **__a , )
def _lowercase (self : List[str] ):
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , __a ) or len(self.rope_scaling ) != 2:
raise ValueError(
"`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, "
f"""got {self.rope_scaling}""" )
UpperCAmelCase_ = self.rope_scaling.get("type" , __a )
UpperCAmelCase_ = self.rope_scaling.get("factor" , __a )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f"""`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" )
if rope_scaling_factor is None or not isinstance(__a , __a ) or rope_scaling_factor <= 1.0:
            raise ValueError(f"""`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}""" )
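# For example (illustrative; upstream this class is ``LlamaConfig``), a valid
# linear 2x context extension passes the validation above:
#   config = __A(rope_scaling={"type": "linear", "factor": 2.0})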
| 1 | 1 |
'''Convert original FLAVA checkpoints (plus DALL-E codebook) to HF format.'''
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def lowerCAmelCase_ ( snake_case_ : str ) -> str:
    '''Checksum a state dict by summing all parameters, skipping ``encoder.embeddings``.'''
return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items() )
def lowerCAmelCase_ ( snake_case_ : str , snake_case_ : List[str] ) -> str:
    '''Rename original FLAVA keys to HF names and merge in the codebook weights.'''
UpperCAmelCase_ = {}
for key, value in state_dict.items():
if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
continue
UpperCAmelCase_ = key.replace("heads.cmd.mim_head.cls.predictions" , "mmm_image_head" )
UpperCAmelCase_ = key.replace("heads.cmd.mlm_head.cls.predictions" , "mmm_text_head" )
UpperCAmelCase_ = key.replace("heads.cmd.itm_head.cls" , "itm_head" )
UpperCAmelCase_ = key.replace("heads.cmd.itm_head.pooler" , "itm_head.pooler" )
UpperCAmelCase_ = key.replace("heads.cmd.clip_head.logit_scale" , "flava.logit_scale" )
UpperCAmelCase_ = key.replace("heads.fairseq_mlm.cls.predictions" , "mlm_head" )
UpperCAmelCase_ = key.replace("heads.imagenet.mim_head.cls.predictions" , "mim_head" )
UpperCAmelCase_ = key.replace("mm_text_projection" , "flava.text_to_mm_projection" )
UpperCAmelCase_ = key.replace("mm_image_projection" , "flava.image_to_mm_projection" )
UpperCAmelCase_ = key.replace("image_encoder.module" , "flava.image_model" )
UpperCAmelCase_ = key.replace("text_encoder.module" , "flava.text_model" )
UpperCAmelCase_ = key.replace("mm_encoder.module.encoder.cls_token" , "flava.multimodal_model.cls_token" )
UpperCAmelCase_ = key.replace("mm_encoder.module" , "flava.multimodal_model" )
UpperCAmelCase_ = key.replace("text_projection" , "flava.text_projection" )
UpperCAmelCase_ = key.replace("image_projection" , "flava.image_projection" )
UpperCAmelCase_ = value.float()
for key, value in codebook_state_dict.items():
UpperCAmelCase_ = value
return upgrade
@torch.no_grad()
def lowerCAmelCase_ ( snake_case_ : Union[str, Any] , snake_case_ : List[str] , snake_case_ : str , snake_case_ : str=None ) -> Tuple:
    '''Load an original FLAVA checkpoint, upgrade its keys and save the HF model.'''
if config_path is not None:
UpperCAmelCase_ = FlavaConfig.from_pretrained(snake_case_ )
else:
UpperCAmelCase_ = FlavaConfig()
UpperCAmelCase_ = FlavaForPreTraining(snake_case_ ).eval()
UpperCAmelCase_ = convert_dalle_checkpoint(snake_case_ , snake_case_ , save_checkpoint=snake_case_ )
if os.path.exists(snake_case_ ):
UpperCAmelCase_ = torch.load(snake_case_ , map_location="cpu" )
else:
UpperCAmelCase_ = torch.hub.load_state_dict_from_url(snake_case_ , map_location="cpu" )
UpperCAmelCase_ = upgrade_state_dict(snake_case_ , snake_case_ )
hf_model.load_state_dict(snake_case_ )
UpperCAmelCase_ = hf_model.state_dict()
UpperCAmelCase_ = count_parameters(snake_case_ )
UpperCAmelCase_ = count_parameters(snake_case_ ) + count_parameters(snake_case_ )
assert torch.allclose(snake_case_ , snake_case_ , atol=1E-3 )
hf_model.save_pretrained(snake_case_ )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_: List[str] =argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint')
parser.add_argument('--codebook_path', default=None, type=str, help='Path to flava codebook checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
SCREAMING_SNAKE_CASE_: Union[str, Any] =parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
| 1 | '''Tests for the Stable Diffusion x4 upscaling pipeline.'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class __A ( unittest.TestCase ):
def _lowercase (self : Tuple ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _lowercase (self : str ):
UpperCAmelCase_ = 1
UpperCAmelCase_ = 3
UpperCAmelCase_ = (32, 32)
UpperCAmelCase_ = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__a )
return image
@property
def _lowercase (self : int ):
torch.manual_seed(0 )
UpperCAmelCase_ = UNetaDConditionModel(
block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=__a , only_cross_attention=(True, True, False) , num_class_embeds=100 , )
return model
@property
def _lowercase (self : Any ):
torch.manual_seed(0 )
UpperCAmelCase_ = AutoencoderKL(
block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
return model
@property
def _lowercase (self : Optional[Any] ):
torch.manual_seed(0 )
UpperCAmelCase_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=512 , )
return CLIPTextModel(__a )
def _lowercase (self : Any ):
UpperCAmelCase_ = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ = self.dummy_cond_unet_upscale
UpperCAmelCase_ = DDPMScheduler()
UpperCAmelCase_ = DDIMScheduler(prediction_type="v_prediction" )
UpperCAmelCase_ = self.dummy_vae
UpperCAmelCase_ = self.dummy_text_encoder
UpperCAmelCase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
UpperCAmelCase_ = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCAmelCase_ = Image.fromarray(np.uinta(__a ) ).convert("RGB" ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
UpperCAmelCase_ = StableDiffusionUpscalePipeline(
unet=__a , low_res_scheduler=__a , scheduler=__a , vae=__a , text_encoder=__a , tokenizer=__a , max_noise_level=350 , )
UpperCAmelCase_ = sd_pipe.to(__a )
sd_pipe.set_progress_bar_config(disable=__a )
UpperCAmelCase_ = "A painting of a squirrel eating a burger"
UpperCAmelCase_ = torch.Generator(device=__a ).manual_seed(0 )
UpperCAmelCase_ = sd_pipe(
[prompt] , image=__a , generator=__a , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , )
UpperCAmelCase_ = output.images
UpperCAmelCase_ = torch.Generator(device=__a ).manual_seed(0 )
UpperCAmelCase_ = sd_pipe(
[prompt] , image=__a , generator=__a , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , return_dict=__a , )[0]
UpperCAmelCase_ = image[0, -3:, -3:, -1]
UpperCAmelCase_ = image_from_tuple[0, -3:, -3:, -1]
UpperCAmelCase_ = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
UpperCAmelCase_ = np.array([0.31_13, 0.39_10, 0.42_72, 0.48_59, 0.50_61, 0.46_52, 0.53_62, 0.57_15, 0.56_61] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _lowercase (self : Optional[int] ):
UpperCAmelCase_ = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ = self.dummy_cond_unet_upscale
UpperCAmelCase_ = DDPMScheduler()
UpperCAmelCase_ = DDIMScheduler(prediction_type="v_prediction" )
UpperCAmelCase_ = self.dummy_vae
UpperCAmelCase_ = self.dummy_text_encoder
UpperCAmelCase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
UpperCAmelCase_ = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCAmelCase_ = Image.fromarray(np.uinta(__a ) ).convert("RGB" ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
UpperCAmelCase_ = StableDiffusionUpscalePipeline(
unet=__a , low_res_scheduler=__a , scheduler=__a , vae=__a , text_encoder=__a , tokenizer=__a , max_noise_level=350 , )
UpperCAmelCase_ = sd_pipe.to(__a )
sd_pipe.set_progress_bar_config(disable=__a )
UpperCAmelCase_ = "A painting of a squirrel eating a burger"
UpperCAmelCase_ = sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , )
UpperCAmelCase_ = output.images
assert image.shape[0] == 2
UpperCAmelCase_ = torch.Generator(device=__a ).manual_seed(0 )
UpperCAmelCase_ = sd_pipe(
[prompt] , image=__a , generator=__a , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , )
UpperCAmelCase_ = output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def _lowercase (self : str ):
UpperCAmelCase_ = self.dummy_cond_unet_upscale
UpperCAmelCase_ = DDPMScheduler()
UpperCAmelCase_ = DDIMScheduler(prediction_type="v_prediction" )
UpperCAmelCase_ = self.dummy_vae
UpperCAmelCase_ = self.dummy_text_encoder
UpperCAmelCase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
UpperCAmelCase_ = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCAmelCase_ = Image.fromarray(np.uinta(__a ) ).convert("RGB" ).resize((64, 64) )
# put models in fp16, except vae as it overflows in fp16
UpperCAmelCase_ = unet.half()
UpperCAmelCase_ = text_encoder.half()
# make sure here that pndm scheduler skips prk
UpperCAmelCase_ = StableDiffusionUpscalePipeline(
unet=__a , low_res_scheduler=__a , scheduler=__a , vae=__a , text_encoder=__a , tokenizer=__a , max_noise_level=350 , )
UpperCAmelCase_ = sd_pipe.to(__a )
sd_pipe.set_progress_bar_config(disable=__a )
UpperCAmelCase_ = "A painting of a squirrel eating a burger"
UpperCAmelCase_ = torch.manual_seed(0 )
UpperCAmelCase_ = sd_pipe(
[prompt] , image=__a , generator=__a , num_inference_steps=2 , output_type="np" , ).images
UpperCAmelCase_ = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class __A ( unittest.TestCase ):
def _lowercase (self : List[str] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowercase (self : List[Any] ):
UpperCAmelCase_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-upscale/low_res_cat.png" )
UpperCAmelCase_ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
"/upsampled_cat.npy" )
UpperCAmelCase_ = "stabilityai/stable-diffusion-x4-upscaler"
UpperCAmelCase_ = StableDiffusionUpscalePipeline.from_pretrained(__a )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
pipe.enable_attention_slicing()
UpperCAmelCase_ = "a cat sitting on a park bench"
UpperCAmelCase_ = torch.manual_seed(0 )
UpperCAmelCase_ = pipe(
prompt=__a , image=__a , generator=__a , output_type="np" , )
UpperCAmelCase_ = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 1E-3
def _lowercase (self : Tuple ):
UpperCAmelCase_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-upscale/low_res_cat.png" )
UpperCAmelCase_ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
"/upsampled_cat_fp16.npy" )
UpperCAmelCase_ = "stabilityai/stable-diffusion-x4-upscaler"
UpperCAmelCase_ = StableDiffusionUpscalePipeline.from_pretrained(
__a , torch_dtype=torch.floataa , )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
pipe.enable_attention_slicing()
UpperCAmelCase_ = "a cat sitting on a park bench"
UpperCAmelCase_ = torch.manual_seed(0 )
UpperCAmelCase_ = pipe(
prompt=__a , image=__a , generator=__a , output_type="np" , )
UpperCAmelCase_ = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def _lowercase (self : List[Any] ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
UpperCAmelCase_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-upscale/low_res_cat.png" )
UpperCAmelCase_ = "stabilityai/stable-diffusion-x4-upscaler"
UpperCAmelCase_ = StableDiffusionUpscalePipeline.from_pretrained(
__a , torch_dtype=torch.floataa , )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
UpperCAmelCase_ = "a cat sitting on a park bench"
UpperCAmelCase_ = torch.manual_seed(0 )
UpperCAmelCase_ = pipe(
prompt=__a , image=__a , generator=__a , num_inference_steps=5 , output_type="np" , )
UpperCAmelCase_ = torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 10**9
| 1 | 1 |
'''Deprecated alias for ``CLIPImageProcessor``, kept for backwards compatibility.'''
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
SCREAMING_SNAKE_CASE_: Union[str, Any] =logging.get_logger(__name__)
class __A ( UpperCamelCase__ ):
def __init__(self : int , *__a : Dict , **__a : str ):
warnings.warn(
"The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use CLIPImageProcessor instead." , __a , )
super().__init__(*__a , **__a )
| 1 | '''Distribution output heads for probabilistic time-series forecasting.'''
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class __A ( UpperCamelCase__ ):
def __init__(self : int , __a : Distribution , __a : Dict=None , __a : int=None , __a : Any=0 ):
UpperCAmelCase_ = 1.0 if scale is None else scale
UpperCAmelCase_ = 0.0 if loc is None else loc
super().__init__(__a , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=__a )] )
@property
def _lowercase (self : Union[str, Any] ):
return self.base_dist.mean * self.scale + self.loc
@property
def _lowercase (self : List[Any] ):
return self.base_dist.variance * self.scale**2
@property
def _lowercase (self : List[Any] ):
return self.variance.sqrt()
class __A ( nn.Module ):
def __init__(self : Optional[int] , __a : int , __a : Dict[str, int] , __a : Callable[..., Tuple[torch.Tensor]] , **__a : List[str] ):
super().__init__(**__a )
UpperCAmelCase_ = args_dim
UpperCAmelCase_ = nn.ModuleList([nn.Linear(__a , __a ) for dim in args_dim.values()] )
UpperCAmelCase_ = domain_map
def _lowercase (self : List[str] , __a : torch.Tensor ):
UpperCAmelCase_ = [proj(__a ) for proj in self.proj]
return self.domain_map(*__a )
class __A ( nn.Module ):
def __init__(self : Union[str, Any] , __a : List[str] ):
super().__init__()
UpperCAmelCase_ = function
def _lowercase (self : Optional[int] , __a : List[str] , *__a : Optional[int] ):
return self.function(__a , *__a )
class __A :
a__ : type
a__ : int
a__ : Dict[str, int]
def __init__(self : List[Any] , __a : int = 1 ):
UpperCAmelCase_ = dim
UpperCAmelCase_ = {k: dim * self.args_dim[k] for k in self.args_dim}
def _lowercase (self : Any , __a : Any ):
if self.dim == 1:
return self.distribution_class(*__a )
else:
return Independent(self.distribution_class(*__a ) , 1 )
def _lowercase (self : List[str] , __a : Union[str, Any] , __a : Optional[torch.Tensor] = None , __a : Optional[torch.Tensor] = None , ):
UpperCAmelCase_ = self._base_distribution(__a )
if loc is None and scale is None:
return distr
else:
return AffineTransformed(__a , loc=__a , scale=__a , event_dim=self.event_dim )
@property
def _lowercase (self : Any ):
return () if self.dim == 1 else (self.dim,)
@property
def _lowercase (self : Dict ):
return len(self.event_shape )
@property
def _lowercase (self : Tuple ):
return 0.0
def _lowercase (self : List[str] , __a : int ):
return ParameterProjection(
in_features=__a , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , )
def _lowercase (self : Optional[int] , *__a : torch.Tensor ):
raise NotImplementedError()
@staticmethod
def _lowercase (__a : torch.Tensor ):
return (x + torch.sqrt(torch.square(__a ) + 4.0 )) / 2.0
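    # squareplus(x) = (x + sqrt(x**2 + 4)) / 2 maps any real x to a strictly
    # positive value: squareplus(0) == 1 and squareplus(x) -> x as x -> +inf,
    # making it a smooth softplus-like surrogate for enforcing positivity.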
class __A ( UpperCamelCase__ ):
a__ : Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
a__ : type = StudentT
@classmethod
def _lowercase (cls : Union[str, Any] , __a : torch.Tensor , __a : torch.Tensor , __a : torch.Tensor ):
UpperCAmelCase_ = cls.squareplus(__a ).clamp_min(torch.finfo(scale.dtype ).eps )
UpperCAmelCase_ = 2.0 + cls.squareplus(__a )
return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 )
class __A ( UpperCamelCase__ ):
a__ : Dict[str, int] = {"loc": 1, "scale": 1}
a__ : type = Normal
@classmethod
def _lowercase (cls : Tuple , __a : torch.Tensor , __a : torch.Tensor ):
UpperCAmelCase_ = cls.squareplus(__a ).clamp_min(torch.finfo(scale.dtype ).eps )
return loc.squeeze(-1 ), scale.squeeze(-1 )
class __A ( UpperCamelCase__ ):
a__ : Dict[str, int] = {"total_count": 1, "logits": 1}
a__ : type = NegativeBinomial
@classmethod
def _lowercase (cls : Optional[Any] , __a : torch.Tensor , __a : torch.Tensor ):
UpperCAmelCase_ = cls.squareplus(__a )
return total_count.squeeze(-1 ), logits.squeeze(-1 )
def _lowercase (self : List[str] , __a : str ):
UpperCAmelCase_ , UpperCAmelCase_ = distr_args
if self.dim == 1:
return self.distribution_class(total_count=__a , logits=__a )
else:
return Independent(self.distribution_class(total_count=__a , logits=__a ) , 1 )
def _lowercase (self : Optional[Any] , __a : int , __a : Optional[torch.Tensor] = None , __a : Optional[torch.Tensor] = None ):
UpperCAmelCase_ , UpperCAmelCase_ = distr_args
if scale is not None:
# See scaling property of Gamma.
logits += scale.log()
return self._base_distribution((total_count, logits) )
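    # Why adding log(scale) works: NegativeBinomial(total_count=r, logits=l)
    # has mean r * exp(l), so shifting l by log(scale) rescales the mean by
    # ``scale`` while keeping the integer support unchanged.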
| 1 | 1 |
'''Tests for the TensorFlow DeBERTa-v2 models.'''
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class __A :
def __init__(self : Dict , __a : List[str] , __a : Union[str, Any]=13 , __a : Optional[int]=7 , __a : Tuple=True , __a : Any=True , __a : List[Any]=True , __a : List[str]=True , __a : Optional[Any]=99 , __a : Dict=32 , __a : Tuple=2 , __a : Dict=4 , __a : Dict=37 , __a : int="gelu" , __a : Tuple=0.1 , __a : Optional[int]=0.1 , __a : Union[str, Any]=512 , __a : str=16 , __a : Any=2 , __a : Optional[Any]=0.02 , __a : int=False , __a : Any=True , __a : int="None" , __a : str=3 , __a : List[Any]=4 , __a : int=None , ):
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = seq_length
UpperCAmelCase_ = is_training
UpperCAmelCase_ = use_input_mask
UpperCAmelCase_ = use_token_type_ids
UpperCAmelCase_ = use_labels
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = type_vocab_size
UpperCAmelCase_ = type_sequence_label_size
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = num_labels
UpperCAmelCase_ = num_choices
UpperCAmelCase_ = relative_attention
UpperCAmelCase_ = position_biased_input
UpperCAmelCase_ = pos_att_type
UpperCAmelCase_ = scope
def _lowercase (self : Optional[Any] ):
UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase_ = None
if self.use_input_mask:
UpperCAmelCase_ = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase_ = None
if self.use_token_type_ids:
UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase_ = None
UpperCAmelCase_ = None
UpperCAmelCase_ = None
if self.use_labels:
UpperCAmelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase_ = DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , initializer_range=self.initializer_range , return_dict=__a , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowercase (self : Optional[Any] , __a : Dict , __a : List[Any] , __a : str , __a : str , __a : int , __a : int , __a : Tuple ):
UpperCAmelCase_ = TFDebertaVaModel(config=__a )
UpperCAmelCase_ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
UpperCAmelCase_ = [input_ids, input_mask]
UpperCAmelCase_ = model(__a )
UpperCAmelCase_ = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase (self : Optional[int] , __a : Tuple , __a : Optional[Any] , __a : int , __a : Dict , __a : str , __a : Any , __a : Any ):
UpperCAmelCase_ = TFDebertaVaForMaskedLM(config=__a )
UpperCAmelCase_ = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
UpperCAmelCase_ = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowercase (self : Optional[Any] , __a : int , __a : Any , __a : Optional[Any] , __a : Dict , __a : Dict , __a : Tuple , __a : str ):
UpperCAmelCase_ = self.num_labels
UpperCAmelCase_ = TFDebertaVaForSequenceClassification(config=__a )
UpperCAmelCase_ = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
UpperCAmelCase_ = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowercase (self : Tuple , __a : List[Any] , __a : str , __a : Dict , __a : Tuple , __a : int , __a : Tuple , __a : List[str] ):
UpperCAmelCase_ = self.num_labels
UpperCAmelCase_ = TFDebertaVaForTokenClassification(config=__a )
UpperCAmelCase_ = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
UpperCAmelCase_ = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowercase (self : Optional[Any] , __a : Optional[int] , __a : Tuple , __a : Optional[Any] , __a : Any , __a : Union[str, Any] , __a : Union[str, Any] , __a : int ):
UpperCAmelCase_ = TFDebertaVaForQuestionAnswering(config=__a )
UpperCAmelCase_ = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
UpperCAmelCase_ = model(__a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowercase (self : List[str] ):
UpperCAmelCase_ = self.prepare_config_and_inputs()
        (
            UpperCAmelCase_ ,
            UpperCAmelCase_ ,
            UpperCAmelCase_ ,
            UpperCAmelCase_ ,
            UpperCAmelCase_ ,
            UpperCAmelCase_ ,
            UpperCAmelCase_ ,
        ) = config_and_inputs
UpperCAmelCase_ = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class __A ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
a__ : Optional[Any] = (
(
TFDebertaVaModel,
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
)
if is_tf_available()
else ()
)
a__ : int = (
{
"""feature-extraction""": TFDebertaVaModel,
"""fill-mask""": TFDebertaVaForMaskedLM,
"""question-answering""": TFDebertaVaForQuestionAnswering,
"""text-classification""": TFDebertaVaForSequenceClassification,
"""token-classification""": TFDebertaVaForTokenClassification,
"""zero-shot""": TFDebertaVaForSequenceClassification,
}
if is_tf_available()
else {}
)
a__ : Optional[Any] = False
a__ : List[Any] = False
def _lowercase (self : Any ):
UpperCAmelCase_ = TFDebertaVaModelTester(self )
UpperCAmelCase_ = ConfigTester(self , config_class=__a , hidden_size=37 )
def _lowercase (self : Optional[int] ):
self.config_tester.run_common_tests()
def _lowercase (self : int ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def _lowercase (self : Union[str, Any] ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__a )
def _lowercase (self : Union[str, Any] ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__a )
def _lowercase (self : Union[str, Any] ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__a )
def _lowercase (self : Tuple ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__a )
@slow
def _lowercase (self : Dict ):
UpperCAmelCase_ = TFDebertaVaModel.from_pretrained("kamalkraj/deberta-v2-xlarge" )
self.assertIsNotNone(__a )
@require_tf
class __A ( unittest.TestCase ):
@unittest.skip(reason="Model not available yet" )
def _lowercase (self : Dict ):
pass
@slow
def _lowercase (self : str ):
UpperCAmelCase_ = TFDebertaVaModel.from_pretrained("kamalkraj/deberta-v2-xlarge" )
UpperCAmelCase_ = tf.constant([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] )
UpperCAmelCase_ = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
UpperCAmelCase_ = model(__a , attention_mask=__a )[0]
UpperCAmelCase_ = tf.constant(
[[[0.23_56, 0.19_48, 0.03_69], [-0.10_63, 0.35_86, -0.51_52], [-0.63_99, -0.02_59, -0.25_25]]] )
tf.debugging.assert_near(output[:, 1:4, 1:4] , __a , atol=1E-4 )
| 1 | '''simple docstring'''
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
SCREAMING_SNAKE_CASE_: Optional[Any] ='\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n'
SCREAMING_SNAKE_CASE_: Union[str, Any] ='\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.\n'
SCREAMING_SNAKE_CASE_: List[Any] =r'\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTeX.\n references: list of references, one for each prediction. Each\n reference is a string that contains natural language\n and LaTeX.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting "1/2" to "\\frac{1}{2}")\n\nExamples:\n >>> metric = datasets.load_metric("competition_math")\n >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])\n >>> print(results)\n {\'accuracy\': 1.0}\n'
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __A ( datasets.Metric ):
def _lowercase (self : Optional[Any] ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" ),
"references": datasets.Value("string" ),
} ) , homepage="https://github.com/hendrycks/math" , codebase_urls=["https://github.com/hendrycks/math"] , )
def _lowercase (self : Tuple , __a : Optional[int] , __a : List[Any] ):
UpperCAmelCase_ = 0.0
for i, j in zip(__a , __a ):
n_correct += 1.0 if math_equivalence.is_equiv(__a , __a ) else 0.0
UpperCAmelCase_ = n_correct / len(__a )
return {
"accuracy": accuracy,
}
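# Illustrative sketch (not part of the metric): the same score can be computed
# directly with `math_equivalence`, assuming the package from
# https://github.com/hendrycks/math is installed:
#
#   predictions, references = ["1/2"], ["\\frac{1}{2}"]
#   n_correct = sum(
#       1.0 if math_equivalence.is_equiv(p, r) else 0.0 for p, r in zip(predictions, references)
#   )
#   accuracy = n_correct / len(predictions)  # 1.0, since "1/2" canonicalizes to "\frac{1}{2}"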
| 1 | 1 |
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_: List[str] =logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_: List[Any] =[
('bert.bert', 'visual_bert'),
('bert.cls', 'cls'),
('bert.classifier', 'cls'),
('token_type_embeddings_visual', 'visual_token_type_embeddings'),
('position_embeddings_visual', 'visual_position_embeddings'),
('projection', 'visual_projection'),
]
SCREAMING_SNAKE_CASE_: Tuple =[
'nlvr2_coco_pre_trained.th',
'nlvr2_fine_tuned.th',
'nlvr2_pre_trained.th',
'vcr_coco_pre_train.th',
'vcr_fine_tune.th',
'vcr_pre_train.th',
'vqa_coco_pre_trained.th',
'vqa_fine_tuned.th',
'vqa_pre_trained.th',
]
def lowerCAmelCase_ ( snake_case_ : Dict ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = torch.load(snake_case_ , map_location="cpu" )
return sd
def lowerCAmelCase_ ( snake_case_ : int , snake_case_ : Optional[int] , snake_case_ : int=rename_keys_prefix ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ = OrderedDict()
UpperCAmelCase_ = torch.arange(config.max_position_embeddings ).expand((1, -1) )
# detector_d = OrderedDict()
for key in d:
if "detector" in key:
# detector_d[key.replace('detector.','')] = d[key]
continue
UpperCAmelCase_ = key
for name_pair in rename_keys_prefix:
UpperCAmelCase_ = new_key.replace(name_pair[0] , name_pair[1] )
UpperCAmelCase_ = d[key]
if key == "bert.cls.predictions.decoder.weight":
# Old bert code didn't have `decoder.bias`; it was added separately
UpperCAmelCase_ = new_d["cls.predictions.bias"]
return new_d
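# Rename example (illustrative): with the prefix pairs above, an original key
#   "bert.bert.embeddings.word_embeddings.weight"
# is rewritten to
#   "visual_bert.embeddings.word_embeddings.weight"
# because the ("bert.bert", "visual_bert") pair matches first.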
@torch.no_grad()
def lowerCAmelCase_ ( snake_case_ : Any , snake_case_ : Tuple ) -> Tuple:
'''simple docstring'''
assert (
checkpoint_path.split("/" )[-1] in ACCEPTABLE_CHECKPOINTS
), f"""The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."""
# Get Config
if "pre" in checkpoint_path:
UpperCAmelCase_ = "pretraining"
if "vcr" in checkpoint_path:
UpperCAmelCase_ = {"visual_embedding_dim": 5_12}
elif "vqa_advanced" in checkpoint_path:
UpperCAmelCase_ = {"visual_embedding_dim": 20_48}
elif "vqa" in checkpoint_path:
UpperCAmelCase_ = {"visual_embedding_dim": 20_48}
elif "nlvr" in checkpoint_path:
UpperCAmelCase_ = {"visual_embedding_dim": 10_24}
else:
raise NotImplementedError(f"""No implementation found for `{checkpoint_path}`.""" )
else:
if "vcr" in checkpoint_path:
UpperCAmelCase_ = {"visual_embedding_dim": 5_12}
UpperCAmelCase_ = "multichoice"
elif "vqa_advanced" in checkpoint_path:
UpperCAmelCase_ = {"visual_embedding_dim": 20_48}
UpperCAmelCase_ = "vqa_advanced"
elif "vqa" in checkpoint_path:
UpperCAmelCase_ = {"visual_embedding_dim": 20_48, "num_labels": 31_29}
UpperCAmelCase_ = "vqa"
elif "nlvr" in checkpoint_path:
UpperCAmelCase_ = {
"visual_embedding_dim": 10_24,
"num_labels": 2,
}
UpperCAmelCase_ = "nlvr"
UpperCAmelCase_ = VisualBertConfig(**snake_case_ )
# Load State Dict
UpperCAmelCase_ = load_state_dict(snake_case_ )
UpperCAmelCase_ = get_new_dict(snake_case_ , snake_case_ )
if model_type == "pretraining":
UpperCAmelCase_ = VisualBertForPreTraining(snake_case_ )
elif model_type == "vqa":
UpperCAmelCase_ = VisualBertForQuestionAnswering(snake_case_ )
elif model_type == "nlvr":
UpperCAmelCase_ = VisualBertForVisualReasoning(snake_case_ )
elif model_type == "multichoice":
UpperCAmelCase_ = VisualBertForMultipleChoice(snake_case_ )
model.load_state_dict(snake_case_ )
# Save Checkpoints
Path(snake_case_ ).mkdir(exist_ok=snake_case_ )
model.save_pretrained(snake_case_ )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_: str =argparse.ArgumentParser()
# Required parameters
parser.add_argument('orig_checkpoint_path', type=str, help='A path to .th on local filesystem.')
parser.add_argument('pytorch_dump_folder_path', type=str, help='Path to the output PyTorch model.')
SCREAMING_SNAKE_CASE_: Optional[int] =parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
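# Example invocation (a sketch; the script name and paths are hypothetical, but
# the checkpoint file name must be one of ACCEPTABLE_CHECKPOINTS above):
#
#   python convert_visual_bert_checkpoint.py /path/to/vqa_fine_tuned.th ./visual_bert_vqa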
| 1 | '''simple docstring'''
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def lowerCAmelCase_ ( snake_case_ : Dict , snake_case_ : Union[str, Any] , snake_case_ : Optional[Any] , snake_case_ : List[Any] ) -> List[Any]:
'''simple docstring'''
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), f"""Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"""
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), f"""Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"""
def lowerCAmelCase_ ( snake_case_ : Any , snake_case_ : Tuple , snake_case_ : Any , snake_case_ : List[Any] , snake_case_ : str=True ) -> Optional[Any]:
'''simple docstring'''
model.train()
UpperCAmelCase_ = model(snake_case_ )
UpperCAmelCase_ = F.mse_loss(snake_case_ , target.to(output.device ) )
if not do_backward:
loss /= accelerator.gradient_accumulation_steps
loss.backward()
else:
accelerator.backward(snake_case_ )
def lowerCAmelCase_ ( snake_case_ : Optional[Any] , snake_case_ : Any=False ) -> Dict:
'''simple docstring'''
set_seed(42 )
UpperCAmelCase_ = RegressionModel()
UpperCAmelCase_ = deepcopy(snake_case_ )
UpperCAmelCase_ = RegressionDataset(length=80 )
UpperCAmelCase_ = DataLoader(snake_case_ , batch_size=16 )
model.to(accelerator.device )
if sched:
UpperCAmelCase_ = AdamW(params=model.parameters() , lr=1E-3 )
UpperCAmelCase_ = AdamW(params=ddp_model.parameters() , lr=1E-3 )
UpperCAmelCase_ = LambdaLR(snake_case_ , lr_lambda=lambda snake_case_ : epoch**0.65 )
UpperCAmelCase_ = LambdaLR(snake_case_ , lr_lambda=lambda snake_case_ : epoch**0.65 )
# Make a copy of `model`
if sched:
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = accelerator.prepare(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
else:
UpperCAmelCase_ , UpperCAmelCase_ = accelerator.prepare(snake_case_ , snake_case_ )
if sched:
return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
return model, ddp_model, dataloader
def lowerCAmelCase_ ( snake_case_ : Any ) -> int:
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = get_training_setup(snake_case_ )
# Use a single batch
UpperCAmelCase_ , UpperCAmelCase_ = next(iter(snake_case_ ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
UpperCAmelCase_ , UpperCAmelCase_ = accelerator.gather((ddp_input, ddp_target) )
UpperCAmelCase_ , UpperCAmelCase_ = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(snake_case_ ):
step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
else:
# Sync grads
step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad , ddp_param.grad ), f"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
# Shuffle ddp_input on each iteration
torch.manual_seed(13_37 + iteration )
UpperCAmelCase_ = ddp_input[torch.randperm(len(snake_case_ ) )]
def lowerCAmelCase_ ( snake_case_ : Tuple ) -> str:
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = get_training_setup(snake_case_ )
# Use a single batch
UpperCAmelCase_ , UpperCAmelCase_ = next(iter(snake_case_ ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
UpperCAmelCase_ , UpperCAmelCase_ = accelerator.gather((ddp_input, ddp_target) )
UpperCAmelCase_ , UpperCAmelCase_ = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(snake_case_ ):
step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
else:
# Sync grads
step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), f"""Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"""
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), f"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
# Shuffle ddp_input on each iteration
torch.manual_seed(13_37 + iteration )
UpperCAmelCase_ = ddp_input[torch.randperm(len(snake_case_ ) )]
def lowerCAmelCase_ ( snake_case_ : Optional[int]=False , snake_case_ : str=False ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = Accelerator(
split_batches=snake_case_ , dispatch_batches=snake_case_ , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = get_training_setup(snake_case_ )
for iteration, batch in enumerate(snake_case_ ):
UpperCAmelCase_ , UpperCAmelCase_ = batch.values()
# Gather the distributed inputs and targs for the base model
UpperCAmelCase_ , UpperCAmelCase_ = accelerator.gather((ddp_input, ddp_target) )
UpperCAmelCase_ , UpperCAmelCase_ = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# Do "gradient accumulation" (noop)
with accelerator.accumulate(snake_case_ ):
step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# DDP model and model should only be in sync at gradient accumulation boundaries (i.e. every other step or at the end of the dataloader)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(snake_case_ ) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), f"""Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), f"""Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"""
# Shuffle ddp_input on each iteration
torch.manual_seed(13_37 + iteration )
UpperCAmelCase_ = ddp_input[torch.randperm(len(snake_case_ ) )]
GradientState._reset_state()
def lowerCAmelCase_ ( snake_case_ : Optional[Any]=False , snake_case_ : Tuple=False ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = Accelerator(
split_batches=snake_case_ , dispatch_batches=snake_case_ , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = get_training_setup(snake_case_ , snake_case_ )
for iteration, batch in enumerate(snake_case_ ):
UpperCAmelCase_ , UpperCAmelCase_ = batch.values()
# Gather the distributed inputs and targs for the base model
UpperCAmelCase_ , UpperCAmelCase_ = accelerator.gather((ddp_input, ddp_target) )
UpperCAmelCase_ , UpperCAmelCase_ = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
model.train()
ddp_model.train()
step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(snake_case_ )):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes ):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(snake_case_ ):
step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), f"""Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n"""
UpperCAmelCase_ = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(snake_case_ ))
if accelerator.num_processes > 1:
check_model_parameters(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# Shuffle ddp_input on each iteration
torch.manual_seed(13_37 + iteration )
GradientState._reset_state()
def lowerCAmelCase_ ( ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ = Accelerator()
UpperCAmelCase_ = RegressionDataset(length=80 )
UpperCAmelCase_ = DataLoader(snake_case_ , batch_size=16 )
UpperCAmelCase_ = RegressionDataset(length=96 )
UpperCAmelCase_ = DataLoader(snake_case_ , batch_size=16 )
UpperCAmelCase_ , UpperCAmelCase_ = accelerator.prepare(snake_case_ , snake_case_ )
assert accelerator.gradient_state.active_dataloader is None
for iteration, _ in enumerate(snake_case_ ):
assert id(accelerator.gradient_state.active_dataloader ) == id(snake_case_ )
if iteration < len(snake_case_ ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
for batch_num, _ in enumerate(snake_case_ ):
assert id(accelerator.gradient_state.active_dataloader ) == id(snake_case_ )
if batch_num < len(snake_case_ ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def lowerCAmelCase_ ( ) -> str:
'''simple docstring'''
UpperCAmelCase_ = Accelerator()
UpperCAmelCase_ = accelerator.state
if state.local_process_index == 0:
print("**Test `accumulate` gradient accumulation with dataloader break**" )
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print("**Test NOOP `no_sync` context manager**" )
test_noop_sync(snake_case_ )
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print("**Test Distributed `no_sync` context manager**" )
test_distributed_sync(snake_case_ )
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation, " , f"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""" , )
test_gradient_accumulation(snake_case_ , snake_case_ )
# Currently will break on torch 2.0 +, need to investigate why
if is_torch_version("<" , "2.0" ) or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation with optimizer and scheduler, " , "`split_batches=False`, `dispatch_batches=False`**" , )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation with optimizer and scheduler, " , f"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""" , )
test_gradient_accumulation_with_opt_and_scheduler(snake_case_ , snake_case_ )
def lowerCAmelCase_ ( snake_case_ : Dict ) -> int:
'''simple docstring'''
main()
if __name__ == "__main__":
main()
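# This script checks gradient synchronization across processes, so it is meant
# to be launched via the accelerate CLI, e.g. (the file name is hypothetical):
#
#   accelerate launch --num_processes 2 test_sync.py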
| 1 | 1 |
'''simple docstring'''
import math
import flax.linen as nn
import jax.numpy as jnp
def lowerCAmelCase_ ( snake_case_ : jnp.ndarray , snake_case_ : int , snake_case_ : float = 1 , snake_case_ : float = 1 , snake_case_ : float = 1.0E4 , snake_case_ : bool = False , snake_case_ : float = 1.0 , ) -> jnp.ndarray:
'''simple docstring'''
assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
assert embedding_dim % 2 == 0, f"""Embedding dimension {embedding_dim} should be even"""
UpperCAmelCase_ = float(embedding_dim // 2 )
UpperCAmelCase_ = math.log(max_timescale / min_timescale ) / (num_timescales - freq_shift)
UpperCAmelCase_ = min_timescale * jnp.exp(jnp.arange(snake_case_ , dtype=jnp.floataa ) * -log_timescale_increment )
UpperCAmelCase_ = jnp.expand_dims(snake_case_ , 1 ) * jnp.expand_dims(snake_case_ , 0 )
# scale embeddings
UpperCAmelCase_ = scale * emb
if flip_sin_to_cos:
UpperCAmelCase_ = jnp.concatenate([jnp.cos(snake_case_ ), jnp.sin(snake_case_ )] , axis=1 )
else:
UpperCAmelCase_ = jnp.concatenate([jnp.sin(snake_case_ ), jnp.cos(snake_case_ )] , axis=1 )
UpperCAmelCase_ = jnp.reshape(snake_case_ , [jnp.shape(snake_case_ )[0], embedding_dim] )
return signal
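# What the helper above computes (a sketch, using the original names): with
# d = embedding_dim // 2 and
#   freq_k = min_timescale * exp(-k * log(max_timescale / min_timescale) / (d - freq_shift)),
# the embedding of timestep t is the length-2d vector
#   [sin(scale * t * freq_0), ..., sin(scale * t * freq_{d-1}),
#    cos(scale * t * freq_0), ..., cos(scale * t * freq_{d-1})]
# with the sin/cos halves swapped when flip_sin_to_cos=True.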
class __A ( nn.Module ):
a__ : int = 32
a__ : jnp.dtype = jnp.floataa
@nn.compact
def __call__(self : Dict , __a : Tuple ):
UpperCAmelCase_ = nn.Dense(self.time_embed_dim , dtype=self.dtype , name="linear_1" )(__a )
UpperCAmelCase_ = nn.silu(__a )
UpperCAmelCase_ = nn.Dense(self.time_embed_dim , dtype=self.dtype , name="linear_2" )(__a )
return temb
class __A ( nn.Module ):
a__ : int = 32
a__ : bool = False
a__ : float = 1
@nn.compact
def __call__(self : Optional[Any] , __a : List[Any] ):
return get_sinusoidal_embeddings(
__a , embedding_dim=self.dim , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.freq_shift )
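# Usage sketch (illustrative; the top-level helper above is the
# `get_sinusoidal_embeddings` referenced in `__call__`):
#
#   import jax.numpy as jnp
#   timesteps = jnp.array([0, 1, 10])
#   emb = get_sinusoidal_embeddings(timesteps, embedding_dim=32)
#   emb.shape  # (3, 32): 16 sin features followed by 16 cos features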
| 1 | '''simple docstring'''
def lowerCAmelCase_ ( snake_case_ : int , snake_case_ : int ) -> int:
'''simple docstring'''
return x if y == 0 else greatest_common_divisor(snake_case_ , x % y )
def lowerCAmelCase_ ( snake_case_ : int , snake_case_ : int ) -> int:
'''simple docstring'''
return (x * y) // greatest_common_divisor(snake_case_ , snake_case_ )
def lowerCAmelCase_ ( snake_case_ : int = 20 ) -> int:
'''simple docstring'''
UpperCAmelCase_ = 1
for i in range(1 , n + 1 ):
UpperCAmelCase_ = lcm(snake_case_ , snake_case_ )
return g
if __name__ == "__main__":
print(f"{solution() = }")
| 1 | 1 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __A :
def __init__(self : List[Any] , __a : List[Any] , __a : int=3 , __a : Optional[int]=32 , __a : Optional[Any]=3 , __a : List[Any]=10 , __a : str=[10, 20, 30, 40] , __a : Any=[1, 1, 2, 1] , __a : str=True , __a : Optional[Any]=True , __a : Optional[int]="relu" , __a : Optional[Any]=3 , __a : Union[str, Any]=None , ):
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = image_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = embeddings_size
UpperCAmelCase_ = hidden_sizes
UpperCAmelCase_ = depths
UpperCAmelCase_ = is_training
UpperCAmelCase_ = use_labels
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = num_labels
UpperCAmelCase_ = scope
UpperCAmelCase_ = len(__a )
def _lowercase (self : Tuple ):
UpperCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ = None
if self.use_labels:
UpperCAmelCase_ = ids_tensor([self.batch_size] , self.num_labels )
UpperCAmelCase_ = self.get_config()
return config, pixel_values, labels
def _lowercase (self : int ):
return ResNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def _lowercase (self : str , __a : List[Any] , __a : Tuple , __a : Dict ):
UpperCAmelCase_ = TFResNetModel(config=__a )
UpperCAmelCase_ = model(__a )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def _lowercase (self : List[Any] , __a : Tuple , __a : Dict , __a : Tuple ):
UpperCAmelCase_ = self.num_labels
UpperCAmelCase_ = TFResNetForImageClassification(__a )
UpperCAmelCase_ = model(__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowercase (self : Any ):
UpperCAmelCase_ = self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = config_and_inputs
UpperCAmelCase_ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class __A ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
a__ : Union[str, Any] = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
a__ : List[Any] = (
{"""feature-extraction""": TFResNetModel, """image-classification""": TFResNetForImageClassification}
if is_tf_available()
else {}
)
a__ : int = False
a__ : str = False
a__ : Optional[Any] = False
a__ : List[str] = False
a__ : str = False
def _lowercase (self : Tuple ):
UpperCAmelCase_ = TFResNetModelTester(self )
UpperCAmelCase_ = ConfigTester(self , config_class=__a , has_text_modality=__a )
def _lowercase (self : Union[str, Any] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowercase (self : Optional[int] ):
return
@unittest.skip(reason="ResNet does not use inputs_embeds" )
def _lowercase (self : Optional[Any] ):
pass
@unittest.skip(reason="ResNet does not support input and output embeddings" )
def _lowercase (self : str ):
pass
def _lowercase (self : Optional[Any] ):
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(__a )
UpperCAmelCase_ = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ = [*signature.parameters.keys()]
UpperCAmelCase_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __a )
def _lowercase (self : str ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def _lowercase (self : List[Any] ):
def check_hidden_states_output(__a : str , __a : Dict , __a : Tuple ):
UpperCAmelCase_ = model_class(__a )
UpperCAmelCase_ = model(**self._prepare_for_class(__a , __a ) )
UpperCAmelCase_ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCAmelCase_ = self.model_tester.num_stages
self.assertEqual(len(__a ) , expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ = ["basic", "bottleneck"]
for model_class in self.all_model_classes:
for layer_type in layers_type:
UpperCAmelCase_ = layer_type
UpperCAmelCase_ = True
check_hidden_states_output(__a , __a , __a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ = True
check_hidden_states_output(__a , __a , __a )
def _lowercase (self : Tuple ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a )
@slow
def _lowercase (self : List[str] ):
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ = TFResNetModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def lowerCAmelCase_ ( ) -> str:
'''simple docstring'''
UpperCAmelCase_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class __A ( unittest.TestCase ):
@cached_property
def _lowercase (self : List[str] ):
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def _lowercase (self : str ):
UpperCAmelCase_ = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
UpperCAmelCase_ = self.default_image_processor
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = image_processor(images=__a , return_tensors="tf" )
# forward pass
UpperCAmelCase_ = model(**__a )
# verify the logits
UpperCAmelCase_ = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , __a )
UpperCAmelCase_ = tf.constant([-11.10_69, -9.78_77, -8.37_77] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , __a , atol=1E-4 ) )
| 1 | '''simple docstring'''
import os
from math import logaa
def lowerCAmelCase_ ( snake_case_ : str = "base_exp.txt" ) -> int:
'''simple docstring'''
UpperCAmelCase_ = 0
UpperCAmelCase_ = 0
for i, line in enumerate(open(os.path.join(os.path.dirname(snake_case_ ) , snake_case_ ) ) ):
UpperCAmelCase_ , UpperCAmelCase_ = list(map(snake_case_ , line.split("," ) ) )
if x * logaa(snake_case_ ) > largest:
UpperCAmelCase_ = x * logaa(snake_case_ )
UpperCAmelCase_ = i + 1
return result
if __name__ == "__main__":
print(solution())
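# Why logarithms work here (illustrative): comparing x * log10(base) orders
# base**x values without computing the huge powers, e.g.
#
#   11 * log10(2) ~ 3.311   (2**11 = 2048)
#    7 * log10(3) ~ 3.340   (3**7  = 2187)
#
# so a line "3,7" in base_exp.txt would rank above "2,11".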
| 1 | 1 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class __A ( unittest.TestCase ):
@property
def _lowercase (self : Tuple ):
torch.manual_seed(0 )
UpperCAmelCase_ = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , )
return model
@property
def _lowercase (self : Optional[int] ):
torch.manual_seed(0 )
UpperCAmelCase_ = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=3 , )
return model
@property
def _lowercase (self : str ):
torch.manual_seed(0 )
UpperCAmelCase_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModel(__a )
def _lowercase (self : Dict ):
UpperCAmelCase_ = self.dummy_uncond_unet
UpperCAmelCase_ = DDIMScheduler()
UpperCAmelCase_ = self.dummy_vq_model
UpperCAmelCase_ = LDMPipeline(unet=__a , vqvae=__a , scheduler=__a )
ldm.to(__a )
ldm.set_progress_bar_config(disable=__a )
UpperCAmelCase_ = torch.manual_seed(0 )
UpperCAmelCase_ = ldm(generator=__a , num_inference_steps=2 , output_type="numpy" ).images
UpperCAmelCase_ = torch.manual_seed(0 )
UpperCAmelCase_ = ldm(generator=__a , num_inference_steps=2 , output_type="numpy" , return_dict=__a )[0]
UpperCAmelCase_ = image[0, -3:, -3:, -1]
UpperCAmelCase_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase_ = np.array([0.85_12, 0.8_18, 0.64_11, 0.68_08, 0.44_65, 0.56_18, 0.46, 0.62_31, 0.51_72] )
UpperCAmelCase_ = 1E-2 if torch_device != "mps" else 3E-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class __A ( unittest.TestCase ):
def _lowercase (self : str ):
UpperCAmelCase_ = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256" )
ldm.to(__a )
ldm.set_progress_bar_config(disable=__a )
UpperCAmelCase_ = torch.manual_seed(0 )
UpperCAmelCase_ = ldm(generator=__a , num_inference_steps=5 , output_type="numpy" ).images
UpperCAmelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
UpperCAmelCase_ = np.array([0.43_99, 0.4_49_75, 0.4_68_25, 0.4_74, 0.43_59, 0.45_81, 0.4_50_95, 0.43_41, 0.44_47] )
UpperCAmelCase_ = 1E-2 if torch_device != "mps" else 3E-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
| 1 | '''simple docstring'''
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def lowerCAmelCase_ ( snake_case_ : List[Any] , snake_case_ : int ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ = checkpoint
UpperCAmelCase_ = {}
UpperCAmelCase_ = vae_state_dict["encoder.conv_in.weight"]
UpperCAmelCase_ = vae_state_dict["encoder.conv_in.bias"]
UpperCAmelCase_ = vae_state_dict["encoder.conv_out.weight"]
UpperCAmelCase_ = vae_state_dict["encoder.conv_out.bias"]
UpperCAmelCase_ = vae_state_dict["encoder.norm_out.weight"]
UpperCAmelCase_ = vae_state_dict["encoder.norm_out.bias"]
UpperCAmelCase_ = vae_state_dict["decoder.conv_in.weight"]
UpperCAmelCase_ = vae_state_dict["decoder.conv_in.bias"]
UpperCAmelCase_ = vae_state_dict["decoder.conv_out.weight"]
UpperCAmelCase_ = vae_state_dict["decoder.conv_out.bias"]
UpperCAmelCase_ = vae_state_dict["decoder.norm_out.weight"]
UpperCAmelCase_ = vae_state_dict["decoder.norm_out.bias"]
UpperCAmelCase_ = vae_state_dict["quant_conv.weight"]
UpperCAmelCase_ = vae_state_dict["quant_conv.bias"]
UpperCAmelCase_ = vae_state_dict["post_quant_conv.weight"]
UpperCAmelCase_ = vae_state_dict["post_quant_conv.bias"]
# Retrieves the keys for the encoder down blocks only
UpperCAmelCase_ = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "encoder.down" in layer} )
UpperCAmelCase_ = {
layer_id: [key for key in vae_state_dict if f"""down.{layer_id}""" in key] for layer_id in range(snake_case_ )
}
# Retrieves the keys for the decoder up blocks only
UpperCAmelCase_ = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "decoder.up" in layer} )
UpperCAmelCase_ = {
layer_id: [key for key in vae_state_dict if f"""up.{layer_id}""" in key] for layer_id in range(snake_case_ )
}
for i in range(snake_case_ ):
UpperCAmelCase_ = [key for key in down_blocks[i] if f"""down.{i}""" in key and f"""down.{i}.downsample""" not in key]
if f"""encoder.down.{i}.downsample.conv.weight""" in vae_state_dict:
UpperCAmelCase_ = vae_state_dict.pop(
f"""encoder.down.{i}.downsample.conv.weight""" )
UpperCAmelCase_ = vae_state_dict.pop(
f"""encoder.down.{i}.downsample.conv.bias""" )
UpperCAmelCase_ = renew_vae_resnet_paths(snake_case_ )
UpperCAmelCase_ = {"old": f"""down.{i}.block""", "new": f"""down_blocks.{i}.resnets"""}
assign_to_checkpoint(snake_case_ , snake_case_ , snake_case_ , additional_replacements=[meta_path] , config=snake_case_ )
UpperCAmelCase_ = [key for key in vae_state_dict if "encoder.mid.block" in key]
UpperCAmelCase_ = 2
for i in range(1 , num_mid_res_blocks + 1 ):
UpperCAmelCase_ = [key for key in mid_resnets if f"""encoder.mid.block_{i}""" in key]
UpperCAmelCase_ = renew_vae_resnet_paths(snake_case_ )
UpperCAmelCase_ = {"old": f"""mid.block_{i}""", "new": f"""mid_block.resnets.{i - 1}"""}
assign_to_checkpoint(snake_case_ , snake_case_ , snake_case_ , additional_replacements=[meta_path] , config=snake_case_ )
UpperCAmelCase_ = [key for key in vae_state_dict if "encoder.mid.attn" in key]
UpperCAmelCase_ = renew_vae_attention_paths(snake_case_ )
UpperCAmelCase_ = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
assign_to_checkpoint(snake_case_ , snake_case_ , snake_case_ , additional_replacements=[meta_path] , config=snake_case_ )
conv_attn_to_linear(snake_case_ )
for i in range(snake_case_ ):
UpperCAmelCase_ = num_up_blocks - 1 - i
UpperCAmelCase_ = [
key for key in up_blocks[block_id] if f"""up.{block_id}""" in key and f"""up.{block_id}.upsample""" not in key
]
if f"""decoder.up.{block_id}.upsample.conv.weight""" in vae_state_dict:
UpperCAmelCase_ = vae_state_dict[
f"""decoder.up.{block_id}.upsample.conv.weight"""
]
UpperCAmelCase_ = vae_state_dict[
f"""decoder.up.{block_id}.upsample.conv.bias"""
]
UpperCAmelCase_ = renew_vae_resnet_paths(snake_case_ )
UpperCAmelCase_ = {"old": f"""up.{block_id}.block""", "new": f"""up_blocks.{i}.resnets"""}
assign_to_checkpoint(snake_case_ , snake_case_ , snake_case_ , additional_replacements=[meta_path] , config=snake_case_ )
UpperCAmelCase_ = [key for key in vae_state_dict if "decoder.mid.block" in key]
UpperCAmelCase_ = 2
for i in range(1 , num_mid_res_blocks + 1 ):
UpperCAmelCase_ = [key for key in mid_resnets if f"""decoder.mid.block_{i}""" in key]
UpperCAmelCase_ = renew_vae_resnet_paths(snake_case_ )
UpperCAmelCase_ = {"old": f"""mid.block_{i}""", "new": f"""mid_block.resnets.{i - 1}"""}
assign_to_checkpoint(snake_case_ , snake_case_ , snake_case_ , additional_replacements=[meta_path] , config=snake_case_ )
UpperCAmelCase_ = [key for key in vae_state_dict if "decoder.mid.attn" in key]
UpperCAmelCase_ = renew_vae_attention_paths(snake_case_ )
UpperCAmelCase_ = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
assign_to_checkpoint(snake_case_ , snake_case_ , snake_case_ , additional_replacements=[meta_path] , config=snake_case_ )
conv_attn_to_linear(snake_case_ )
return new_checkpoint
def lowerCAmelCase_ ( snake_case_ : str , snake_case_ : str , ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ = requests.get(
" https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml" )
UpperCAmelCase_ = io.BytesIO(r.content )
UpperCAmelCase_ = OmegaConf.load(snake_case_ )
UpperCAmelCase_ = 5_12
UpperCAmelCase_ = "cuda" if torch.cuda.is_available() else "cpu"
if checkpoint_path.endswith("safetensors" ):
from safetensors import safe_open
UpperCAmelCase_ = {}
with safe_open(snake_case_ , framework="pt" , device="cpu" ) as f:
for key in f.keys():
UpperCAmelCase_ = f.get_tensor(snake_case_ )
else:
UpperCAmelCase_ = torch.load(snake_case_ , map_location=snake_case_ )["state_dict"]
# Convert the VAE model.
UpperCAmelCase_ = create_vae_diffusers_config(snake_case_ , image_size=snake_case_ )
UpperCAmelCase_ = custom_convert_ldm_vae_checkpoint(snake_case_ , snake_case_ )
UpperCAmelCase_ = AutoencoderKL(**snake_case_ )
vae.load_state_dict(snake_case_ )
vae.save_pretrained(snake_case_ )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_: Optional[int] =argparse.ArgumentParser()
parser.add_argument('--vae_pt_path', default=None, type=str, required=True, help='Path to the VAE.pt to convert.')
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the VAE.pt to convert.')
SCREAMING_SNAKE_CASE_: str =parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
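# Example invocation (a sketch; the script name and paths are hypothetical):
#
#   python convert_vae_pt_to_diffusers.py \
#       --vae_pt_path /path/to/vae.pt \
#       --dump_path ./converted_vae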
| 1 | 1 |
'''simple docstring'''
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
SCREAMING_SNAKE_CASE_: Any =logging.get_logger(__name__)
class __A ( UpperCamelCase__ ):
def __init__(self : Tuple , __a : Union[List[ControlNetModel], Tuple[ControlNetModel]] ):
super().__init__()
UpperCAmelCase_ = nn.ModuleList(__a )
def _lowercase (self : Tuple , __a : torch.FloatTensor , __a : Union[torch.Tensor, float, int] , __a : torch.Tensor , __a : List[torch.tensor] , __a : List[float] , __a : Optional[torch.Tensor] = None , __a : Optional[torch.Tensor] = None , __a : Optional[torch.Tensor] = None , __a : Optional[Dict[str, Any]] = None , __a : bool = False , __a : bool = True , ):
for i, (image, scale, controlnet) in enumerate(zip(__a , __a , self.nets ) ):
UpperCAmelCase_ , UpperCAmelCase_ = controlnet(
__a , __a , __a , __a , __a , __a , __a , __a , __a , __a , __a , )
# merge samples
if i == 0:
UpperCAmelCase_ , UpperCAmelCase_ = down_samples, mid_sample
else:
UpperCAmelCase_ = [
samples_prev + samples_curr
for samples_prev, samples_curr in zip(__a , __a )
]
mid_block_res_sample += mid_sample
return down_block_res_samples, mid_block_res_sample
def _lowercase (self : List[str] , __a : Union[str, os.PathLike] , __a : bool = True , __a : Callable = None , __a : bool = False , __a : Optional[str] = None , ):
UpperCAmelCase_ = 0
UpperCAmelCase_ = save_directory
for controlnet in self.nets:
controlnet.save_pretrained(
__a , is_main_process=__a , save_function=__a , safe_serialization=__a , variant=__a , )
idx += 1
UpperCAmelCase_ = model_path_to_save + f"""_{idx}"""
@classmethod
def _lowercase (cls : Tuple , __a : Optional[Union[str, os.PathLike]] , **__a : List[Any] ):
UpperCAmelCase_ = 0
UpperCAmelCase_ = []
# load controlnet and append to list until no controlnet directory exists anymore
# first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
# second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
UpperCAmelCase_ = pretrained_model_path
while os.path.isdir(__a ):
UpperCAmelCase_ = ControlNetModel.from_pretrained(__a , **__a )
controlnets.append(__a )
idx += 1
UpperCAmelCase_ = pretrained_model_path + f"""_{idx}"""
logger.info(f"""{len(__a )} controlnets loaded from {pretrained_model_path}.""" )
if len(__a ) == 0:
raise ValueError(
f"""No ControlNets found under {os.path.dirname(__a )}. Expected at least {pretrained_model_path + "_0"}.""" )
return cls(__a )
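# Usage sketch (illustrative; `__A` here plays the role of diffusers'
# MultiControlNetModel, and the paths are hypothetical):
#
#   nets = [ControlNetModel.from_pretrained(p) for p in ("./ctrl_canny", "./ctrl_pose")]
#   multi = MultiControlNetModel(nets)
#   multi.save_pretrained("./multi_controlnet")  # writes ./multi_controlnet, ./multi_controlnet_1, ...
#   restored = MultiControlNetModel.from_pretrained("./multi_controlnet")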
| 1 | '''simple docstring'''
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class __A ( unittest.TestCase ):
def __init__(self : str , __a : Optional[Any] , __a : Optional[Any]=13 , __a : int=30 , __a : Union[str, Any]=2 , __a : Dict=3 , __a : List[Any]=True , __a : Optional[Any]=True , __a : List[Any]=32 , __a : Any=5 , __a : str=4 , __a : Optional[int]=37 , __a : Optional[int]="gelu" , __a : List[str]=0.1 , __a : Tuple=0.1 , __a : List[str]=10 , __a : Optional[int]=0.02 , ):
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = image_size
UpperCAmelCase_ = patch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = is_training
UpperCAmelCase_ = use_labels
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = type_sequence_label_size
UpperCAmelCase_ = initializer_range
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
UpperCAmelCase_ = (image_size // patch_size) ** 2
UpperCAmelCase_ = num_patches + 1
def _lowercase (self : Any ):
UpperCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ = ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__a , initializer_range=self.initializer_range , )
return config, pixel_values
def _lowercase (self : Dict , __a : Any , __a : List[Any] ):
UpperCAmelCase_ = FlaxViTModel(config=__a )
UpperCAmelCase_ = model(__a )
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
UpperCAmelCase_ = (self.image_size, self.image_size)
UpperCAmelCase_ = (self.patch_size, self.patch_size)
UpperCAmelCase_ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size) )
def _lowercase (self : Tuple , __a : str , __a : Any ):
UpperCAmelCase_ = self.type_sequence_label_size
UpperCAmelCase_ = FlaxViTForImageClassification(config=__a )
UpperCAmelCase_ = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCAmelCase_ = 1
UpperCAmelCase_ = FlaxViTForImageClassification(__a )
UpperCAmelCase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase_ = model(__a )
def _lowercase (self : Optional[Any] ):
UpperCAmelCase_ = self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ = config_and_inputs
UpperCAmelCase_ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_flax
class __A ( UpperCamelCase__ , unittest.TestCase ):
a__ : Tuple = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()
def _lowercase (self : Any ):
UpperCAmelCase_ = FlaxViTModelTester(self )
UpperCAmelCase_ = ConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37 )
def _lowercase (self : Tuple ):
self.config_tester.run_common_tests()
def _lowercase (self : str ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def _lowercase (self : str ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a )
def _lowercase (self : Tuple ):
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(__a )
UpperCAmelCase_ = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ = [*signature.parameters.keys()]
UpperCAmelCase_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __a )
def _lowercase (self : Optional[Any] ):
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCAmelCase_ = self._prepare_for_class(__a , __a )
UpperCAmelCase_ = model_class(__a )
@jax.jit
def model_jitted(__a : Tuple , **__a : List[Any] ):
return model(pixel_values=__a , **__a )
with self.subTest("JIT Enabled" ):
UpperCAmelCase_ = model_jitted(**__a ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
UpperCAmelCase_ = model_jitted(**__a ).to_tuple()
self.assertEqual(len(__a ) , len(__a ) )
for jitted_output, output in zip(__a , __a ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def _lowercase (self : Tuple ):
for model_class_name in self.all_model_classes:
UpperCAmelCase_ = model_class_name.from_pretrained("google/vit-base-patch16-224" )
UpperCAmelCase_ = model(np.ones((1, 3, 224, 224) ) )
self.assertIsNotNone(__a )
| 1 | 1 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class __A :
def __init__(self : Any , __a : str , __a : List[str]=13 , __a : Optional[int]=30 , __a : Tuple=2 , __a : str=3 , __a : Tuple=True , __a : List[Any]=True , __a : Optional[int]=32 , __a : Optional[int]=2 , __a : int=4 , __a : Optional[Any]=37 , __a : Optional[Any]="gelu" , __a : Optional[Any]=0.1 , __a : int=0.1 , __a : int=10 , __a : Optional[int]=0.02 , __a : Dict=3 , __a : Optional[int]=None , __a : List[str]=2 , ):
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = image_size
UpperCAmelCase_ = patch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = is_training
UpperCAmelCase_ = use_labels
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = type_sequence_label_size
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = scope
UpperCAmelCase_ = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
UpperCAmelCase_ = (image_size // patch_size) ** 2
UpperCAmelCase_ = num_patches + 2
def _lowercase (self : Optional[Any] ):
UpperCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ = None
if self.use_labels:
UpperCAmelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase_ = self.get_config()
return config, pixel_values, labels
def _lowercase (self : List[str] ):
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__a , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def _lowercase (self : Union[str, Any] , __a : str , __a : Any , __a : Dict ):
UpperCAmelCase_ = TFDeiTModel(config=__a )
UpperCAmelCase_ = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase (self : str , __a : Optional[int] , __a : List[str] , __a : Tuple ):
UpperCAmelCase_ = TFDeiTForMaskedImageModeling(config=__a )
UpperCAmelCase_ = model(__a )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
UpperCAmelCase_ = 1
UpperCAmelCase_ = TFDeiTForMaskedImageModeling(__a )
UpperCAmelCase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase_ = model(__a )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def _lowercase (self : List[Any] , __a : List[str] , __a : int , __a : int ):
UpperCAmelCase_ = self.type_sequence_label_size
UpperCAmelCase_ = TFDeiTForImageClassification(__a )
UpperCAmelCase_ = model(__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCAmelCase_ = 1
UpperCAmelCase_ = TFDeiTForImageClassification(__a )
UpperCAmelCase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase_ = model(__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _lowercase (self : Dict ):
UpperCAmelCase_ = self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = config_and_inputs
UpperCAmelCase_ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class __A ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
a__ : Tuple = (
(
TFDeiTModel,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
)
if is_tf_available()
else ()
)
a__ : Union[str, Any] = (
{
"""feature-extraction""": TFDeiTModel,
"""image-classification""": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
}
if is_tf_available()
else {}
)
a__ : Tuple = False
a__ : List[Any] = False
a__ : Any = False
a__ : Dict = False
def _lowercase (self : List[Any] ):
UpperCAmelCase_ = TFDeiTModelTester(self )
UpperCAmelCase_ = ConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37 )
def _lowercase (self : Union[str, Any] ):
self.config_tester.run_common_tests()
@unittest.skip(reason="DeiT does not use inputs_embeds" )
def _lowercase (self : List[str] ):
pass
def _lowercase (self : List[Any] ):
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(__a )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
UpperCAmelCase_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__a , tf.keras.layers.Dense ) )
def _lowercase (self : Optional[Any] ):
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(__a )
UpperCAmelCase_ = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ = [*signature.parameters.keys()]
UpperCAmelCase_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __a )
def _lowercase (self : Dict ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def _lowercase (self : Any ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__a )
def _lowercase (self : Any ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a )
def _lowercase (self : Optional[Any] , __a : Tuple , __a : Tuple , __a : Union[str, Any]=False ):
UpperCAmelCase_ = super()._prepare_for_class(__a , __a , return_labels=__a )
if return_labels:
if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
del inputs_dict["labels"]
return inputs_dict
@slow
def _lowercase (self : int ):
for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ = TFDeiTModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def lowerCAmelCase_ ( ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class __A ( unittest.TestCase ):
@cached_property
def _lowercase (self : Union[str, Any] ):
return (
DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224" )
if is_vision_available()
else None
)
@slow
def _lowercase (self : Optional[Any] ):
UpperCAmelCase_ = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224" )
UpperCAmelCase_ = self.default_image_processor
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = image_processor(images=__a , return_tensors="tf" )
# forward pass
UpperCAmelCase_ = model(**__a )
# verify the logits
UpperCAmelCase_ = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , __a )
UpperCAmelCase_ = tf.constant([-1.02_66, 0.19_12, -1.28_61] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , __a , atol=1E-4 ) )
| 1 | '''simple docstring'''
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class __A ( UpperCamelCase__ ):
def _lowercase (self : Optional[Any] ):
UpperCAmelCase_ = tempfile.mkdtemp()
UpperCAmelCase_ = 5
        # REALM tokenizer: write a minimal WordPiece vocabulary to the temporary directory
UpperCAmelCase_ = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"test",
"question",
"this",
"is",
"the",
"first",
"second",
"third",
"fourth",
"fifth",
"record",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
UpperCAmelCase_ = os.path.join(self.tmpdirname , "realm_tokenizer" )
os.makedirs(__a , exist_ok=__a )
UpperCAmelCase_ = os.path.join(__a , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
UpperCAmelCase_ = os.path.join(self.tmpdirname , "realm_block_records" )
os.makedirs(__a , exist_ok=__a )
def _lowercase (self : Optional[Any] ):
return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , "realm_tokenizer" ) )
def _lowercase (self : Any ):
shutil.rmtree(self.tmpdirname )
def _lowercase (self : List[Any] ):
UpperCAmelCase_ = RealmConfig(num_block_records=self.num_block_records )
return config
def _lowercase (self : List[str] ):
UpperCAmelCase_ = Dataset.from_dict(
{
"id": ["0", "1"],
"question": ["foo", "bar"],
"answers": [["Foo", "Bar"], ["Bar"]],
} )
return dataset
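    # Dummy retrieval corpus of byte-string blocks; the last block is deliberately longer than the others.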
def _lowercase (self : Any ):
UpperCAmelCase_ = np.array(
[
B"This is the first record",
B"This is the second record",
B"This is the third record",
B"This is the fourth record",
B"This is the fifth record",
B"This is a longer longer longer record",
            ] , dtype=object , )
return block_records
def _lowercase (self : Union[str, Any] ):
UpperCAmelCase_ = RealmRetriever(
block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
return retriever
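    # Retrieve blocks 0 and 3 for a single question and check the shape and content of the concatenated reader inputs.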
def _lowercase (self : int ):
UpperCAmelCase_ = self.get_config()
UpperCAmelCase_ = self.get_dummy_retriever()
UpperCAmelCase_ = retriever.tokenizer
UpperCAmelCase_ = np.array([0, 3] , dtype="long" )
UpperCAmelCase_ = tokenizer(["Test question"] ).input_ids
UpperCAmelCase_ = tokenizer(
["the fourth"] , add_special_tokens=__a , return_token_type_ids=__a , return_attention_mask=__a , ).input_ids
UpperCAmelCase_ = config.reader_seq_len
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = retriever(
__a , __a , answer_ids=__a , max_length=__a , return_tensors="np" )
self.assertEqual(len(__a ) , 2 )
self.assertEqual(len(__a ) , 2 )
self.assertEqual(len(__a ) , 2 )
self.assertEqual(concat_inputs.input_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) )
self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "first", "record", "[SEP]"] , )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "fourth", "record", "[SEP]"] , )
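    # With three retrieved blocks, check has_answers and the per-block answer start/end positions.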
def _lowercase (self : List[Any] ):
UpperCAmelCase_ = self.get_config()
UpperCAmelCase_ = self.get_dummy_retriever()
UpperCAmelCase_ = retriever.tokenizer
UpperCAmelCase_ = np.array([0, 3, 5] , dtype="long" )
UpperCAmelCase_ = tokenizer(["Test question"] ).input_ids
UpperCAmelCase_ = tokenizer(
["the fourth", "longer longer"] , add_special_tokens=__a , return_token_type_ids=__a , return_attention_mask=__a , ).input_ids
UpperCAmelCase_ = config.reader_seq_len
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = retriever(
__a , __a , answer_ids=__a , max_length=__a , return_tensors="np" )
self.assertEqual([False, True, True] , __a )
self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , __a )
self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , __a )
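    # Round-trip: save the retriever locally, reload it from disk, then load it via a mocked hub download.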
def _lowercase (self : Optional[Any] ):
UpperCAmelCase_ = self.get_dummy_retriever()
retriever.save_pretrained(os.path.join(self.tmpdirname , "realm_block_records" ) )
# Test local path
UpperCAmelCase_ = retriever.from_pretrained(os.path.join(self.tmpdirname , "realm_block_records" ) )
self.assertEqual(retriever.block_records[0] , B"This is the first record" )
# Test mocked remote path
with patch("transformers.models.realm.retrieval_realm.hf_hub_download" ) as mock_hf_hub_download:
UpperCAmelCase_ = os.path.join(
os.path.join(self.tmpdirname , "realm_block_records" ) , _REALM_BLOCK_RECORDS_FILENAME )
UpperCAmelCase_ = RealmRetriever.from_pretrained("google/realm-cc-news-pretrained-openqa" )
self.assertEqual(retriever.block_records[0] , B"This is the first record" )
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
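# Map parameter names from the original GroupViT checkpoint onto the Transformers naming scheme.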
def rename_key ( name : str ) -> str:
'''simple docstring'''
if "img_encoder.pos_embed" in name:
UpperCAmelCase_ = name.replace("img_encoder.pos_embed" , "vision_model.embeddings.position_embeddings" )
if "img_encoder.patch_embed.proj" in name:
UpperCAmelCase_ = name.replace("img_encoder.patch_embed.proj" , "vision_model.embeddings.patch_embeddings.projection" )
if "img_encoder.patch_embed.norm" in name:
UpperCAmelCase_ = name.replace("img_encoder.patch_embed.norm" , "vision_model.embeddings.layernorm" )
if "img_encoder.layers" in name:
UpperCAmelCase_ = name.replace("img_encoder.layers" , "vision_model.encoder.stages" )
if "blocks" in name and "res" not in name:
UpperCAmelCase_ = name.replace("blocks" , "layers" )
if "attn" in name and "pre_assign" not in name:
UpperCAmelCase_ = name.replace("attn" , "self_attn" )
if "proj" in name and "self_attn" in name and "text" not in name:
UpperCAmelCase_ = name.replace("proj" , "out_proj" )
if "pre_assign_attn.attn.proj" in name:
UpperCAmelCase_ = name.replace("pre_assign_attn.attn.proj" , "pre_assign_attn.attn.out_proj" )
if "norm1" in name:
UpperCAmelCase_ = name.replace("norm1" , "layer_norm1" )
if "norm2" in name and "pre_assign" not in name:
UpperCAmelCase_ = name.replace("norm2" , "layer_norm2" )
if "img_encoder.norm" in name:
UpperCAmelCase_ = name.replace("img_encoder.norm" , "vision_model.layernorm" )
# text encoder
if "text_encoder.token_embedding" in name:
UpperCAmelCase_ = name.replace("text_encoder.token_embedding" , "text_model.embeddings.token_embedding" )
if "text_encoder.positional_embedding" in name:
UpperCAmelCase_ = name.replace("text_encoder.positional_embedding" , "text_model.embeddings.position_embedding.weight" )
if "text_encoder.transformer.resblocks." in name:
UpperCAmelCase_ = name.replace("text_encoder.transformer.resblocks." , "text_model.encoder.layers." )
if "ln_1" in name:
UpperCAmelCase_ = name.replace("ln_1" , "layer_norm1" )
if "ln_2" in name:
UpperCAmelCase_ = name.replace("ln_2" , "layer_norm2" )
if "c_fc" in name:
UpperCAmelCase_ = name.replace("c_fc" , "fc1" )
if "c_proj" in name:
UpperCAmelCase_ = name.replace("c_proj" , "fc2" )
if "text_encoder" in name:
UpperCAmelCase_ = name.replace("text_encoder" , "text_model" )
if "ln_final" in name:
UpperCAmelCase_ = name.replace("ln_final" , "final_layer_norm" )
# projection layers
if "img_projector.linear_hidden." in name:
UpperCAmelCase_ = name.replace("img_projector.linear_hidden." , "visual_projection." )
if "img_projector.linear_out." in name:
UpperCAmelCase_ = name.replace("img_projector.linear_out." , "visual_projection.3." )
if "text_projector.linear_hidden" in name:
UpperCAmelCase_ = name.replace("text_projector.linear_hidden" , "text_projection" )
if "text_projector.linear_out" in name:
UpperCAmelCase_ = name.replace("text_projector.linear_out" , "text_projection.3" )
return name
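# Rename every key of the original state dict; fused qkv/in_proj tensors are split into separate q, k and v projections.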
def convert_state_dict ( orig_state_dict : dict , config : GroupViTConfig ) -> dict:
'''simple docstring'''
for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
if "qkv" in key:
# weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
            key_split = key.split("." )
UpperCAmelCase_ , UpperCAmelCase_ = int(key_split[2] ), int(key_split[4] )
            dim = config.vision_config.hidden_size
if "weight" in key:
UpperCAmelCase_ = val[:dim, :]
UpperCAmelCase_ = val[dim : dim * 2, :]
UpperCAmelCase_ = val[-dim:, :]
else:
UpperCAmelCase_ = val[:dim]
UpperCAmelCase_ = val[dim : dim * 2]
UpperCAmelCase_ = val[-dim:]
elif "in_proj" in key:
# weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
            key_split = key.split("." )
UpperCAmelCase_ = int(key_split[3] )
            dim = config.text_config.hidden_size
if "weight" in key:
UpperCAmelCase_ = val[:dim, :]
UpperCAmelCase_ = val[
dim : dim * 2, :
]
UpperCAmelCase_ = val[-dim:, :]
else:
UpperCAmelCase_ = val[:dim]
UpperCAmelCase_ = val[dim : dim * 2]
UpperCAmelCase_ = val[-dim:]
else:
            new_name = rename_key(key )
# squeeze if necessary
if (
"text_projection.0" in new_name
or "text_projection.3" in new_name
or "visual_projection.0" in new_name
or "visual_projection.3" in new_name
):
UpperCAmelCase_ = val.squeeze_()
else:
UpperCAmelCase_ = val
return orig_state_dict
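# We will verify our results on an image of cute cats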
def prepare_img ( ) -> Image.Image:
    '''simple docstring'''
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
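# Copy/paste/tweak the original checkpoint's weights into the Transformers GroupViT design, then verify the predicted logits.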
@torch.no_grad()
def convert_groupvit_checkpoint ( checkpoint_path : str , pytorch_dump_folder_path : str , model_name : str = "groupvit-gcc-yfcc" , push_to_hub : bool = False ) -> None:
'''simple docstring'''
UpperCAmelCase_ = GroupViTConfig()
UpperCAmelCase_ = GroupViTModel(snake_case_ ).eval()
    UpperCAmelCase_ = torch.load(checkpoint_path , map_location="cpu" )["model"]
UpperCAmelCase_ = convert_state_dict(snake_case_ , snake_case_ )
UpperCAmelCase_ , UpperCAmelCase_ = model.load_state_dict(snake_case_ , strict=snake_case_ )
assert missing_keys == ["text_model.embeddings.position_ids"]
assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(snake_case_ ) == 0)
# verify result
UpperCAmelCase_ = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32" )
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = processor(text=["a photo of a cat", "a photo of a dog"] , images=snake_case_ , padding=snake_case_ , return_tensors="pt" )
with torch.no_grad():
UpperCAmelCase_ = model(**snake_case_ )
if model_name == "groupvit-gcc-yfcc":
UpperCAmelCase_ = torch.tensor([[13.3523, 6.3629]] )
elif model_name == "groupvit-gcc-redcaps":
UpperCAmelCase_ = torch.tensor([[16.1873, 8.6230]] )
else:
raise ValueError(f"""Model name {model_name} not supported.""" )
assert torch.allclose(outputs.logits_per_image , snake_case_ , atol=1E-3 )
    processor.save_pretrained(pytorch_dump_folder_path )
    model.save_pretrained(pytorch_dump_folder_path )
    print("Successfully saved processor and model to" , pytorch_dump_folder_path )
    if push_to_hub:
        print("Pushing to the hub..." )
        processor.push_to_hub(model_name , organization="nielsr" )
        model.push_to_hub(model_name , organization="nielsr" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_: Dict =argparse.ArgumentParser()
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to dump the processor and PyTorch model.'
)
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to GroupViT checkpoint')
parser.add_argument(
'--model_name',
    default='groupvit-gcc-yfcc',
type=str,
help='Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.',
)
SCREAMING_SNAKE_CASE_: Optional[int] =parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
'''simple docstring'''
from math import log
from scipy.constants import Boltzmann, physical_constants
SCREAMING_SNAKE_CASE_: Optional[int] =3_00 # TEMPERATURE (unit = K)
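# Built-in potential of a p-n junction: V_bi = (kT / q) * ln(N_d * N_a / n_i^2).
# For example, with N_d = N_a = 1e17 cm^-3 and n_i = 1e10 cm^-3 at T = 300 K,
# V_bi = 0.02585 V * ln(1e14) ≈ 0.83 V.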
def lowerCAmelCase_ ( donor_conc : float , acceptor_conc : float , intrinsic_conc : float , ) -> float:
'''simple docstring'''
if donor_conc <= 0:
raise ValueError("Donor concentration should be positive" )
elif acceptor_conc <= 0:
raise ValueError("Acceptor concentration should be positive" )
elif intrinsic_conc <= 0:
raise ValueError("Intrinsic concentration should be positive" )
elif donor_conc <= intrinsic_conc:
raise ValueError(
"Donor concentration should be greater than intrinsic concentration" )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
"Acceptor concentration should be greater than intrinsic concentration" )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
import doctest
doctest.testmod()