Dataset schema (one row = five fields, in this order):

  code                     string   length 81 – 54k
  code_codestyle           int64    0 – 721
  style_context            string   length 91 – 41.9k
  style_context_codestyle  int64    0 – 699
  label                    int64    0 – 1
from __future__ import annotations

def UpperCamelCase__ ( A__ , A__ ) -> list[str]:
    if nth_term == "":
        return [""]
    snake_case__ : str = int(A__ )
    snake_case__ : Dict = int(A__ )
    snake_case__ : list[str] = []
    for temp in range(int(A__ ) ):
        series.append(F"""1 / {pow(temp + 1 , int(A__ ) )}""" if series else '1' )
    return series

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    lowerCAmelCase__ : Dict = int(input('''Enter the last number (nth term) of the P-Series'''))
    lowerCAmelCase__ : Optional[Any] = int(input('''Enter the power for P-Series'''))
    print('''Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p''')
    print(p_series(nth_term, power))
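For reference, a minimal de-obfuscated sketch of what this sample computes. The names p_series, nth_term and power are taken from the sample's own __main__ block; the obfuscation pass renamed the definitions but left these uses intact:

def p_series(nth_term, power) -> list[str]:
    """Terms of the P-series 1 + 1/2^p + 1/3^p + ... rendered as strings."""
    if nth_term == "":
        return [""]
    nth_term = int(nth_term)
    power = int(power)
    series: list[str] = []
    for temp in range(nth_term):
        series.append(f"1 / {pow(temp + 1, power)}" if series else "1")
    return series

print(p_series(5, 2))  # ['1', '1 / 4', '1 / 9', '1 / 16', '1 / 25']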
code_codestyle: 699
from __future__ import annotations

import random

# Maximum size of the population. Bigger could be faster but is more memory expensive.
lowerCAmelCase__ : Dict = 2_00
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
lowerCAmelCase__ : List[str] = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
lowerCAmelCase__ : List[str] = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 10_00))


def UpperCamelCase__ ( A__ , A__ ) -> tuple[str, float]:
    snake_case__ : Tuple = len([g for position, g in enumerate(A__ ) if g == main_target[position]] )
    return (item, float(A__ ))


def UpperCamelCase__ ( A__ , A__ ) -> tuple[str, str]:
    snake_case__ : str = random.randint(0 , len(A__ ) - 1 )
    snake_case__ : int = parent_a[:random_slice] + parent_a[random_slice:]
    snake_case__ : Any = parent_a[:random_slice] + parent_a[random_slice:]
    return (child_a, child_a)


def UpperCamelCase__ ( A__ , A__ ) -> str:
    snake_case__ : List[Any] = list(A__ )
    if random.uniform(0 , 1 ) < MUTATION_PROBABILITY:
        snake_case__ : Optional[Any] = random.choice(A__ )
    return "".join(A__ )


def UpperCamelCase__ ( A__ , A__ , A__ , ) -> list[str]:
    snake_case__ : Tuple = []
    # Generate more children proportionally to the fitness score.
    snake_case__ : Optional[Any] = int(parent_a[1] * 100 ) + 1
    snake_case__ : str = 10 if child_n >= 10 else child_n
    for _ in range(A__ ):
        snake_case__ : Any = population_score[random.randint(0 , A__ )][0]
        snake_case__ , snake_case__ : int = crossover(parent_a[0] , A__ )
        # Append new string to the population list.
        pop.append(mutate(A__ , A__ ) )
        pop.append(mutate(A__ , A__ ) )
    return pop


def UpperCamelCase__ ( A__ , A__ , A__ = True ) -> tuple[int, int, str]:
    # Verify that N_POPULATION is bigger than N_SELECTED
    if N_POPULATION < N_SELECTED:
        snake_case__ : Union[str, Any] = F"""{N_POPULATION} must be bigger than {N_SELECTED}"""
        raise ValueError(A__ )
    # Verify that the target contains no genes besides the ones inside genes variable.
    snake_case__ : Tuple = sorted({c for c in target if c not in genes} )
    if not_in_genes_list:
        snake_case__ : int = F"""{not_in_genes_list} is not in genes list, evolution cannot converge"""
        raise ValueError(A__ )

    # Generate random starting population.
    snake_case__ : Union[str, Any] = []
    for _ in range(A__ ):
        population.append(''.join([random.choice(A__ ) for i in range(len(A__ ) )] ) )

    # Just some logs to know what the algorithm is doing.
    snake_case__ , snake_case__ : str = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(A__ )

        # Random population created. Now it's time to evaluate.

        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #     max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        snake_case__ : List[Any] = [evaluate(A__ , A__ ) for item in population]

        # Check if there is a matching evolution.
        snake_case__ : int = sorted(A__ , key=lambda A__ : x[1] , reverse=A__ )
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generations,
        # just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                F"""\nGeneration: {generation}"""
                F"""\nTotal Population:{total_population}"""
                F"""\nBest score: {population_score[0][1]}"""
                F"""\nBest string: {population_score[0][0]}""" )

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoids regression of evolution.
        snake_case__ : Optional[int] = population[: int(N_POPULATION / 3 )]
        population.clear()
        population.extend(A__ )
        # Normalize population score to be between 0 and 1.
        snake_case__ : str = [
            (item, score / len(A__ )) for item, score in population_score
        ]

        # This is selection
        for i in range(A__ ):
            population.extend(select(population_score[int(A__ )] , A__ , A__ ) )
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings
            # in far fewer generations.
            if len(A__ ) > N_POPULATION:
                break


if __name__ == "__main__":
    lowerCAmelCase__ : str = (
        '''This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!'''
    )
    lowerCAmelCase__ : Optional[Any] = list(
        ''' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm'''
        '''nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\'''
    )
    lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__ : List[str] = basic(target_str, genes_list)
    print(
        F'''\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'''
    )
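One artifact worth flagging: the obfuscation collapsed the two distinct parents and children of the crossover helper into a single repeated name, so as written both returned children would be identical. A sketch of the intended two-parent crossover, with names assumed from the visible locals (parent_a, child_a, random_slice):

import random

def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    """Slice both parents at a random point and swap the tails."""
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)

print(crossover("AAAA", "BBBB"))  # e.g. ('ABBB', 'BAAA') for a slice point of 1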
style_context_codestyle: 699
label: 1
import random
import unittest

import numpy as np

from diffusers import (
    DPMSolverMultistepScheduler,
    EulerAncestralDiscreteScheduler,
    EulerDiscreteScheduler,
    LMSDiscreteScheduler,
    OnnxStableDiffusionImgaImgPipeline,
    PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
    is_onnx_available,
    load_image,
    nightly,
    require_onnxruntime,
    require_torch_gpu,
)

from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin

if is_onnx_available():
    import onnxruntime as ort


class __snake_case ( _lowerCamelCase ,unittest.TestCase ):
    __lowerCamelCase = """hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"""

    def __a ( self , __UpperCamelCase=0 ) -> Optional[Any]:
        '''simple docstring'''
        snake_case__ : str = floats_tensor((1, 3, 128, 128) , rng=random.Random(__UpperCamelCase ) )
        snake_case__ : Union[str, Any] = np.random.RandomState(__UpperCamelCase )
        snake_case__ : Any = {
            'prompt': 'A painting of a squirrel eating a burger',
            'image': image,
            'generator': generator,
            'num_inference_steps': 3,
            'strength': 0.7_5,
            'guidance_scale': 7.5,
            'output_type': 'numpy',
        }
        return inputs

    def __a ( self ) -> int:
        '''simple docstring'''
        snake_case__ : List[Any] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
        pipe.set_progress_bar_config(disable=__UpperCamelCase )
        snake_case__ : int = self.get_dummy_inputs()
        snake_case__ : int = pipe(**__UpperCamelCase ).images
        snake_case__ : int = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 128, 128, 3)
        snake_case__ : str = np.array([0.6_9_6_4_3, 0.5_8_4_8_4, 0.5_0_3_1_4, 0.5_8_7_6_0, 0.5_5_3_6_8, 0.5_9_6_4_3, 0.5_1_5_2_9, 0.4_1_2_1_7, 0.4_9_0_8_7] )
        assert np.abs(image_slice - expected_slice ).max() < 1E-1

    def __a ( self ) -> Dict:
        '''simple docstring'''
        snake_case__ : int = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
        snake_case__ : str = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=__UpperCamelCase )
        pipe.set_progress_bar_config(disable=__UpperCamelCase )
        snake_case__ : Optional[Any] = self.get_dummy_inputs()
        snake_case__ : List[str] = pipe(**__UpperCamelCase ).images
        snake_case__ : Union[str, Any] = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        snake_case__ : str = np.array([0.6_1_7_3_7, 0.5_4_6_4_2, 0.5_3_1_8_3, 0.5_4_4_6_5, 0.5_2_7_4_2, 0.6_0_5_2_5, 0.4_9_9_6_9, 0.4_0_6_5_5, 0.4_8_1_5_4] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1

    def __a ( self ) -> List[Any]:
        '''simple docstring'''
        snake_case__ : Any = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
        snake_case__ : int = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=__UpperCamelCase )
        # warmup pass to apply optimizations
        snake_case__ : Any = pipe(**self.get_dummy_inputs() )
        snake_case__ : List[Any] = self.get_dummy_inputs()
        snake_case__ : Optional[Any] = pipe(**__UpperCamelCase ).images
        snake_case__ : Union[str, Any] = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        snake_case__ : List[Any] = np.array([0.5_2_7_6_1, 0.5_9_9_7_7, 0.4_9_0_3_3, 0.4_9_6_1_9, 0.5_4_2_8_2, 0.5_0_3_1_1, 0.4_7_6_0_0, 0.4_0_9_1_8, 0.4_5_2_0_3] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1

    def __a ( self ) -> Union[str, Any]:
        '''simple docstring'''
        snake_case__ : List[str] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
        snake_case__ : Optional[Any] = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=__UpperCamelCase )
        snake_case__ : Optional[int] = self.get_dummy_inputs()
        snake_case__ : Dict = pipe(**__UpperCamelCase ).images
        snake_case__ : Any = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        snake_case__ : Union[str, Any] = np.array([0.5_2_9_1_1, 0.6_0_0_0_4, 0.4_9_2_2_9, 0.4_9_8_0_5, 0.5_4_5_0_2, 0.5_0_6_8_0, 0.4_7_7_7_7, 0.4_1_0_2_8, 0.4_5_3_0_4] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1

    def __a ( self ) -> Any:
        '''simple docstring'''
        snake_case__ : Any = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
        snake_case__ : Optional[int] = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=__UpperCamelCase )
        snake_case__ : List[str] = self.get_dummy_inputs()
        snake_case__ : Dict = pipe(**__UpperCamelCase ).images
        snake_case__ : int = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        snake_case__ : Optional[int] = np.array([0.5_2_9_1_1, 0.6_0_0_0_4, 0.4_9_2_2_9, 0.4_9_8_0_5, 0.5_4_5_0_2, 0.5_0_6_8_0, 0.4_7_7_7_7, 0.4_1_0_2_8, 0.4_5_3_0_4] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1

    def __a ( self ) -> Optional[int]:
        '''simple docstring'''
        snake_case__ : str = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
        snake_case__ : Dict = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=__UpperCamelCase )
        snake_case__ : int = self.get_dummy_inputs()
        snake_case__ : List[str] = pipe(**__UpperCamelCase ).images
        snake_case__ : Dict = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        snake_case__ : Dict = np.array([0.6_5_3_3_1, 0.5_8_2_7_7, 0.4_8_2_0_4, 0.5_6_0_5_9, 0.5_3_6_6_5, 0.5_6_2_3_5, 0.5_0_9_6_9, 0.4_0_0_0_9, 0.4_6_5_5_2] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1


@nightly
@require_onnxruntime
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
    @property
    def __a ( self ) -> Optional[int]:
        '''simple docstring'''
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def __a ( self ) -> Union[str, Any]:
        '''simple docstring'''
        snake_case__ : List[str] = ort.SessionOptions()
        snake_case__ : Dict = False
        return options

    def __a ( self ) -> List[str]:
        '''simple docstring'''
        snake_case__ : List[Any] = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/img2img/sketch-mountains-input.jpg' )
        snake_case__ : List[str] = init_image.resize((768, 512) )
        # using the PNDM scheduler by default
        snake_case__ : Union[str, Any] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4' , revision='onnx' , safety_checker=__UpperCamelCase , feature_extractor=__UpperCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=__UpperCamelCase )
        snake_case__ : int = 'A fantasy landscape, trending on artstation'
        snake_case__ : Optional[int] = np.random.RandomState(0 )
        snake_case__ : Optional[Any] = pipe(
            prompt=__UpperCamelCase , image=__UpperCamelCase , strength=0.7_5 , guidance_scale=7.5 , num_inference_steps=10 , generator=__UpperCamelCase , output_type='np' , )
        snake_case__ : Optional[int] = output.images
        snake_case__ : Union[str, Any] = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 768, 3)
        snake_case__ : str = np.array([0.4_9_0_9, 0.5_0_5_9, 0.5_3_7_2, 0.4_6_2_3, 0.4_8_7_6, 0.5_0_4_9, 0.4_8_2_0, 0.4_9_5_6, 0.5_0_1_9] )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2

    def __a ( self ) -> List[Any]:
        '''simple docstring'''
        snake_case__ : Tuple = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/img2img/sketch-mountains-input.jpg' )
        snake_case__ : List[Any] = init_image.resize((768, 512) )
        snake_case__ : List[str] = LMSDiscreteScheduler.from_pretrained(
            'runwayml/stable-diffusion-v1-5' , subfolder='scheduler' , revision='onnx' )
        snake_case__ : Dict = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
            'runwayml/stable-diffusion-v1-5' , revision='onnx' , scheduler=__UpperCamelCase , safety_checker=__UpperCamelCase , feature_extractor=__UpperCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=__UpperCamelCase )
        snake_case__ : Dict = 'A fantasy landscape, trending on artstation'
        snake_case__ : Optional[Any] = np.random.RandomState(0 )
        snake_case__ : str = pipe(
            prompt=__UpperCamelCase , image=__UpperCamelCase , strength=0.7_5 , guidance_scale=7.5 , num_inference_steps=20 , generator=__UpperCamelCase , output_type='np' , )
        snake_case__ : Tuple = output.images
        snake_case__ : str = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 768, 3)
        snake_case__ : int = np.array([0.8_0_4_3, 0.9_2_6, 0.9_5_8_1, 0.8_1_1_9, 0.8_9_5_4, 0.9_1_3, 0.7_2_0_9, 0.7_4_6_3, 0.7_4_3_1] )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
code_codestyle: 699
from __future__ import annotations

from collections.abc import Iterator
from typing import Generic, TypeVar

lowerCAmelCase__ : Optional[int] = TypeVar('''T''')


class __snake_case ( Generic[T] ):
    def __init__( self , __UpperCamelCase ) -> Any:
        '''simple docstring'''
        snake_case__ : Optional[int] = data
        snake_case__ : Node[T] | None = None

    def __str__( self ) -> str:
        '''simple docstring'''
        return F"""{self.data}"""


class __snake_case ( Generic[T] ):
    def __init__( self ) -> None:
        '''simple docstring'''
        snake_case__ : Node[T] | None = None

    def __iter__( self ) -> Iterator[T]:
        '''simple docstring'''
        snake_case__ : str = self.top
        while node:
            yield node.data
            snake_case__ : Dict = node.next

    def __str__( self ) -> str:
        '''simple docstring'''
        return "->".join([str(__UpperCamelCase ) for item in self] )

    def __len__( self ) -> int:
        '''simple docstring'''
        return len(tuple(iter(self ) ) )

    def __a ( self ) -> bool:
        '''simple docstring'''
        return self.top is None

    def __a ( self , __UpperCamelCase ) -> None:
        '''simple docstring'''
        snake_case__ : str = Node(__UpperCamelCase )
        if not self.is_empty():
            snake_case__ : List[str] = self.top
        snake_case__ : Tuple = node

    def __a ( self ) -> T:
        '''simple docstring'''
        if self.is_empty():
            raise IndexError('pop from empty stack' )
        assert isinstance(self.top , __UpperCamelCase )
        snake_case__ : List[str] = self.top
        snake_case__ : Union[str, Any] = self.top.next
        return pop_node.data

    def __a ( self ) -> T:
        '''simple docstring'''
        if self.is_empty():
            raise IndexError('peek from empty stack' )
        assert self.top is not None
        return self.top.data

    def __a ( self ) -> None:
        '''simple docstring'''
        snake_case__ : Any = None


if __name__ == "__main__":
    from doctest import testmod

    testmod()
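A minimal de-obfuscated sketch of the linked-list stack above; the names Node, Stack, push and pop are assumptions inferred from the type annotations, error messages and helper names in the sample:

from __future__ import annotations

from typing import Generic, TypeVar

T = TypeVar("T")


class Node(Generic[T]):
    def __init__(self, data: T) -> None:
        self.data = data
        self.next: Node[T] | None = None


class Stack(Generic[T]):
    def __init__(self) -> None:
        self.top: Node[T] | None = None

    def push(self, item: T) -> None:
        node = Node(item)
        node.next = self.top  # new node points at the old top
        self.top = node

    def pop(self) -> T:
        if self.top is None:
            raise IndexError("pop from empty stack")
        pop_node, self.top = self.top, self.top.next
        return pop_node.data


stack: Stack[int] = Stack()
stack.push(1)
stack.push(2)
print(stack.pop())  # 2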
style_context_codestyle: 699
label: 1
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef

import datasets


lowerCAmelCase__ : Optional[Any] = '''\
@inproceedings{wang2019glue,
  title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
  author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
  note={In the Proceedings of ICLR.},
  year={2019}
}
'''

lowerCAmelCase__ : Optional[Any] = '''\
GLUE, the General Language Understanding Evaluation benchmark
(https://gluebenchmark.com/) is a collection of resources for training,
evaluating, and analyzing natural language understanding systems.
'''

lowerCAmelCase__ : Optional[int] = '''
Compute GLUE evaluation metric associated to each GLUE dataset.
Args:
    predictions: list of predictions to score.
        Each translation should be tokenized into a list of tokens.
    references: list of lists of references for each translation.
        Each reference should be tokenized into a list of tokens.
Returns: depending on the GLUE subset, one or several of:
    "accuracy": Accuracy
    "f1": F1 score
    "pearson": Pearson Correlation
    "spearmanr": Spearman Correlation
    "matthews_correlation": Matthew Correlation
Examples:

    >>> glue_metric = datasets.load_metric(\'glue\', \'sst2\')  # \'sst2\' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]
    >>> references = [0, 1]
    >>> predictions = [0, 1]
    >>> results = glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {\'accuracy\': 1.0}

    >>> glue_metric = datasets.load_metric(\'glue\', \'mrpc\')  # \'mrpc\' or \'qqp\'
    >>> references = [0, 1]
    >>> predictions = [0, 1]
    >>> results = glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {\'accuracy\': 1.0, \'f1\': 1.0}

    >>> glue_metric = datasets.load_metric(\'glue\', \'stsb\')
    >>> references = [0., 1., 2., 3., 4., 5.]
    >>> predictions = [0., 1., 2., 3., 4., 5.]
    >>> results = glue_metric.compute(predictions=predictions, references=references)
    >>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})
    {\'pearson\': 1.0, \'spearmanr\': 1.0}

    >>> glue_metric = datasets.load_metric(\'glue\', \'cola\')
    >>> references = [0, 1]
    >>> predictions = [0, 1]
    >>> results = glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {\'matthews_correlation\': 1.0}
'''


def UpperCamelCase__ ( A__ , A__ ) -> List[str]:
    return float((preds == labels).mean() )


def UpperCamelCase__ ( A__ , A__ ) -> Union[str, Any]:
    snake_case__ : Any = simple_accuracy(A__ , A__ )
    snake_case__ : int = float(fa_score(y_true=A__ , y_pred=A__ ) )
    return {
        "accuracy": acc,
        "f1": fa,
    }


def UpperCamelCase__ ( A__ , A__ ) -> Optional[int]:
    snake_case__ : List[str] = float(pearsonr(A__ , A__ )[0] )
    snake_case__ : Tuple = float(spearmanr(A__ , A__ )[0] )
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class __snake_case ( datasets.Metric ):
    def __a ( self ) -> List[str]:
        '''simple docstring'''
        if self.config_name not in [
            "sst2",
            "mnli",
            "mnli_mismatched",
            "mnli_matched",
            "cola",
            "stsb",
            "mrpc",
            "qqp",
            "qnli",
            "rte",
            "wnli",
            "hans",
        ]:
            raise KeyError(
                'You should supply a configuration name selected in '
                '["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
                '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' )
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    'predictions': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
                    'references': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
                } ) , codebase_urls=[] , reference_urls=[] , format='numpy' , )

    def __a ( self , __UpperCamelCase , __UpperCamelCase ) -> str:
        '''simple docstring'''
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(__UpperCamelCase , __UpperCamelCase )}
        elif self.config_name == "stsb":
            return pearson_and_spearman(__UpperCamelCase , __UpperCamelCase )
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_fa(__UpperCamelCase , __UpperCamelCase )
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(__UpperCamelCase , __UpperCamelCase )}
        else:
            raise KeyError(
                'You should supply a configuration name selected in '
                '["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
                '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' )
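The three helper functions above (plain accuracy, accuracy plus F1, and Pearson/Spearman correlation) can be exercised directly; a small sketch, assuming the obfuscated fa_score import stands for sklearn's f1_score:

import numpy as np
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score

preds = np.array([0, 1, 1, 0])
labels = np.array([0, 1, 0, 0])

acc = float((preds == labels).mean())              # 0.75
f1 = float(f1_score(y_true=labels, y_pred=preds))  # 0.666...
pearson = float(pearsonr(preds, labels)[0])
spearman = float(spearmanr(preds, labels)[0])
print({"accuracy": acc, "f1": f1, "pearson": pearson, "spearmanr": spearman})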
code_codestyle: 699
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


lowerCAmelCase__ : Dict = logging.get_logger(__name__)

lowerCAmelCase__ : int = {
    '''sail/poolformer_s12''': '''https://huggingface.co/sail/poolformer_s12/resolve/main/config.json''',
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}


class __snake_case ( _lowerCamelCase ):
    __lowerCamelCase = """poolformer"""

    def __init__( self , __UpperCamelCase=3 , __UpperCamelCase=16 , __UpperCamelCase=16 , __UpperCamelCase=3 , __UpperCamelCase=4.0 , __UpperCamelCase=[2, 2, 6, 2] , __UpperCamelCase=[64, 128, 320, 512] , __UpperCamelCase=[7, 3, 3, 3] , __UpperCamelCase=[4, 2, 2, 2] , __UpperCamelCase=[2, 1, 1, 1] , __UpperCamelCase=4 , __UpperCamelCase=0.0 , __UpperCamelCase="gelu" , __UpperCamelCase=True , __UpperCamelCase=1E-5 , __UpperCamelCase=0.0_2 , **__UpperCamelCase , ) -> Any:
        '''simple docstring'''
        snake_case__ : List[str] = num_channels
        snake_case__ : Dict = patch_size
        snake_case__ : Optional[int] = stride
        snake_case__ : str = padding
        snake_case__ : List[str] = pool_size
        snake_case__ : List[Any] = hidden_sizes
        snake_case__ : List[Any] = mlp_ratio
        snake_case__ : Union[str, Any] = depths
        snake_case__ : Dict = patch_sizes
        snake_case__ : Dict = strides
        snake_case__ : Dict = num_encoder_blocks
        snake_case__ : Union[str, Any] = drop_path_rate
        snake_case__ : List[str] = hidden_act
        snake_case__ : Optional[Any] = use_layer_scale
        snake_case__ : int = layer_scale_init_value
        snake_case__ : Dict = initializer_range
        super().__init__(**__UpperCamelCase )


class __snake_case ( _lowerCamelCase ):
    __lowerCamelCase = version.parse("""1.11""" )

    @property
    def __a ( self ) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ] )

    @property
    def __a ( self ) -> float:
        '''simple docstring'''
        return 2E-3
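A short usage sketch, assuming this sample corresponds to transformers' PoolFormer configuration file (PoolFormerConfig and PoolFormerModel are the public classes in that library):

from transformers import PoolFormerConfig, PoolFormerModel

# The defaults above describe the poolformer_s12 layout: 4 encoder blocks,
# depths [2, 2, 6, 2] and hidden sizes [64, 128, 320, 512].
config = PoolFormerConfig(depths=[2, 2, 6, 2], hidden_sizes=[64, 128, 320, 512])
model = PoolFormerModel(config)  # randomly initialised model with that geometry
print(config.num_encoder_blocks, config.mlp_ratio)  # 4 4.0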
style_context_codestyle: 699
label: 1
import argparse
import collections
import json
from pathlib import Path

import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import (
    MobileViTImageProcessor,
    MobileViTVaConfig,
    MobileViTVaForImageClassification,
    MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging


logging.set_verbosity_info()
lowerCAmelCase__ : str = logging.get_logger(__name__)


def UpperCamelCase__ ( A__ ) -> str:
    print('Loading config file...' )

    def flatten_yaml_as_dict(A__ , A__="" , A__="." ):
        snake_case__ : List[Any] = []
        for k, v in d.items():
            snake_case__ : Optional[Any] = parent_key + sep + k if parent_key else k
            if isinstance(A__ , collections.abc.MutableMapping ):
                items.extend(flatten_yaml_as_dict(A__ , A__ , sep=A__ ).items() )
            else:
                items.append((new_key, v) )
        return dict(A__ )

    snake_case__ : int = argparse.Namespace()
    with open(A__ , 'r' ) as yaml_file:
        try:
            snake_case__ : int = yaml.load(A__ , Loader=yaml.FullLoader )
            snake_case__ : Tuple = flatten_yaml_as_dict(A__ )
            for k, v in flat_cfg.items():
                setattr(A__ , A__ , A__ )
        except yaml.YAMLError as exc:
            logger.error('Error while loading config file: {}. Error message: {}'.format(A__ , str(A__ ) ) )
    return config


def UpperCamelCase__ ( A__ , A__ ) -> int:
    snake_case__ : Tuple = MobileViTVaConfig()

    snake_case__ : List[str] = False
    # dataset
    if task_name.startswith('imagenet1k_' ):
        snake_case__ : Union[str, Any] = 1000
        if int(task_name.strip().split('_' )[-1] ) == 384:
            snake_case__ : List[str] = 384
        else:
            snake_case__ : str = 256
        snake_case__ : int = 'imagenet-1k-id2label.json'
    elif task_name.startswith('imagenet21k_to_1k_' ):
        snake_case__ : Any = 2_1000
        if int(task_name.strip().split('_' )[-1] ) == 384:
            snake_case__ : Union[str, Any] = 384
        else:
            snake_case__ : List[Any] = 256
        snake_case__ : Optional[int] = 'imagenet-22k-id2label.json'
    elif task_name.startswith('ade20k_' ):
        snake_case__ : Dict = 151
        snake_case__ : Any = 512
        snake_case__ : Tuple = 'ade20k-id2label.json'
        snake_case__ : Union[str, Any] = True
    elif task_name.startswith('voc_' ):
        snake_case__ : int = 21
        snake_case__ : Optional[Any] = 512
        snake_case__ : List[Any] = 'pascal-voc-id2label.json'
        snake_case__ : List[Any] = True

    # orig_config
    snake_case__ : int = load_orig_config_file(A__ )
    assert getattr(A__ , 'model.classification.name' , -1 ) == "mobilevit_v2", "Invalid model"
    snake_case__ : Union[str, Any] = getattr(A__ , 'model.classification.mitv2.width_multiplier' , 1.0 )
    assert (
        getattr(A__ , 'model.classification.mitv2.attn_norm_layer' , -1 ) == "layer_norm_2d"
    ), "Norm layers other than layer_norm_2d is not supported"
    snake_case__ : Tuple = getattr(A__ , 'model.classification.activation.name' , 'swish' )
    # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)

    if is_segmentation_model:
        snake_case__ : Optional[Any] = getattr(A__ , 'model.segmentation.output_stride' , 16 )
        if "_deeplabv3" in task_name:
            snake_case__ : Tuple = getattr(A__ , 'model.segmentation.deeplabv3.aspp_rates' , [12, 24, 36] )
            snake_case__ : Tuple = getattr(A__ , 'model.segmentation.deeplabv3.aspp_out_channels' , 512 )
            snake_case__ : str = getattr(A__ , 'model.segmentation.deeplabv3.aspp_dropout' , 0.1 )

    # id2label
    snake_case__ : Dict = 'huggingface/label-files'
    snake_case__ : str = json.load(open(hf_hub_download(A__ , A__ , repo_type='dataset' ) , 'r' ) )
    snake_case__ : List[Any] = {int(A__ ): v for k, v in idalabel.items()}
    snake_case__ : Any = idalabel
    snake_case__ : Dict = {v: k for k, v in idalabel.items()}
    return config


def UpperCamelCase__ ( A__ , A__ , A__ ) -> List[Any]:
    snake_case__ : Tuple = dct.pop(A__ )
    snake_case__ : Optional[Any] = val


def UpperCamelCase__ ( A__ , A__=False ) -> Optional[int]:
    if base_model:
        snake_case__ : Union[str, Any] = ''
    else:
        snake_case__ : Dict = 'mobilevitv2.'

    snake_case__ : Union[str, Any] = []
    for k in state_dict.keys():
        if k[:8] == "encoder.":
            snake_case__ : int = k[8:]
        else:
            snake_case__ : Tuple = k

        if ".block." in k:
            snake_case__ : List[str] = k_new.replace('.block.' , '.' )
        if ".conv." in k:
            snake_case__ : List[Any] = k_new.replace('.conv.' , '.convolution.' )
        if ".norm." in k:
            snake_case__ : Any = k_new.replace('.norm.' , '.normalization.' )

        if "conv_1." in k:
            snake_case__ : List[str] = k_new.replace('conv_1.' , F"""{model_prefix}conv_stem.""" )
        for i in [1, 2]:
            if F"""layer_{i}.""" in k:
                snake_case__ : Dict = k_new.replace(F"""layer_{i}.""" , F"""{model_prefix}encoder.layer.{i-1}.layer.""" )
        if ".exp_1x1." in k:
            snake_case__ : int = k_new.replace('.exp_1x1.' , '.expand_1x1.' )
        if ".red_1x1." in k:
            snake_case__ : Dict = k_new.replace('.red_1x1.' , '.reduce_1x1.' )

        for i in [3, 4, 5]:
            if F"""layer_{i}.0.""" in k:
                snake_case__ : Union[str, Any] = k_new.replace(F"""layer_{i}.0.""" , F"""{model_prefix}encoder.layer.{i-1}.downsampling_layer.""" )
            if F"""layer_{i}.1.local_rep.0.""" in k:
                snake_case__ : Optional[int] = k_new.replace(F"""layer_{i}.1.local_rep.0.""" , F"""{model_prefix}encoder.layer.{i-1}.conv_kxk.""" )
            if F"""layer_{i}.1.local_rep.1.""" in k:
                snake_case__ : Union[str, Any] = k_new.replace(F"""layer_{i}.1.local_rep.1.""" , F"""{model_prefix}encoder.layer.{i-1}.conv_1x1.""" )

        for i in [3, 4, 5]:
            if i == 3:
                snake_case__ : Optional[int] = [0, 1]
            elif i == 4:
                snake_case__ : List[Any] = [0, 1, 2, 3]
            elif i == 5:
                snake_case__ : Tuple = [0, 1, 2]

            for j in j_in:
                if F"""layer_{i}.1.global_rep.{j}.""" in k:
                    snake_case__ : Optional[int] = k_new.replace(
                        F"""layer_{i}.1.global_rep.{j}.""" , F"""{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.""" )
            if F"""layer_{i}.1.global_rep.{j+1}.""" in k:
                snake_case__ : List[str] = k_new.replace(
                    F"""layer_{i}.1.global_rep.{j+1}.""" , F"""{model_prefix}encoder.layer.{i-1}.layernorm.""" )

            if F"""layer_{i}.1.conv_proj.""" in k:
                snake_case__ : Optional[Any] = k_new.replace(F"""layer_{i}.1.conv_proj.""" , F"""{model_prefix}encoder.layer.{i-1}.conv_projection.""" )

        if "pre_norm_attn.0." in k:
            snake_case__ : Optional[int] = k_new.replace('pre_norm_attn.0.' , 'layernorm_before.' )
        if "pre_norm_attn.1." in k:
            snake_case__ : Optional[Any] = k_new.replace('pre_norm_attn.1.' , 'attention.' )
        if "pre_norm_ffn.0." in k:
            snake_case__ : int = k_new.replace('pre_norm_ffn.0.' , 'layernorm_after.' )
        if "pre_norm_ffn.1." in k:
            snake_case__ : Optional[int] = k_new.replace('pre_norm_ffn.1.' , 'ffn.conv1.' )
        if "pre_norm_ffn.3." in k:
            snake_case__ : Union[str, Any] = k_new.replace('pre_norm_ffn.3.' , 'ffn.conv2.' )

        if "classifier.1." in k:
            snake_case__ : Tuple = k_new.replace('classifier.1.' , 'classifier.' )

        if "seg_head." in k:
            snake_case__ : Optional[Any] = k_new.replace('seg_head.' , 'segmentation_head.' )
        if ".aspp_layer." in k:
            snake_case__ : Optional[int] = k_new.replace('.aspp_layer.' , '.' )
        if ".aspp_pool." in k:
            snake_case__ : Dict = k_new.replace('.aspp_pool.' , '.' )

        rename_keys.append((k, k_new) )
    return rename_keys


def UpperCamelCase__ ( A__ ) -> Dict:
    snake_case__ : Tuple = []
    for k in state_dict.keys():
        if k.startswith('seg_head.aux_head.' ):
            keys_to_ignore.append(A__ )
    for k in keys_to_ignore:
        state_dict.pop(A__ , A__ )


def UpperCamelCase__ ( ) -> Dict:
    snake_case__ : str = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    snake_case__ : Union[str, Any] = Image.open(requests.get(A__ , stream=A__ ).raw )
    return im


@torch.no_grad()
def UpperCamelCase__ ( A__ , A__ , A__ , A__ ) -> Dict:
    snake_case__ : Optional[Any] = get_mobilevitva_config(A__ , A__ )

    # load original state_dict
    snake_case__ : Tuple = torch.load(A__ , map_location='cpu' )

    # load huggingface model
    if task_name.startswith('ade20k_' ) or task_name.startswith('voc_' ):
        snake_case__ : Optional[Any] = MobileViTVaForSemanticSegmentation(A__ ).eval()
        snake_case__ : str = False
    else:
        snake_case__ : Union[str, Any] = MobileViTVaForImageClassification(A__ ).eval()
        snake_case__ : Tuple = False

    # remove and rename some keys of the original model before loading
    snake_case__ : Union[str, Any] = checkpoint
    remove_unused_keys(A__ )
    snake_case__ : Union[str, Any] = create_rename_keys(A__ , base_model=A__ )
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(A__ , A__ , A__ )

    # load modified state_dict
    model.load_state_dict(A__ )

    # Check outputs on an image, prepared by MobileViTImageProcessor
    snake_case__ : List[str] = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
    snake_case__ : str = image_processor(images=prepare_img() , return_tensors='pt' )
    snake_case__ : int = model(**A__ )

    # verify classification model
    if task_name.startswith('imagenet' ):
        snake_case__ : List[str] = outputs.logits
        snake_case__ : str = logits.argmax(-1 ).item()
        print('Predicted class:' , model.config.idalabel[predicted_class_idx] )
        if task_name.startswith('imagenet1k_256' ) and config.width_multiplier == 1.0:
            # expected_logits for base variant
            snake_case__ : int = torch.tensor([-1.6_3_3_6e0_0, -7.3_2_0_4e-0_2, -5.1_8_8_3e-0_1] )
            assert torch.allclose(logits[0, :3] , A__ , atol=1e-4 )

    Path(A__ ).mkdir(exist_ok=A__ )
    print(F"""Saving model {task_name} to {pytorch_dump_folder_path}""" )
    model.save_pretrained(A__ )
    print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
    image_processor.save_pretrained(A__ )


if __name__ == "__main__":
    lowerCAmelCase__ : List[Any] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--task''',
        default='''imagenet1k_256''',
        type=str,
        help=(
            '''Name of the task for which the MobileViTV2 model you\'d like to convert is trained on . '''
            '''
                Classification (ImageNet-1k)
                    - MobileViTV2 (256x256) : imagenet1k_256
                    - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384
                    - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) : imagenet21k_to_1k_256
                    - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on
                      ImageNet-1k 384x384) : imagenet21k_to_1k_384
                Segmentation
                    - ADE20K Dataset : ade20k_deeplabv3
                    - Pascal VOC 2012 Dataset: voc_deeplabv3
            '''
        ),
        choices=[
            '''imagenet1k_256''',
            '''imagenet1k_384''',
            '''imagenet21k_to_1k_256''',
            '''imagenet21k_to_1k_384''',
            '''ade20k_deeplabv3''',
            '''voc_deeplabv3''',
        ],
    )
    parser.add_argument(
        '''--orig_checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
    )
    parser.add_argument('''--orig_config_path''', required=True, type=str, help='''Path to the original config file.''')
    parser.add_argument(
        '''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
    )

    lowerCAmelCase__ : List[Any] = parser.parse_args()
    convert_mobilevitva_checkpoint(
        args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
    )
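The flatten_yaml_as_dict helper above is self-contained and easy to lift out; a de-obfuscated sketch (the names items, new_key and flat_cfg are visible in the sample itself):

import collections.abc

def flatten_yaml_as_dict(d, parent_key: str = "", sep: str = ".") -> dict:
    """Flatten a nested mapping into dotted keys like {'a.b.c': value}."""
    items = []
    for k, v in d.items():
        new_key = parent_key + sep + k if parent_key else k
        if isinstance(v, collections.abc.MutableMapping):
            items.extend(flatten_yaml_as_dict(v, new_key, sep=sep).items())
        else:
            items.append((new_key, v))
    return dict(items)

print(flatten_yaml_as_dict({"model": {"classification": {"name": "mobilevit_v2"}}}))
# {'model.classification.name': 'mobilevit_v2'}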
code_codestyle: 699
import numpy as np
import qiskit


def UpperCamelCase__ ( A__ = 8 , A__ = None ) -> str:
    snake_case__ : Optional[int] = np.random.default_rng(seed=A__ )

    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    snake_case__ : Tuple = 6 * key_len
    # Measurement basis for Alice's qubits.
    snake_case__ : Tuple = rng.integers(2 , size=A__ )
    # The set of states Alice will prepare.
    snake_case__ : List[str] = rng.integers(2 , size=A__ )
    # Measurement basis for Bob's qubits.
    snake_case__ : List[Any] = rng.integers(2 , size=A__ )

    # Quantum Circuit to simulate BB84
    snake_case__ : Any = qiskit.QuantumCircuit(A__ , name='BB84' )

    # Alice prepares her qubits according to rules above.
    for index, _ in enumerate(A__ ):
        if alice_state[index] == 1:
            bbaa_circ.x(A__ )
        if alice_basis[index] == 1:
            bbaa_circ.h(A__ )
    bbaa_circ.barrier()

    # Bob measures the received qubits according to rules above.
    for index, _ in enumerate(A__ ):
        if bob_basis[index] == 1:
            bbaa_circ.h(A__ )

    bbaa_circ.barrier()
    bbaa_circ.measure_all()

    # Simulate the quantum circuit.
    snake_case__ : List[str] = qiskit.Aer.get_backend('aer_simulator' )
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    snake_case__ : Optional[Any] = qiskit.execute(A__ , A__ , shots=1 , seed_simulator=A__ )
    # Returns the result of measurement.
    snake_case__ : Union[str, Any] = job.result().get_counts(A__ ).most_frequent()

    # Extracting the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    snake_case__ : Optional[Any] = ''.join(
        [
            result_bit
            for alice_basis_bit, bob_basis_bit, result_bit in zip(
                A__ , A__ , A__ )
            if alice_basis_bit == bob_basis_bit
        ] )

    # Get final key. Pad with 0 if too short, otherwise truncate.
    snake_case__ : Tuple = gen_key[:key_len] if len(A__ ) >= key_len else gen_key.ljust(A__ , '0' )
    return key


if __name__ == "__main__":
    print(F'''The generated key is : {bbaa(8, seed=0)}''')
    from doctest import testmod

    testmod()
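The quantum circuit aside, the key-sifting step at the end is plain classical post-processing; a sketch of just that step, using random stand-ins for the circuit's measured bits (an assumption, since the real bits come from the simulator):

import numpy as np

rng = np.random.default_rng(seed=0)
num_qubits = 48  # 6x oversampling of an 8-bit key, as in the sample above
alice_basis = rng.integers(2, size=num_qubits)
bob_basis = rng.integers(2, size=num_qubits)
measured_bits = rng.integers(2, size=num_qubits)  # stand-in for circuit output

# Keep only positions where both parties happened to pick the same basis
# (about half of the positions on average).
sifted_key = "".join(
    str(bit)
    for a, b, bit in zip(alice_basis, bob_basis, measured_bits)
    if a == b
)
print(sifted_key[:8].ljust(8, "0"))  # pad/truncate to the target key length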
style_context_codestyle: 699
label: 1
import random

from .binary_exp_mod import bin_exp_mod


def UpperCamelCase__ ( A__ , A__=1000 ) -> int:
    if n < 2:
        return False

    if n % 2 == 0:
        return n == 2

    # this means n is odd
    snake_case__ : Optional[int] = n - 1
    snake_case__ : Union[str, Any] = 0
    while d % 2 == 0:
        d //= 2  # integer division keeps d an exact int
        exp += 1

    # n - 1 = d * (2**exp)
    snake_case__ : Union[str, Any] = 0
    while count < prec:
        snake_case__ : Union[str, Any] = random.randint(2 , n - 1 )
        snake_case__ : List[Any] = bin_exp_mod(A__ , A__ , A__ )
        if b != 1:
            snake_case__ : Any = True
            for _ in range(A__ ):
                if b == n - 1:
                    snake_case__ : Dict = False
                    break
                snake_case__ : int = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True


if __name__ == "__main__":
    lowerCAmelCase__ : Tuple = abs(int(input('''Enter bound : ''').strip()))
    print('''Here\'s the list of primes:''')
    print(''', '''.join(str(i) for i in range(n + 1) if is_prime_big(i)))
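The bin_exp_mod helper is imported but not included in this sample; a plausible square-and-multiply implementation is sketched below (the upstream version may differ, and Python's built-in three-argument pow computes the same thing):

def bin_exp_mod(base: int, exponent: int, modulus: int) -> int:
    """Compute (base ** exponent) % modulus by binary exponentiation."""
    result = 1
    base %= modulus
    while exponent > 0:
        if exponent & 1:  # low bit set: fold the current square into the product
            result = (result * base) % modulus
        base = (base * base) % modulus
        exponent >>= 1
    return result

assert bin_exp_mod(3, 200, 50) == pow(3, 200, 50)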
code_codestyle: 699
def UpperCamelCase__ ( A__ , A__ , A__ ) -> int:
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        snake_case__ : Dict = _modexpt(A__ , exponent // 2 , A__ ) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(A__ , exponent - 1 , A__ )) % modulo_value


def UpperCamelCase__ ( A__ = 1777 , A__ = 1855 , A__ = 8 ) -> int:
    snake_case__ : Tuple = base
    for _ in range(1 , A__ ):
        snake_case__ : Any = _modexpt(A__ , A__ , 10**digits )
    return result


if __name__ == "__main__":
    print(F'''{solution() = }''')
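Judging by the constants 1777, 1855 and 8, this appears to be Project Euler 188: the last digits of the tetration 1777↑↑1855. A compact equivalent using the built-in three-argument pow, mirroring the same iteration (names are assumptions taken from the sample's defaults):

def solution(base: int = 1777, height: int = 1855, digits: int = 8) -> int:
    """Iterate base ** result mod 10**digits, height - 1 times."""
    result = base
    for _ in range(1, height):
        result = pow(base, result, 10**digits)  # built-in modular exponentiation
    return result

print(solution())  # last 8 digits of 1777↑↑1855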
style_context_codestyle: 699
label: 1
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    convert_to_rgb,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    OPENAI_CLIP_MEAN,
    OPENAI_CLIP_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


lowerCAmelCase__ : List[str] = logging.get_logger(__name__)

if is_vision_available():
    import PIL


class __snake_case ( _lowerCamelCase ):
    __lowerCamelCase = ["""pixel_values"""]

    def __init__( self , __UpperCamelCase = True , __UpperCamelCase = None , __UpperCamelCase = PILImageResampling.BICUBIC , __UpperCamelCase = True , __UpperCamelCase = None , __UpperCamelCase = True , __UpperCamelCase = 1 / 255 , __UpperCamelCase = True , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = True , **__UpperCamelCase , ) -> None:
        '''simple docstring'''
        super().__init__(**__UpperCamelCase )
        snake_case__ : Tuple = size if size is not None else {'shortest_edge': 224}
        snake_case__ : Any = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
        snake_case__ : Optional[int] = crop_size if crop_size is not None else {'height': 224, 'width': 224}
        snake_case__ : List[Any] = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase , param_name='crop_size' )

        snake_case__ : Dict = do_resize
        snake_case__ : Any = size
        snake_case__ : str = resample
        snake_case__ : Any = do_center_crop
        snake_case__ : Tuple = crop_size
        snake_case__ : Optional[int] = do_rescale
        snake_case__ : str = rescale_factor
        snake_case__ : List[str] = do_normalize
        snake_case__ : Optional[Any] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        snake_case__ : List[Any] = image_std if image_std is not None else OPENAI_CLIP_STD
        snake_case__ : List[Any] = do_convert_rgb

    def __a ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = PILImageResampling.BICUBIC , __UpperCamelCase = None , **__UpperCamelCase , ) -> np.ndarray:
        '''simple docstring'''
        snake_case__ : int = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
        if "shortest_edge" not in size:
            raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
        snake_case__ : Any = get_resize_output_image_size(__UpperCamelCase , size=size['shortest_edge'] , default_to_square=__UpperCamelCase )
        return resize(__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )

    def __a ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , **__UpperCamelCase , ) -> np.ndarray:
        '''simple docstring'''
        snake_case__ : Tuple = get_size_dict(__UpperCamelCase )
        if "height" not in size or "width" not in size:
            raise ValueError(F"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""" )
        return center_crop(__UpperCamelCase , size=(size['height'], size['width']) , data_format=__UpperCamelCase , **__UpperCamelCase )

    def __a ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , **__UpperCamelCase , ) -> Optional[Any]:
        '''simple docstring'''
        return rescale(__UpperCamelCase , scale=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )

    def __a ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , **__UpperCamelCase , ) -> np.ndarray:
        '''simple docstring'''
        return normalize(__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )

    def __a ( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = ChannelDimension.FIRST , **__UpperCamelCase , ) -> PIL.Image.Image:
        '''simple docstring'''
        snake_case__ : Any = do_resize if do_resize is not None else self.do_resize
        snake_case__ : Optional[int] = size if size is not None else self.size
        snake_case__ : Any = get_size_dict(__UpperCamelCase , param_name='size' , default_to_square=__UpperCamelCase )
        snake_case__ : int = resample if resample is not None else self.resample
        snake_case__ : Tuple = do_center_crop if do_center_crop is not None else self.do_center_crop
        snake_case__ : str = crop_size if crop_size is not None else self.crop_size
        snake_case__ : Optional[Any] = get_size_dict(__UpperCamelCase , param_name='crop_size' , default_to_square=__UpperCamelCase )
        snake_case__ : Dict = do_rescale if do_rescale is not None else self.do_rescale
        snake_case__ : int = rescale_factor if rescale_factor is not None else self.rescale_factor
        snake_case__ : List[str] = do_normalize if do_normalize is not None else self.do_normalize
        snake_case__ : List[str] = image_mean if image_mean is not None else self.image_mean
        snake_case__ : Dict = image_std if image_std is not None else self.image_std
        snake_case__ : List[Any] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        snake_case__ : str = make_list_of_images(__UpperCamelCase )

        if not valid_images(__UpperCamelCase ):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.' )

        if do_resize and size is None:
            raise ValueError('Size must be specified if do_resize is True.' )
        if do_center_crop and crop_size is None:
            raise ValueError('Crop size must be specified if do_center_crop is True.' )
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.' )

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            snake_case__ : List[str] = [convert_to_rgb(__UpperCamelCase ) for image in images]

        # All transformations expect numpy arrays.
        snake_case__ : Dict = [to_numpy_array(__UpperCamelCase ) for image in images]

        if do_resize:
            snake_case__ : str = [self.resize(image=__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase ) for image in images]

        if do_center_crop:
            snake_case__ : str = [self.center_crop(image=__UpperCamelCase , size=__UpperCamelCase ) for image in images]

        if do_rescale:
            snake_case__ : int = [self.rescale(image=__UpperCamelCase , scale=__UpperCamelCase ) for image in images]

        if do_normalize:
            snake_case__ : Tuple = [self.normalize(image=__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase ) for image in images]

        snake_case__ : List[str] = [to_channel_dimension_format(__UpperCamelCase , __UpperCamelCase ) for image in images]

        snake_case__ : Union[str, Any] = {'pixel_values': images}
        return BatchFeature(data=__UpperCamelCase , tensor_type=__UpperCamelCase )
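A short usage sketch, assuming this sample corresponds to transformers' CLIPImageProcessor (the OPENAI_CLIP_MEAN/STD defaults and the shortest-edge-224 resize plus 224x224 center crop match that class):

from PIL import Image
import numpy as np
from transformers import CLIPImageProcessor

processor = CLIPImageProcessor()  # defaults: resize shortest edge to 224, crop 224x224
image = Image.fromarray(np.random.randint(0, 255, (300, 400, 3), dtype=np.uint8))
batch = processor(images=image, return_tensors="np")
print(batch["pixel_values"].shape)  # (1, 3, 224, 224)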
code_codestyle: 699
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run

import sys
import warnings
from os.path import abspath, dirname, join


# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
lowerCAmelCase__ : Tuple = abspath(join(dirname(dirname(__file__)), '''src'''))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)


def UpperCamelCase__ ( A__ ) -> Optional[Any]:
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(A__ )


def UpperCamelCase__ ( A__ ) -> Optional[Any]:
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    snake_case__ : Union[str, Any] = terminalreporter.config.getoption('--make-reports' )
    if make_reports:
        pytest_terminal_summary_main(A__ , id=A__ )
style_context_codestyle: 699
label: 1
import collections from typing import List, Optional, Union from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging from ..bert.tokenization_bert import BertTokenizer lowerCAmelCase__ : int = logging.get_logger(__name__) lowerCAmelCase__ : Optional[int] = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''} lowerCAmelCase__ : Optional[int] = { '''vocab_file''': { '''facebook/dpr-ctx_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt''' ), '''facebook/dpr-ctx_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''facebook/dpr-ctx_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json''' ), '''facebook/dpr-ctx_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json''' ), }, } lowerCAmelCase__ : str = { '''vocab_file''': { '''facebook/dpr-question_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt''' ), '''facebook/dpr-question_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''facebook/dpr-question_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json''' ), '''facebook/dpr-question_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json''' ), }, } lowerCAmelCase__ : Optional[Any] = { '''vocab_file''': { '''facebook/dpr-reader-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt''' ), '''facebook/dpr-reader-multiset-base''': ( '''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''facebook/dpr-reader-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json''' ), '''facebook/dpr-reader-multiset-base''': ( '''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json''' ), }, } lowerCAmelCase__ : Any = { '''facebook/dpr-ctx_encoder-single-nq-base''': 5_12, '''facebook/dpr-ctx_encoder-multiset-base''': 5_12, } lowerCAmelCase__ : int = { '''facebook/dpr-question_encoder-single-nq-base''': 5_12, '''facebook/dpr-question_encoder-multiset-base''': 5_12, } lowerCAmelCase__ : List[str] = { '''facebook/dpr-reader-single-nq-base''': 5_12, '''facebook/dpr-reader-multiset-base''': 5_12, } lowerCAmelCase__ : Tuple = { '''facebook/dpr-ctx_encoder-single-nq-base''': {'''do_lower_case''': True}, '''facebook/dpr-ctx_encoder-multiset-base''': {'''do_lower_case''': True}, } lowerCAmelCase__ : Optional[Any] = { '''facebook/dpr-question_encoder-single-nq-base''': {'''do_lower_case''': True}, '''facebook/dpr-question_encoder-multiset-base''': {'''do_lower_case''': True}, } lowerCAmelCase__ : Any = { '''facebook/dpr-reader-single-nq-base''': {'''do_lower_case''': True}, '''facebook/dpr-reader-multiset-base''': {'''do_lower_case''': True}, } class __snake_case ( _lowerCamelCase ): __lowerCamelCase = VOCAB_FILES_NAMES __lowerCamelCase = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP __lowerCamelCase = 
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowerCamelCase = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION class __snake_case ( _lowerCamelCase ): __lowerCamelCase = VOCAB_FILES_NAMES __lowerCamelCase = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP __lowerCamelCase = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowerCamelCase = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION lowerCAmelCase__ : Any = collections.namedtuple( '''DPRSpanPrediction''', ['''span_score''', '''relevance_score''', '''doc_id''', '''start_index''', '''end_index''', '''text'''] ) lowerCAmelCase__ : Optional[int] = collections.namedtuple('''DPRReaderOutput''', ['''start_logits''', '''end_logits''', '''relevance_logits''']) lowerCAmelCase__ : List[str] = r''' Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`. It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers), using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)` with the format: ``` [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids> ``` Args: questions (`str` or `List[str]`): The questions to be encoded. You can specify one question for many passages. In this case, the question will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in `titles` or `texts`. titles (`str` or `List[str]`): The passages titles to be encoded. This can be a string or a list of strings if there are several passages. texts (`str` or `List[str]`): The passages texts to be encoded. This can be a string or a list of strings if there are several passages. padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`): Activates and controls padding. Accepts the following values: - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different lengths). truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`): Activates and controls truncation. Accepts the following values: - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided. - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. 
        - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
          greater than the model maximum admissible input size).
    max_length (`int`, *optional*):
        Controls the maximum length to use by one of the truncation/padding parameters. If left unset or set to
        `None`, this will use the predefined model maximum length if a maximum length is required by one of the
        truncation/padding parameters. If the model has no specific maximum input length (like XLNet)
        truncation/padding to a maximum length will be deactivated.
    return_tensors (`str` or [`~utils.TensorType`], *optional*):
        If set, will return tensors instead of list of python integers. Acceptable values are:

        - `'tf'`: Return TensorFlow `tf.constant` objects.
        - `'pt'`: Return PyTorch `torch.Tensor` objects.
        - `'np'`: Return Numpy `np.ndarray` objects.
    return_attention_mask (`bool`, *optional*):
        Whether or not to return the attention mask. If not set, will return the attention mask according to the
        specific tokenizer's default, defined by the `return_outputs` attribute.

        [What are attention masks?](../glossary#attention-mask)

    Returns:
        `Dict[str, List[List[int]]]`: A dictionary with the following keys:

        - `input_ids`: List of token ids to be fed to a model.
        - `attention_mask`: List of indices specifying which tokens should be attended to by the model.
    '''


@add_start_docstrings(_lowerCamelCase )
class __snake_case :
    def __call__( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = False , __UpperCamelCase = False , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , **__UpperCamelCase , ) -> BatchEncoding:
        '''simple docstring'''
        if titles is None and texts is None:
            return super().__call__(
                __UpperCamelCase ,
                padding=__UpperCamelCase ,
                truncation=__UpperCamelCase ,
                max_length=__UpperCamelCase ,
                return_tensors=__UpperCamelCase ,
                return_attention_mask=__UpperCamelCase ,
                **__UpperCamelCase ,
            )
        elif titles is None or texts is None:
            snake_case__ : List[str] = titles if texts is None else texts
            return super().__call__(
                __UpperCamelCase ,
                __UpperCamelCase ,
                padding=__UpperCamelCase ,
                truncation=__UpperCamelCase ,
                max_length=__UpperCamelCase ,
                return_tensors=__UpperCamelCase ,
                return_attention_mask=__UpperCamelCase ,
                **__UpperCamelCase ,
            )
        snake_case__ : Union[str, Any] = titles if not isinstance(__UpperCamelCase , __UpperCamelCase ) else [titles]
        snake_case__ : int = texts if not isinstance(__UpperCamelCase , __UpperCamelCase ) else [texts]
        snake_case__ : str = len(__UpperCamelCase )
        snake_case__ : Dict = questions if not isinstance(__UpperCamelCase , __UpperCamelCase ) else [questions] * n_passages
        if len(__UpperCamelCase ) != len(__UpperCamelCase ):
            raise ValueError(
                F"""There should be as many titles than texts but got {len(__UpperCamelCase )} titles and {len(__UpperCamelCase )} texts."""
            )
        snake_case__ : Optional[Any] = super().__call__(__UpperCamelCase , __UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase )['input_ids']
        snake_case__ : Tuple = super().__call__(__UpperCamelCase , add_special_tokens=__UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase )['input_ids']
        snake_case__ : Any = {
            'input_ids': [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(__UpperCamelCase , __UpperCamelCase )
            ]
        }
        if return_attention_mask is not False:
            snake_case__ : Optional[Any] = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
            snake_case__ : List[Any] = attention_mask
        return self.pad(__UpperCamelCase , padding=__UpperCamelCase , max_length=__UpperCamelCase , return_tensors=__UpperCamelCase )

    def __a ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = 16 , __UpperCamelCase = 64 , __UpperCamelCase = 4 , ) -> List[DPRSpanPrediction]:
        '''simple docstring'''
        snake_case__ : List[Any] = reader_input['input_ids']
        snake_case__ , snake_case__ , snake_case__ : Optional[int] = reader_output[:3]
        snake_case__ : Tuple = len(__UpperCamelCase )
        snake_case__ : List[Any] = sorted(range(__UpperCamelCase ) , reverse=__UpperCamelCase , key=relevance_logits.__getitem__ )
        snake_case__ : List[DPRReaderOutput] = []
        for doc_id in sorted_docs:
            snake_case__ : Optional[int] = list(input_ids[doc_id] )
            # assuming question & title information is at the beginning of the sequence
            snake_case__ : str = sequence_ids.index(self.sep_token_id , 2 ) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                snake_case__ : Optional[Any] = sequence_ids.index(self.pad_token_id )
            else:
                snake_case__ : Optional[Any] = len(__UpperCamelCase )
            snake_case__ : Any = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len] ,
                end_logits=end_logits[doc_id][passage_offset:sequence_len] ,
                max_answer_length=__UpperCamelCase ,
                top_spans=__UpperCamelCase ,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] ,
                        relevance_score=relevance_logits[doc_id] ,
                        doc_id=__UpperCamelCase ,
                        start_index=__UpperCamelCase ,
                        end_index=__UpperCamelCase ,
                        text=self.decode(sequence_ids[start_index : end_index + 1] ) ,
                    )
                )
            if len(__UpperCamelCase ) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]

    def __a ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , ) -> List[DPRSpanPrediction]:
        '''simple docstring'''
        snake_case__ : Optional[int] = []
        for start_index, start_score in enumerate(__UpperCamelCase ):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
                scores.append(((start_index, start_index + answer_length), start_score + end_score) )
        snake_case__ : Union[str, Any] = sorted(__UpperCamelCase , key=lambda __UpperCamelCase : x[1] , reverse=__UpperCamelCase )
        snake_case__ : Optional[int] = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(F"""Wrong span indices: [{start_index}:{end_index}]""" )
            snake_case__ : int = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(F"""Span is too long: {length} > {max_answer_length}""" )
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index) )
            if len(__UpperCamelCase ) == top_spans:
                break
        return chosen_span_intervals


@add_end_docstrings(_lowerCamelCase )
class __snake_case ( _lowerCamelCase , _lowerCamelCase ):
    __lowerCamelCase = VOCAB_FILES_NAMES
    __lowerCamelCase = READER_PRETRAINED_VOCAB_FILES_MAP
    __lowerCamelCase = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __lowerCamelCase = READER_PRETRAINED_INIT_CONFIGURATION
    __lowerCamelCase = ["""input_ids""", """attention_mask"""]
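A minimal, self-contained sketch of the span selection that the reader delegates to through `self._get_best_spans` above: score every (start, end) pair, sort by score, and keep top spans that neither contain nor are contained by an already chosen span. The toy logits below are fabricated for illustration.

# Standalone sketch of the containment-filtered top-k span selection above.
def best_spans(start_logits, end_logits, max_answer_length, top_spans):
    scores = []
    for start, start_score in enumerate(start_logits):
        for length, end_score in enumerate(end_logits[start : start + max_answer_length]):
            scores.append(((start, start + length), start_score + end_score))
    scores.sort(key=lambda item: item[1], reverse=True)
    chosen = []
    for (start, end), _score in scores:
        # skip spans that contain, or are contained by, a higher-scoring chosen span
        if any(start <= s <= e <= end or s <= start <= end <= e for s, e in chosen):
            continue
        chosen.append((start, end))
        if len(chosen) == top_spans:
            break
    return chosen


print(best_spans([0.1, 0.2, 3.0, 0.1], [0.1, 0.2, 0.5, 2.5], max_answer_length=3, top_spans=2))
# -> [(2, 3), (1, 2)]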
699
def hexagonal_numbers(length: int) -> list[int]:
    if length <= 0 or not isinstance(length, int):
        raise ValueError('Length must be a positive integer.' )
    return [n * (2 * n - 1) for n in range(length )]


if __name__ == "__main__":
    print(hexagonal_numbers(length=5))
    print(hexagonal_numbers(length=10))
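As a quick cross-check of the closed form n * (2n - 1), the same sequence can be built from the recurrence h(n) = h(n - 1) + (4n - 3); a small sanity sketch:

# Sanity sketch: the closed form agrees with the recurrence; indices start at 0
# as in the module above, so the sequence begins 0, 1, 6, 15, 28, 45, ...
def hexagonal_by_recurrence(length: int) -> list[int]:
    values, current = [], 0
    for n in range(length):
        if n:
            current += 4 * n - 3
        values.append(current)
    return values


assert hexagonal_by_recurrence(6) == [n * (2 * n - 1) for n in range(6)]  # [0, 1, 6, 15, 28, 45]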
699
1
import numpy as np
import torch

from imwatermark import WatermarkEncoder


# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
WATERMARK_MESSAGE = 0b101100111110110010010000011110111011000110011110
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
WATERMARK_BITS = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]


class StableDiffusionXLWatermarker:
    def __init__(self) -> None:
        self.watermark = WATERMARK_BITS
        self.encoder = WatermarkEncoder()
        self.encoder.set_watermark('bits' , self.watermark )

    def apply_watermark(self, images) -> torch.Tensor:
        # can't add a watermark to images smaller than 256 pixels
        if images.shape[-1] < 256:
            return images
        images = (255 * (images / 2 + 0.5)).cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        images = [self.encoder.encode(image , 'dwtDct' ) for image in images]
        images = torch.from_numpy(np.array(images ) ).permute(0 , 3 , 1 , 2 )
        images = torch.clamp(2 * (images / 255 - 0.5) , min=-1.0 , max=1.0 )
        return images
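The value round trip performed by the watermarker, isolated as a sketch; no encoder is involved here, and shapes and values are illustrative only.

# The [-1, 1] <-> uint8 round trip used above when embedding the watermark.
import numpy as np
import torch

images = torch.rand(1, 3, 256, 256) * 2 - 1                        # NCHW batch in [-1, 1]
as_uint8 = (255 * (images / 2 + 0.5)).permute(0, 2, 3, 1).numpy()  # NHWC in [0, 255]
# ... a watermark encoder (e.g. imwatermark's WatermarkEncoder) would modify `as_uint8` here ...
back = torch.from_numpy(np.array(as_uint8)).permute(0, 3, 1, 2)
back = torch.clamp(2 * (back / 255 - 0.5), min=-1.0, max=1.0)      # back to NCHW in [-1, 1]
print(back.shape, float(back.min()), float(back.max()))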
699
import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( ConditionalDetrConfig, ConditionalDetrForObjectDetection, ConditionalDetrForSegmentation, ConditionalDetrImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase__ : Dict = logging.get_logger(__name__) # here we list all keys to be renamed (original name on the left, our name on the right) lowerCAmelCase__ : Optional[Any] = [] for i in range(6): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', F'''encoder.layers.{i}.self_attn.out_proj.weight''') ) rename_keys.append( (F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight''')) rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias''')) rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight''')) rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias''')) rename_keys.append( (F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias''')) rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight''')) rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias''')) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( (F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''decoder.layers.{i}.self_attn.out_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''') ) rename_keys.append( ( F'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''', F'''decoder.layers.{i}.encoder_attn.out_proj.weight''', ) ) rename_keys.append( ( F'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''', F'''decoder.layers.{i}.encoder_attn.out_proj.bias''', ) ) rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight''')) rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias''')) rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight''')) rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''') ) 
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight''')) rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias''')) # q, k, v projections in self/cross-attention in decoder for conditional DETR rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', F'''decoder.layers.{i}.sa_qcontent_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', F'''decoder.layers.{i}.sa_kcontent_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', F'''decoder.layers.{i}.sa_qpos_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', F'''decoder.layers.{i}.sa_kpos_proj.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.weight''', F'''decoder.layers.{i}.sa_v_proj.weight''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', F'''decoder.layers.{i}.ca_qcontent_proj.weight''') ) # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight")) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', F'''decoder.layers.{i}.ca_kcontent_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', F'''decoder.layers.{i}.ca_kpos_proj.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.weight''', F'''decoder.layers.{i}.ca_v_proj.weight''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', F'''decoder.layers.{i}.ca_qpos_sine_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', F'''decoder.layers.{i}.sa_qcontent_proj.bias''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', F'''decoder.layers.{i}.sa_kcontent_proj.bias''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', F'''decoder.layers.{i}.sa_qpos_proj.bias''')) rename_keys.append((F'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', F'''decoder.layers.{i}.sa_kpos_proj.bias''')) rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.bias''', F'''decoder.layers.{i}.sa_v_proj.bias''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', F'''decoder.layers.{i}.ca_qcontent_proj.bias''') ) # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias")) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', F'''decoder.layers.{i}.ca_kcontent_proj.bias''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', F'''decoder.layers.{i}.ca_kpos_proj.bias''')) rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.bias''', F'''decoder.layers.{i}.ca_v_proj.bias''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', F'''decoder.layers.{i}.ca_qpos_sine_proj.bias''') ) # convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads # for conditional DETR, also convert reference point head and query scale MLP rename_keys.extend( [ ('''input_proj.weight''', '''input_projection.weight'''), ('''input_proj.bias''', '''input_projection.bias'''), ('''query_embed.weight''', '''query_position_embeddings.weight'''), 
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''), ('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''), ('''class_embed.weight''', '''class_labels_classifier.weight'''), ('''class_embed.bias''', '''class_labels_classifier.bias'''), ('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''), ('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''), ('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''), ('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''), ('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''), ('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''), ('''transformer.decoder.ref_point_head.layers.0.weight''', '''decoder.ref_point_head.layers.0.weight'''), ('''transformer.decoder.ref_point_head.layers.0.bias''', '''decoder.ref_point_head.layers.0.bias'''), ('''transformer.decoder.ref_point_head.layers.1.weight''', '''decoder.ref_point_head.layers.1.weight'''), ('''transformer.decoder.ref_point_head.layers.1.bias''', '''decoder.ref_point_head.layers.1.bias'''), ('''transformer.decoder.query_scale.layers.0.weight''', '''decoder.query_scale.layers.0.weight'''), ('''transformer.decoder.query_scale.layers.0.bias''', '''decoder.query_scale.layers.0.bias'''), ('''transformer.decoder.query_scale.layers.1.weight''', '''decoder.query_scale.layers.1.weight'''), ('''transformer.decoder.query_scale.layers.1.bias''', '''decoder.query_scale.layers.1.bias'''), ('''transformer.decoder.layers.0.ca_qpos_proj.weight''', '''decoder.layers.0.ca_qpos_proj.weight'''), ('''transformer.decoder.layers.0.ca_qpos_proj.bias''', '''decoder.layers.0.ca_qpos_proj.bias'''), ] ) def UpperCamelCase__ ( A__ , A__ , A__ ) -> List[str]: snake_case__ : int = state_dict.pop(A__ ) snake_case__ : Union[str, Any] = val def UpperCamelCase__ ( A__ ) -> int: snake_case__ : List[Any] = OrderedDict() for key, value in state_dict.items(): if "backbone.0.body" in key: snake_case__ : Any = key.replace('backbone.0.body' , 'backbone.conv_encoder.model' ) snake_case__ : Optional[int] = value else: snake_case__ : Optional[int] = value return new_state_dict def UpperCamelCase__ ( A__ , A__=False ) -> Optional[int]: snake_case__ : Optional[int] = '' if is_panoptic: snake_case__ : Tuple = 'conditional_detr.' 
# first: transformer encoder for i in range(6 ): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) snake_case__ : int = state_dict.pop(F"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" ) snake_case__ : str = state_dict.pop(F"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" ) # next, add query, keys and values (in that order) to the state dict snake_case__ : Union[str, Any] = in_proj_weight[:256, :] snake_case__ : Union[str, Any] = in_proj_bias[:256] snake_case__ : Union[str, Any] = in_proj_weight[256:512, :] snake_case__ : Optional[Any] = in_proj_bias[256:512] snake_case__ : List[str] = in_proj_weight[-256:, :] snake_case__ : Tuple = in_proj_bias[-256:] def UpperCamelCase__ ( ) -> Tuple: snake_case__ : int = 'http://images.cocodataset.org/val2017/000000039769.jpg' snake_case__ : str = Image.open(requests.get(A__ , stream=A__ ).raw ) return im @torch.no_grad() def UpperCamelCase__ ( A__ , A__ ) -> str: snake_case__ : List[Any] = ConditionalDetrConfig() # set backbone and dilation attributes if "resnet101" in model_name: snake_case__ : Any = 'resnet101' if "dc5" in model_name: snake_case__ : Any = True snake_case__ : int = 'panoptic' in model_name if is_panoptic: snake_case__ : str = 250 else: snake_case__ : Union[str, Any] = 91 snake_case__ : Optional[int] = 'huggingface/label-files' snake_case__ : Optional[Any] = 'coco-detection-id2label.json' snake_case__ : str = json.load(open(hf_hub_download(A__ , A__ , repo_type='dataset' ) , 'r' ) ) snake_case__ : List[Any] = {int(A__ ): v for k, v in idalabel.items()} snake_case__ : Any = idalabel snake_case__ : int = {v: k for k, v in idalabel.items()} # load image processor snake_case__ : List[Any] = 'coco_panoptic' if is_panoptic else 'coco_detection' snake_case__ : List[Any] = ConditionalDetrImageProcessor(format=A__ ) # prepare image snake_case__ : List[str] = prepare_img() snake_case__ : Any = image_processor(images=A__ , return_tensors='pt' ) snake_case__ : Dict = encoding['pixel_values'] logger.info(F"""Converting model {model_name}...""" ) # load original model from torch hub snake_case__ : Any = torch.hub.load('DeppMeng/ConditionalDETR' , A__ , pretrained=A__ ).eval() snake_case__ : Tuple = conditional_detr.state_dict() # rename keys for src, dest in rename_keys: if is_panoptic: snake_case__ : List[Any] = 'conditional_detr.' + src rename_key(A__ , A__ , A__ ) snake_case__ : Dict = rename_backbone_keys(A__ ) # query, key and value matrices need special treatment read_in_q_k_v(A__ , is_panoptic=A__ ) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them snake_case__ : Optional[int] = 'conditional_detr.model.' if is_panoptic else 'model.' 
for key in state_dict.copy().keys(): if is_panoptic: if ( key.startswith('conditional_detr' ) and not key.startswith('class_labels_classifier' ) and not key.startswith('bbox_predictor' ) ): snake_case__ : List[Any] = state_dict.pop(A__ ) snake_case__ : Optional[int] = val elif "class_labels_classifier" in key or "bbox_predictor" in key: snake_case__ : str = state_dict.pop(A__ ) snake_case__ : List[Any] = val elif key.startswith('bbox_attention' ) or key.startswith('mask_head' ): continue else: snake_case__ : Union[str, Any] = state_dict.pop(A__ ) snake_case__ : Dict = val else: if not key.startswith('class_labels_classifier' ) and not key.startswith('bbox_predictor' ): snake_case__ : List[Any] = state_dict.pop(A__ ) snake_case__ : Optional[int] = val # finally, create HuggingFace model and load state dict snake_case__ : Union[str, Any] = ConditionalDetrForSegmentation(A__ ) if is_panoptic else ConditionalDetrForObjectDetection(A__ ) model.load_state_dict(A__ ) model.eval() model.push_to_hub(repo_id=A__ , organization='DepuMeng' , commit_message='Add model' ) # verify our conversion snake_case__ : Tuple = conditional_detr(A__ ) snake_case__ : str = model(A__ ) assert torch.allclose(outputs.logits , original_outputs['pred_logits'] , atol=1e-4 ) assert torch.allclose(outputs.pred_boxes , original_outputs['pred_boxes'] , atol=1e-4 ) if is_panoptic: assert torch.allclose(outputs.pred_masks , original_outputs['pred_masks'] , atol=1e-4 ) # Save model and image processor logger.info(F"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" ) Path(A__ ).mkdir(exist_ok=A__ ) model.save_pretrained(A__ ) image_processor.save_pretrained(A__ ) if __name__ == "__main__": lowerCAmelCase__ : Any = argparse.ArgumentParser() parser.add_argument( '''--model_name''', default='''conditional_detr_resnet50''', type=str, help='''Name of the CONDITIONAL_DETR model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.''' ) lowerCAmelCase__ : int = parser.parse_args() convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
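A toy illustration of the `rename_key` / `rename_backbone_keys` mechanics used in the conversion above, applied to a fabricated two-entry state dict (the keys and values are made up for the example).

from collections import OrderedDict

toy_state = OrderedDict(
    {
        "backbone.0.body.conv1.weight": 1,
        "transformer.decoder.norm.weight": 2,
    }
)
# backbone keys are renamed by substring replacement
renamed = OrderedDict(
    (k.replace("backbone.0.body", "backbone.conv_encoder.model"), v) for k, v in toy_state.items()
)
# explicit (src, dest) pairs are applied with pop/re-insert, as in rename_key above
renamed["decoder.layernorm.weight"] = renamed.pop("transformer.decoder.norm.weight")
print(list(renamed))  # ['backbone.conv_encoder.model.conv1.weight', 'decoder.layernorm.weight']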
699
1
import warnings

from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor


logger = logging.get_logger(__name__)


class PerceiverFeatureExtractor(PerceiverImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            'The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use PerceiverImageProcessor instead.' ,
            FutureWarning ,
        )
        super().__init__(*args, **kwargs)
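The same deprecation pattern in miniature: a subclass that only emits a warning and then defers entirely to its replacement. The names here are illustrative, not part of transformers.

import warnings


class NewProcessor:
    def __init__(self, size=224):
        self.size = size


class OldFeatureExtractor(NewProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldFeatureExtractor is deprecated; use NewProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)


OldFeatureExtractor()  # emits a FutureWarning, otherwise behaves like NewProcessor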
699
from collections import namedtuple

from_to = namedtuple('from_to', 'from_ to')

METRIC_CONVERSION = {
    'cubicmeter': from_to(1, 1),
    'litre': from_to(0.001, 1000),
    'kilolitre': from_to(1, 1),
    'gallon': from_to(0.00454, 264.172),
    'cubicyard': from_to(0.76455, 1.30795),
    'cubicfoot': from_to(0.028, 35.3147),
    'cup': from_to(0.000236588, 4226.75),
}


def UpperCamelCase__ ( value: float , from_type: str , to_type: str ) -> float:
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            F"""Invalid 'from_type' value: {from_type!r}. Supported values are:\n"""
            + ', '.join(METRIC_CONVERSION )
        )
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            F"""Invalid 'to_type' value: {to_type!r}. Supported values are:\n"""
            + ', '.join(METRIC_CONVERSION )
        )
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to


if __name__ == "__main__":
    import doctest

    doctest.testmod()
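Two spot checks for the converter (kept under the `UpperCamelCase__` name it has in this dump); each result follows directly from value * from_factor * to_factor in the table above.

print(UpperCamelCase__(2, 'cubicmeter', 'litre'))  # 2 * 1 * 1000 -> 2000
print(UpperCamelCase__(500, 'litre', 'gallon'))    # 500 * 0.001 * 264.172 -> 132.086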
699
1
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

NAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'shi-labs/nat-mini-in1k-224': 'https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json',
    # See all Nat models at https://huggingface.co/models?filter=nat
}


class NatConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = 'nat'

    attribute_map = {
        'num_attention_heads': 'num_heads',
        'num_hidden_layers': 'num_layers',
    }

    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act='gelu',
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ['stem'] + [F"""stage{idx}""" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
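How the derived attributes fall out of the defaults above: with embed_dim=64 and four stages, the channel dimension after the last stage (`hidden_size`) is 64 * 2**3 = 512.

embed_dim, depths = 64, [3, 4, 6, 5]
hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
stage_names = ['stem'] + [F"""stage{idx}""" for idx in range(1, len(depths) + 1)]
print(hidden_size, stage_names)  # 512 ['stem', 'stage1', 'stage2', 'stage3', 'stage4']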
699
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


lowerCAmelCase__ : Tuple = logging.get_logger(__name__)

lowerCAmelCase__ : Union[str, Any] = '''▁'''

lowerCAmelCase__ : List[Any] = {'''vocab_file''': '''sentencepiece.bpe.model'''}

lowerCAmelCase__ : Optional[Any] = {
    '''vocab_file''': {
        '''facebook/xglm-564M''': '''https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model''',
    }
}

lowerCAmelCase__ : str = {
    '''facebook/xglm-564M''': 2048,
}


class __snake_case ( _lowerCamelCase ):
    __lowerCamelCase = VOCAB_FILES_NAMES
    __lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
    __lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __lowerCamelCase = ["""input_ids""", """attention_mask"""]

    def __init__( self , __UpperCamelCase , __UpperCamelCase="<s>" , __UpperCamelCase="</s>" , __UpperCamelCase="</s>" , __UpperCamelCase="<s>" , __UpperCamelCase="<unk>" , __UpperCamelCase="<pad>" , __UpperCamelCase = None , **__UpperCamelCase , ) -> None:
        '''simple docstring'''
        snake_case__ : Any = {} if sp_model_kwargs is None else sp_model_kwargs

        # Compatibility with the original tokenizer
        snake_case__ : Tuple = 7
        snake_case__ : Dict = [F"""<madeupword{i}>""" for i in range(self.num_madeup_words )]

        snake_case__ : Union[str, Any] = kwargs.get('additional_special_tokens' , [] )
        kwargs["additional_special_tokens"] += [
            word for word in madeup_words if word not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            bos_token=__UpperCamelCase ,
            eos_token=__UpperCamelCase ,
            unk_token=__UpperCamelCase ,
            sep_token=__UpperCamelCase ,
            cls_token=__UpperCamelCase ,
            pad_token=__UpperCamelCase ,
            sp_model_kwargs=self.sp_model_kwargs ,
            **__UpperCamelCase ,
        )

        snake_case__ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(__UpperCamelCase ) )
        snake_case__ : Optional[Any] = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        snake_case__ : Tuple = 1

        # Mimic fairseq token-to-id alignment for the first 4 token
        snake_case__ : Tuple = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
        snake_case__ : List[Any] = len(self.sp_model )
        snake_case__ : Optional[Any] = {F"""<madeupword{i}>""": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
        self.fairseq_tokens_to_ids.update(__UpperCamelCase )
        snake_case__ : Union[str, Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__( self ) -> List[Any]:
        '''simple docstring'''
        snake_case__ : Union[str, Any] = self.__dict__.copy()
        snake_case__ : Optional[Any] = None
        snake_case__ : Tuple = self.sp_model.serialized_model_proto()
        return state

    def __setstate__( self , __UpperCamelCase ) -> Union[str, Any]:
        '''simple docstring'''
        snake_case__ : Union[str, Any] = d

        # for backward compatibility
        if not hasattr(self , 'sp_model_kwargs' ):
            snake_case__ : Any = {}

        snake_case__ : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto )

    def __a ( self , __UpperCamelCase , __UpperCamelCase = None ) -> List[int]:
        '''simple docstring'''
        if token_ids_a is None:
            return [self.sep_token_id] + token_ids_a
        snake_case__ : str = [self.sep_token_id]
        return sep + token_ids_a + sep + sep + token_ids_a

    def __a ( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = False ) -> List[int]:
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=__UpperCamelCase , token_ids_a=__UpperCamelCase , already_has_special_tokens=__UpperCamelCase
            )
        if token_ids_a is None:
            return [1] + ([0] * len(__UpperCamelCase ))
        return [1] + ([0] * len(__UpperCamelCase )) + [1, 1] + ([0] * len(__UpperCamelCase ))

    def __a ( self , __UpperCamelCase , __UpperCamelCase = None ) -> List[int]:
        '''simple docstring'''
        snake_case__ : int = [self.sep_token_id]
        if token_ids_a is None:
            return len(sep + token_ids_a ) * [0]
        return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0]

    @property
    def __a ( self ) -> Tuple:
        '''simple docstring'''
        return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words

    def __a ( self ) -> Union[str, Any]:
        '''simple docstring'''
        snake_case__ : int = {self.convert_ids_to_tokens(__UpperCamelCase ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def __a ( self , __UpperCamelCase ) -> List[str]:
        '''simple docstring'''
        return self.sp_model.encode(__UpperCamelCase , out_type=__UpperCamelCase )

    def __a ( self , __UpperCamelCase ) -> Union[str, Any]:
        '''simple docstring'''
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        snake_case__ : Optional[Any] = self.sp_model.PieceToId(__UpperCamelCase )
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def __a ( self , __UpperCamelCase ) -> Dict:
        '''simple docstring'''
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset )

    def __a ( self , __UpperCamelCase ) -> int:
        '''simple docstring'''
        snake_case__ : int = ''.join(__UpperCamelCase ).replace(__UpperCamelCase , ' ' ).strip()
        return out_string

    def __a ( self , __UpperCamelCase , __UpperCamelCase = None ) -> Tuple[str]:
        '''simple docstring'''
        if not os.path.isdir(__UpperCamelCase ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        snake_case__ : List[str] = os.path.join(
            __UpperCamelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']
        )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCamelCase ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , __UpperCamelCase )
        elif not os.path.isfile(self.vocab_file ):
            with open(__UpperCamelCase , 'wb' ) as fi:
                snake_case__ : Any = self.sp_model.serialized_model_proto()
                fi.write(__UpperCamelCase )
        return (out_vocab_file,)
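A minimal sketch of the fairseq/sentencepiece id alignment used above: the first four ids are pinned, and everything coming from the sp model is shifted by `fairseq_offset` (= 1); the fake sp lookup below stands in for `PieceToId`.

fairseq_offset = 1
fairseq_tokens_to_ids = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}


def token_to_id(token, sp_piece_to_id):
    if token in fairseq_tokens_to_ids:
        return fairseq_tokens_to_ids[token]
    spm_id = sp_piece_to_id(token)  # 0 means "unknown" in sentencepiece
    return spm_id + fairseq_offset if spm_id else fairseq_tokens_to_ids['<unk>']


# fake sp model: ',' sits at position 3 in the spm vocab -> id 4 after the offset
print(token_to_id(',', {',': 3}.get))  # 4
print(token_to_id('<pad>', {',': 3}.get))  # 1 (pinned fairseq id)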
699
1
import io import itertools import json from dataclasses import dataclass from typing import Optional import pyarrow as pa import pyarrow.json as paj import datasets from datasets.table import table_cast from datasets.utils.file_utils import readline lowerCAmelCase__ : List[Any] = datasets.utils.logging.get_logger(__name__) @dataclass class __snake_case ( datasets.BuilderConfig ): __lowerCamelCase = None __lowerCamelCase = "utf-8" __lowerCamelCase = None __lowerCamelCase = None __lowerCamelCase = True # deprecated __lowerCamelCase = None # deprecated __lowerCamelCase = 10 << 20 # 10MB __lowerCamelCase = None class __snake_case ( datasets.ArrowBasedBuilder ): __lowerCamelCase = JsonConfig def __a ( self ) -> Optional[Any]: '''simple docstring''' if self.config.block_size is not None: logger.warning('The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead' ) snake_case__ : str = self.config.block_size if self.config.use_threads is not True: logger.warning( 'The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.' ) if self.config.newlines_in_values is not None: raise ValueError('The JSON loader parameter `newlines_in_values` is no longer supported' ) return datasets.DatasetInfo(features=self.config.features ) def __a ( self , __UpperCamelCase ) -> Dict: '''simple docstring''' if not self.config.data_files: raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" ) snake_case__ : int = dl_manager.download_and_extract(self.config.data_files ) if isinstance(__UpperCamelCase , (str, list, tuple) ): snake_case__ : Any = data_files if isinstance(__UpperCamelCase , __UpperCamelCase ): snake_case__ : Optional[Any] = [files] snake_case__ : List[str] = [dl_manager.iter_files(__UpperCamelCase ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'files': files} )] snake_case__ : List[Any] = [] for split_name, files in data_files.items(): if isinstance(__UpperCamelCase , __UpperCamelCase ): snake_case__ : List[Any] = [files] snake_case__ : Any = [dl_manager.iter_files(__UpperCamelCase ) for file in files] splits.append(datasets.SplitGenerator(name=__UpperCamelCase , gen_kwargs={'files': files} ) ) return splits def __a ( self , __UpperCamelCase ) -> pa.Table: '''simple docstring''' if self.config.features is not None: # adding missing columns for column_name in set(self.config.features ) - set(pa_table.column_names ): snake_case__ : List[Any] = self.config.features.arrow_schema.field(__UpperCamelCase ).type snake_case__ : List[str] = pa_table.append_column(__UpperCamelCase , pa.array([None] * len(__UpperCamelCase ) , type=__UpperCamelCase ) ) # more expensive cast to support nested structures with keys in a different order # allows str <-> int/float or str to Audio for example snake_case__ : List[str] = table_cast(__UpperCamelCase , self.config.features.arrow_schema ) return pa_table def __a ( self , __UpperCamelCase ) -> int: '''simple docstring''' for file_idx, file in enumerate(itertools.chain.from_iterable(__UpperCamelCase ) ): # If the file is one json object and if we need to look at the list of items in one specific field if self.config.field is not None: with open(__UpperCamelCase , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f: snake_case__ : Union[str, Any] = json.load(__UpperCamelCase ) # We keep only the field we are interested in snake_case__ : Tuple = dataset[self.config.field] # We accept two format: a list 
of dicts or a dict of lists if isinstance(__UpperCamelCase , (list, tuple) ): snake_case__ : List[Any] = set().union(*[row.keys() for row in dataset] ) snake_case__ : List[Any] = {col: [row.get(__UpperCamelCase ) for row in dataset] for col in keys} else: snake_case__ : List[Any] = dataset snake_case__ : Dict = pa.Table.from_pydict(__UpperCamelCase ) yield file_idx, self._cast_table(__UpperCamelCase ) # If the file has one json object per line else: with open(__UpperCamelCase , 'rb' ) as f: snake_case__ : Optional[int] = 0 # Use block_size equal to the chunk size divided by 32 to leverage multithreading # Set a default minimum value of 16kB if the chunk size is really small snake_case__ : Tuple = max(self.config.chunksize // 32 , 16 << 10 ) snake_case__ : Optional[Any] = ( self.config.encoding_errors if self.config.encoding_errors is not None else 'strict' ) while True: snake_case__ : Optional[int] = f.read(self.config.chunksize ) if not batch: break # Finish current line try: batch += f.readline() except (AttributeError, io.UnsupportedOperation): batch += readline(__UpperCamelCase ) # PyArrow only accepts utf-8 encoded bytes if self.config.encoding != "utf-8": snake_case__ : int = batch.decode(self.config.encoding , errors=__UpperCamelCase ).encode('utf-8' ) try: while True: try: snake_case__ : List[str] = paj.read_json( io.BytesIO(__UpperCamelCase ) , read_options=paj.ReadOptions(block_size=__UpperCamelCase ) ) break except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e: if ( isinstance(__UpperCamelCase , pa.ArrowInvalid ) and "straddling" not in str(__UpperCamelCase ) or block_size > len(__UpperCamelCase ) ): raise else: # Increase the block size in case it was too small. # The block size will be reset for the next file. logger.debug( F"""Batch of {len(__UpperCamelCase )} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.""" ) block_size *= 2 except pa.ArrowInvalid as e: try: with open( __UpperCamelCase , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f: snake_case__ : Tuple = json.load(__UpperCamelCase ) except json.JSONDecodeError: logger.error(F"""Failed to read file '{file}' with error {type(__UpperCamelCase )}: {e}""" ) raise e # If possible, parse the file as a list of json objects and exit the loop if isinstance(__UpperCamelCase , __UpperCamelCase ): # list is the only sequence type supported in JSON try: snake_case__ : str = set().union(*[row.keys() for row in dataset] ) snake_case__ : Union[str, Any] = {col: [row.get(__UpperCamelCase ) for row in dataset] for col in keys} snake_case__ : List[str] = pa.Table.from_pydict(__UpperCamelCase ) except (pa.ArrowInvalid, AttributeError) as e: logger.error(F"""Failed to read file '{file}' with error {type(__UpperCamelCase )}: {e}""" ) raise ValueError(F"""Not able to read records in the JSON file at {file}.""" ) from None yield file_idx, self._cast_table(__UpperCamelCase ) break else: logger.error(F"""Failed to read file '{file}' with error {type(__UpperCamelCase )}: {e}""" ) raise ValueError( F"""Not able to read records in the JSON file at {file}. """ F"""You should probably indicate the field of the JSON file containing your records. """ F"""This JSON file contain the following fields: {str(list(dataset.keys() ) )}. """ F"""Select the correct one and provide it as `field='XXX'` to the dataset loading method. 
""" ) from None # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(__UpperCamelCase ) batch_idx += 1
699
import collections from typing import List, Optional, Union from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging from ..bert.tokenization_bert_fast import BertTokenizerFast from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer lowerCAmelCase__ : Any = logging.get_logger(__name__) lowerCAmelCase__ : List[Any] = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''} lowerCAmelCase__ : Any = { '''vocab_file''': { '''facebook/dpr-ctx_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt''' ), '''facebook/dpr-ctx_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''facebook/dpr-ctx_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json''' ), '''facebook/dpr-ctx_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json''' ), }, } lowerCAmelCase__ : Any = { '''vocab_file''': { '''facebook/dpr-question_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt''' ), '''facebook/dpr-question_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''facebook/dpr-question_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json''' ), '''facebook/dpr-question_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json''' ), }, } lowerCAmelCase__ : Tuple = { '''vocab_file''': { '''facebook/dpr-reader-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt''' ), '''facebook/dpr-reader-multiset-base''': ( '''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''facebook/dpr-reader-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json''' ), '''facebook/dpr-reader-multiset-base''': ( '''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json''' ), }, } lowerCAmelCase__ : Dict = { '''facebook/dpr-ctx_encoder-single-nq-base''': 5_12, '''facebook/dpr-ctx_encoder-multiset-base''': 5_12, } lowerCAmelCase__ : Union[str, Any] = { '''facebook/dpr-question_encoder-single-nq-base''': 5_12, '''facebook/dpr-question_encoder-multiset-base''': 5_12, } lowerCAmelCase__ : Optional[Any] = { '''facebook/dpr-reader-single-nq-base''': 5_12, '''facebook/dpr-reader-multiset-base''': 5_12, } lowerCAmelCase__ : Tuple = { '''facebook/dpr-ctx_encoder-single-nq-base''': {'''do_lower_case''': True}, '''facebook/dpr-ctx_encoder-multiset-base''': {'''do_lower_case''': True}, } lowerCAmelCase__ : Any = { '''facebook/dpr-question_encoder-single-nq-base''': {'''do_lower_case''': True}, '''facebook/dpr-question_encoder-multiset-base''': {'''do_lower_case''': True}, } lowerCAmelCase__ : List[str] = { '''facebook/dpr-reader-single-nq-base''': {'''do_lower_case''': True}, '''facebook/dpr-reader-multiset-base''': {'''do_lower_case''': True}, } class __snake_case ( _lowerCamelCase ): __lowerCamelCase = 
VOCAB_FILES_NAMES __lowerCamelCase = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP __lowerCamelCase = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowerCamelCase = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION __lowerCamelCase = DPRContextEncoderTokenizer class __snake_case ( _lowerCamelCase ): __lowerCamelCase = VOCAB_FILES_NAMES __lowerCamelCase = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP __lowerCamelCase = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowerCamelCase = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION __lowerCamelCase = DPRQuestionEncoderTokenizer lowerCAmelCase__ : Tuple = collections.namedtuple( '''DPRSpanPrediction''', ['''span_score''', '''relevance_score''', '''doc_id''', '''start_index''', '''end_index''', '''text'''] ) lowerCAmelCase__ : List[Any] = collections.namedtuple('''DPRReaderOutput''', ['''start_logits''', '''end_logits''', '''relevance_logits''']) lowerCAmelCase__ : int = r''' Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`. It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers), using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)` with the format: [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids> Args: questions (`str` or `List[str]`): The questions to be encoded. You can specify one question for many passages. In this case, the question will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in `titles` or `texts`. titles (`str` or `List[str]`): The passages titles to be encoded. This can be a string or a list of strings if there are several passages. texts (`str` or `List[str]`): The passages texts to be encoded. This can be a string or a list of strings if there are several passages. padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`): Activates and controls padding. Accepts the following values: - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different lengths). truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`): Activates and controls truncation. Accepts the following values: - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided. - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. 
This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size). max_length (`int`, *optional*): Controls the maximum length to use by one of the truncation/padding parameters. If left unset or set to `None`, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated. return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors instead of list of python integers. Acceptable values are: - `\'tf\'`: Return TensorFlow `tf.constant` objects. - `\'pt\'`: Return PyTorch `torch.Tensor` objects. - `\'np\'`: Return Numpy `np.ndarray` objects. return_attention_mask (`bool`, *optional*): Whether or not to return the attention mask. If not set, will return the attention mask according to the specific tokenizer\'s default, defined by the `return_outputs` attribute. [What are attention masks?](../glossary#attention-mask) Return: `Dict[str, List[List[int]]]`: A dictionary with the following keys: - `input_ids`: List of token ids to be fed to a model. - `attention_mask`: List of indices specifying which tokens should be attended to by the model. ''' @add_start_docstrings(_lowerCamelCase ) class __snake_case : def __call__( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = False , __UpperCamelCase = False , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , **__UpperCamelCase , ) -> BatchEncoding: '''simple docstring''' if titles is None and texts is None: return super().__call__( __UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase , max_length=__UpperCamelCase , return_tensors=__UpperCamelCase , return_attention_mask=__UpperCamelCase , **__UpperCamelCase , ) elif titles is None or texts is None: snake_case__ : Optional[Any] = titles if texts is None else texts return super().__call__( __UpperCamelCase , __UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase , max_length=__UpperCamelCase , return_tensors=__UpperCamelCase , return_attention_mask=__UpperCamelCase , **__UpperCamelCase , ) snake_case__ : int = titles if not isinstance(__UpperCamelCase , __UpperCamelCase ) else [titles] snake_case__ : Optional[int] = texts if not isinstance(__UpperCamelCase , __UpperCamelCase ) else [texts] snake_case__ : List[Any] = len(__UpperCamelCase ) snake_case__ : str = questions if not isinstance(__UpperCamelCase , __UpperCamelCase ) else [questions] * n_passages assert len(__UpperCamelCase ) == len( __UpperCamelCase ), F"""There should be as many titles than texts but got {len(__UpperCamelCase )} titles and {len(__UpperCamelCase )} texts.""" snake_case__ : Optional[int] = super().__call__(__UpperCamelCase , __UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase )['input_ids'] snake_case__ : Optional[Any] = super().__call__(__UpperCamelCase , add_special_tokens=__UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase )['input_ids'] snake_case__ : Union[str, Any] = { 'input_ids': [ (encoded_question_and_title + encoded_text)[:max_length] if max_length is not None and truncation else encoded_question_and_title + encoded_text for encoded_question_and_title, 
encoded_text in zip(__UpperCamelCase , __UpperCamelCase ) ] } if return_attention_mask is not False: snake_case__ : List[Any] = [] for input_ids in encoded_inputs["input_ids"]: attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] ) snake_case__ : Union[str, Any] = attention_mask return self.pad(__UpperCamelCase , padding=__UpperCamelCase , max_length=__UpperCamelCase , return_tensors=__UpperCamelCase ) def __a ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = 16 , __UpperCamelCase = 64 , __UpperCamelCase = 4 , ) -> List[DPRSpanPrediction]: '''simple docstring''' snake_case__ : Optional[Any] = reader_input['input_ids'] snake_case__ , snake_case__ , snake_case__ : Any = reader_output[:3] snake_case__ : List[str] = len(__UpperCamelCase ) snake_case__ : Tuple = sorted(range(__UpperCamelCase ) , reverse=__UpperCamelCase , key=relevance_logits.__getitem__ ) snake_case__ : List[DPRReaderOutput] = [] for doc_id in sorted_docs: snake_case__ : Tuple = list(input_ids[doc_id] ) # assuming question & title information is at the beginning of the sequence snake_case__ : int = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id if sequence_ids[-1] == self.pad_token_id: snake_case__ : Union[str, Any] = sequence_ids.index(self.pad_token_id ) else: snake_case__ : str = len(__UpperCamelCase ) snake_case__ : Dict = self._get_best_spans( start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=__UpperCamelCase , top_spans=__UpperCamelCase , ) for start_index, end_index in best_spans: start_index += passage_offset end_index += passage_offset nbest_spans_predictions.append( DPRSpanPrediction( span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=__UpperCamelCase , start_index=__UpperCamelCase , end_index=__UpperCamelCase , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) ) if len(__UpperCamelCase ) >= num_spans: break return nbest_spans_predictions[:num_spans] def __a ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , ) -> List[DPRSpanPrediction]: '''simple docstring''' snake_case__ : Any = [] for start_index, start_score in enumerate(__UpperCamelCase ): for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ): scores.append(((start_index, start_index + answer_length), start_score + end_score) ) snake_case__ : str = sorted(__UpperCamelCase , key=lambda __UpperCamelCase : x[1] , reverse=__UpperCamelCase ) snake_case__ : Any = [] for (start_index, end_index), score in scores: assert start_index <= end_index, F"""Wrong span indices: [{start_index}:{end_index}]""" snake_case__ : str = end_index - start_index + 1 assert length <= max_answer_length, F"""Span is too long: {length} > {max_answer_length}""" if any( start_index <= prev_start_index <= prev_end_index <= end_index or prev_start_index <= start_index <= end_index <= prev_end_index for (prev_start_index, prev_end_index) in chosen_span_intervals ): continue chosen_span_intervals.append((start_index, end_index) ) if len(__UpperCamelCase ) == top_spans: break return chosen_span_intervals @add_end_docstrings(_lowerCamelCase ) class __snake_case ( _lowerCamelCase ,_lowerCamelCase ): __lowerCamelCase = VOCAB_FILES_NAMES __lowerCamelCase = READER_PRETRAINED_VOCAB_FILES_MAP __lowerCamelCase = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowerCamelCase = 
READER_PRETRAINED_INIT_CONFIGURATION __lowerCamelCase = ["""input_ids""", """attention_mask"""] __lowerCamelCase = DPRReaderTokenizer
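An illustration of the reader input layout produced by `__call__` above, with fabricated token ids: the question and title are encoded with special tokens as [CLS] question [SEP] title [SEP], the passage text without them, and the concatenation is optionally truncated to `max_length`.

question_and_title = [101, 2054, 102, 3419, 102]  # [CLS] q [SEP] title [SEP] (made-up ids)
text = [2023, 2003, 1037, 6251]                   # passage ids, no special tokens
max_length, truncation = 7, True
input_ids = (
    (question_and_title + text)[:max_length]
    if max_length is not None and truncation
    else question_and_title + text
)
attention_mask = [int(tok != 0) for tok in input_ids]  # 0 being the [PAD] id here
print(input_ids, attention_mask)  # [101, 2054, 102, 3419, 102, 2023, 2003] [1, 1, 1, 1, 1, 1, 1]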
699
1
import argparse
import json
import os
import time
import zipfile

from get_ci_error_statistics import download_artifact, get_artifacts_links

from transformers import logging


lowerCAmelCase__ : List[str] = logging.get_logger(__name__)


def UpperCamelCase__ ( A__ , A__ ) -> List[str]:
    snake_case__ : int = set()
    snake_case__ : Dict = []

    def parse_line(A__ ):
        for line in fp:
            if isinstance(A__ , A__ ):
                snake_case__ : List[Any] = line.decode('UTF-8' )
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(' ' ):
                # process a single warning and move it to `selected_warnings`.
                if len(A__ ) > 0:
                    snake_case__ : int = '\n'.join(A__ )
                    # Only keep the warnings specified in `targets`
                    if any(F""": {x}: """ in warning for x in targets ):
                        selected_warnings.add(A__ )
                    buffer.clear()
                continue
            else:
                snake_case__ : str = line.strip()
                buffer.append(A__ )

    if from_gh:
        for filename in os.listdir(A__ ):
            snake_case__ : int = os.path.join(A__ , A__ )
            if not os.path.isdir(A__ ):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(A__ ) as fp:
                    parse_line(A__ )
    else:
        try:
            with zipfile.ZipFile(A__ ) as z:
                for filename in z.namelist():
                    if not os.path.isdir(A__ ):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(A__ ) as fp:
                            parse_line(A__ )
        except Exception:
            logger.warning(
                F"""{artifact_path} is either an invalid zip file or something else wrong. This file is skipped."""
            )

    return selected_warnings


def UpperCamelCase__ ( A__ , A__ ) -> List[Any]:
    snake_case__ : List[str] = set()
    snake_case__ : int = [os.path.join(A__ , A__ ) for p in os.listdir(A__ ) if (p.endswith('.zip' ) or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(A__ , A__ ) )
    return selected_warnings


if __name__ == "__main__":

    def UpperCamelCase__ ( A__ ) -> Dict:
        return values.split(',' )

    lowerCAmelCase__ : str = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''')
    parser.add_argument(
        '''--output_dir''',
        type=str,
        required=True,
        help='''Where to store the downloaded artifacts and other result files.''',
    )
    parser.add_argument('''--token''', default=None, type=str, help='''A token that has actions:read permission.''')
    # optional parameters
    parser.add_argument(
        '''--targets''',
        default='''DeprecationWarning,UserWarning,FutureWarning''',
        type=list_str,
        help='''Comma-separated list of target warning(s) which we want to extract.''',
    )
    parser.add_argument(
        '''--from_gh''',
        action='''store_true''',
        help='''If running from a GitHub action workflow and collecting warnings from its artifacts.''',
    )

    lowerCAmelCase__ : Optional[int] = parser.parse_args()

    lowerCAmelCase__ : Dict = args.from_gh
    if from_gh:
        # The artifacts have to be downloaded using `actions/download-artifact@v3`
        pass
    else:
        os.makedirs(args.output_dir, exist_ok=True)

        # get download links
        lowerCAmelCase__ : Optional[int] = get_artifacts_links(args.workflow_run_id, token=args.token)
        with open(os.path.join(args.output_dir, '''artifacts.json'''), '''w''', encoding='''UTF-8''') as fp:
            json.dump(artifacts, fp, ensure_ascii=False, indent=4)

        # download artifacts
        for idx, (name, url) in enumerate(artifacts.items()):
            print(name)
            print(url)
            print('''=''' * 80)
            download_artifact(name, url, args.output_dir, args.token)
            # Be gentle to GitHub
            time.sleep(1)

    # extract warnings from artifacts
    lowerCAmelCase__ : int = extract_warnings(args.output_dir, args.targets)

    lowerCAmelCase__ : Tuple = sorted(selected_warnings)
    with open(os.path.join(args.output_dir, '''selected_warnings.json'''), '''w''', encoding='''UTF-8''') as fp:
        json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
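A tiny reproduction of the warning-block parsing above on a canned pytest "warnings summary" body; the two-line warning below is fabricated. Non-indented lines close the current block, indented lines accumulate into it, and only blocks matching a target category are kept.

lines = [
    "tests/test_x.py::test_y",
    "  /src/x.py:1: DeprecationWarning: old_fn is deprecated",
    "    old_fn()",
    "done",
]
targets, selected, buffer = ["DeprecationWarning"], set(), []
for line in lines:
    if not line.startswith(" "):  # a non-indented line closes the current warning block
        if buffer:
            warning = "\n".join(buffer)
            if any(f": {t}: " in warning for t in targets):
                selected.add(warning)
            buffer.clear()
    else:
        buffer.append(line.strip())
print(selected)  # one two-line DeprecationWarning entry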
699
import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, EulerAncestralDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionInstructPixaPixPipeline, UNetaDConditionModel, ) from diffusers.image_processor import VaeImageProcessor from diffusers.utils import floats_tensor, load_image, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class __snake_case ( _lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,unittest.TestCase ): __lowerCamelCase = StableDiffusionInstructPixaPixPipeline __lowerCamelCase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width""", """cross_attention_kwargs"""} __lowerCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS __lowerCamelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS __lowerCamelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS def __a ( self ) -> List[Any]: '''simple docstring''' torch.manual_seed(0 ) snake_case__ : Tuple = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , ) snake_case__ : Any = PNDMScheduler(skip_prk_steps=__UpperCamelCase ) torch.manual_seed(0 ) snake_case__ : Dict = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , ) torch.manual_seed(0 ) snake_case__ : int = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) snake_case__ : Tuple = CLIPTextModel(__UpperCamelCase ) snake_case__ : Optional[int] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) snake_case__ : Optional[int] = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'safety_checker': None, 'feature_extractor': None, } return components def __a ( self , __UpperCamelCase , __UpperCamelCase=0 ) -> Dict: '''simple docstring''' snake_case__ : Optional[Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCamelCase ) ).to(__UpperCamelCase ) snake_case__ : int = image.cpu().permute(0 , 2 , 3 , 1 )[0] snake_case__ : Union[str, Any] = Image.fromarray(np.uinta(__UpperCamelCase ) ).convert('RGB' ) if str(__UpperCamelCase ).startswith('mps' ): snake_case__ : str = torch.manual_seed(__UpperCamelCase ) else: snake_case__ : Dict = torch.Generator(device=__UpperCamelCase ).manual_seed(__UpperCamelCase ) snake_case__ : str = { 'prompt': 'A painting of a squirrel eating a burger', 'image': image, 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 6.0, 'image_guidance_scale': 1, 'output_type': 'numpy', } return inputs def __a ( self ) -> Tuple: '''simple docstring''' snake_case__ : Dict = 'cpu' # ensure determinism for the device-dependent torch.Generator snake_case__ : Optional[int] = 
self.get_dummy_components() snake_case__ : str = StableDiffusionInstructPixaPixPipeline(**__UpperCamelCase ) snake_case__ : Optional[int] = sd_pipe.to(__UpperCamelCase ) sd_pipe.set_progress_bar_config(disable=__UpperCamelCase ) snake_case__ : Tuple = self.get_dummy_inputs(__UpperCamelCase ) snake_case__ : List[str] = sd_pipe(**__UpperCamelCase ).images snake_case__ : List[str] = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) snake_case__ : str = np.array([0.7_5_2_6, 0.3_7_5_0, 0.4_5_4_7, 0.6_1_1_7, 0.5_8_6_6, 0.5_0_1_6, 0.4_3_2_7, 0.5_6_4_2, 0.4_8_1_5] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def __a ( self ) -> Any: '''simple docstring''' snake_case__ : Dict = 'cpu' # ensure determinism for the device-dependent torch.Generator snake_case__ : Union[str, Any] = self.get_dummy_components() snake_case__ : Optional[Any] = StableDiffusionInstructPixaPixPipeline(**__UpperCamelCase ) snake_case__ : List[Any] = sd_pipe.to(__UpperCamelCase ) sd_pipe.set_progress_bar_config(disable=__UpperCamelCase ) snake_case__ : Union[str, Any] = self.get_dummy_inputs(__UpperCamelCase ) snake_case__ : List[str] = 'french fries' snake_case__ : Optional[Any] = sd_pipe(**__UpperCamelCase , negative_prompt=__UpperCamelCase ) snake_case__ : Union[str, Any] = output.images snake_case__ : Tuple = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) snake_case__ : Any = np.array([0.7_5_1_1, 0.3_6_4_2, 0.4_5_5_3, 0.6_2_3_6, 0.5_7_9_7, 0.5_0_1_3, 0.4_3_4_3, 0.5_6_1_1, 0.4_8_3_1] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def __a ( self ) -> int: '''simple docstring''' snake_case__ : Optional[int] = 'cpu' # ensure determinism for the device-dependent torch.Generator snake_case__ : List[str] = self.get_dummy_components() snake_case__ : str = StableDiffusionInstructPixaPixPipeline(**__UpperCamelCase ) snake_case__ : str = sd_pipe.to(__UpperCamelCase ) sd_pipe.set_progress_bar_config(disable=__UpperCamelCase ) snake_case__ : Dict = self.get_dummy_inputs(__UpperCamelCase ) snake_case__ : Any = [inputs['prompt']] * 2 snake_case__ : Optional[int] = np.array(inputs['image'] ).astype(np.floataa ) / 2_5_5.0 snake_case__ : Optional[int] = torch.from_numpy(__UpperCamelCase ).unsqueeze(0 ).to(__UpperCamelCase ) snake_case__ : Any = image / 2 + 0.5 snake_case__ : Optional[Any] = image.permute(0 , 3 , 1 , 2 ) snake_case__ : List[Any] = image.repeat(2 , 1 , 1 , 1 ) snake_case__ : Optional[int] = sd_pipe(**__UpperCamelCase ).images snake_case__ : Union[str, Any] = image[-1, -3:, -3:, -1] assert image.shape == (2, 32, 32, 3) snake_case__ : List[Any] = np.array([0.5_8_1_2, 0.5_7_4_8, 0.5_2_2_2, 0.5_9_0_8, 0.5_6_9_5, 0.7_1_7_4, 0.6_8_0_4, 0.5_5_2_3, 0.5_5_7_9] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def __a ( self ) -> Union[str, Any]: '''simple docstring''' snake_case__ : Optional[int] = 'cpu' # ensure determinism for the device-dependent torch.Generator snake_case__ : Optional[int] = self.get_dummy_components() snake_case__ : Tuple = EulerAncestralDiscreteScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' ) snake_case__ : int = StableDiffusionInstructPixaPixPipeline(**__UpperCamelCase ) snake_case__ : List[str] = sd_pipe.to(__UpperCamelCase ) sd_pipe.set_progress_bar_config(disable=__UpperCamelCase ) snake_case__ : str = self.get_dummy_inputs(__UpperCamelCase ) snake_case__ : Any = sd_pipe(**__UpperCamelCase ).images snake_case__ : int = image[0, -3:, -3:, -1] snake_case__ : Tuple = 
[round(__UpperCamelCase , 4 ) for x in image_slice.flatten().tolist()] print(','.join([str(__UpperCamelCase ) for x in slice] ) ) assert image.shape == (1, 32, 32, 3) snake_case__ : List[Any] = np.array([0.7_4_1_7, 0.3_8_4_2, 0.4_7_3_2, 0.5_7_7_6, 0.5_8_9_1, 0.5_1_3_9, 0.4_0_5_2, 0.5_6_7_3, 0.4_9_8_6] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def __a ( self ) -> int: '''simple docstring''' super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) def __a ( self ) -> Any: '''simple docstring''' snake_case__ : Optional[int] = self.get_dummy_components() snake_case__ : int = StableDiffusionInstructPixaPixPipeline(**__UpperCamelCase ) snake_case__ : Union[str, Any] = VaeImageProcessor(do_resize=__UpperCamelCase , do_normalize=__UpperCamelCase ) snake_case__ : Optional[int] = pipe.to(__UpperCamelCase ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) snake_case__ : Optional[Any] = pipe(**self.get_dummy_inputs_by_type(__UpperCamelCase , input_image_type='pt' ) )[0] snake_case__ : Union[str, Any] = components['vae'] snake_case__ : str = self.get_dummy_inputs_by_type(__UpperCamelCase , input_image_type='pt' ) for image_param in self.image_latents_params: if image_param in inputs.keys(): snake_case__ : List[str] = vae.encode(inputs[image_param] ).latent_dist.mode() snake_case__ : Dict = pipe(**__UpperCamelCase )[0] snake_case__ : str = np.abs(out - out_latents_inputs ).max() self.assertLess(__UpperCamelCase , 1E-4 , 'passing latents as image input generate different result from passing image' ) @slow @require_torch_gpu class __snake_case ( unittest.TestCase ): def __a ( self ) -> List[str]: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def __a ( self , __UpperCamelCase=0 ) -> Dict: '''simple docstring''' snake_case__ : Optional[Any] = torch.manual_seed(__UpperCamelCase ) snake_case__ : List[str] = load_image( 'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg' ) snake_case__ : int = { 'prompt': 'turn him into a cyborg', 'image': image, 'generator': generator, 'num_inference_steps': 3, 'guidance_scale': 7.5, 'image_guidance_scale': 1.0, 'output_type': 'numpy', } return inputs def __a ( self ) -> Dict: '''simple docstring''' snake_case__ : Any = StableDiffusionInstructPixaPixPipeline.from_pretrained( 'timbrooks/instruct-pix2pix' , safety_checker=__UpperCamelCase ) pipe.to(__UpperCamelCase ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) pipe.enable_attention_slicing() snake_case__ : Tuple = self.get_inputs() snake_case__ : List[Any] = pipe(**__UpperCamelCase ).images snake_case__ : Union[str, Any] = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) snake_case__ : Dict = np.array([0.5_9_0_2, 0.6_0_1_5, 0.6_0_2_7, 0.5_9_8_3, 0.6_0_9_2, 0.6_0_6_1, 0.5_7_6_5, 0.5_7_8_5, 0.5_5_5_5] ) assert np.abs(expected_slice - image_slice ).max() < 1E-3 def __a ( self ) -> str: '''simple docstring''' snake_case__ : Any = StableDiffusionInstructPixaPixPipeline.from_pretrained( 'timbrooks/instruct-pix2pix' , safety_checker=__UpperCamelCase ) snake_case__ : Tuple = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.to(__UpperCamelCase ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) pipe.enable_attention_slicing() snake_case__ : Dict = self.get_inputs() snake_case__ : Dict = pipe(**__UpperCamelCase ).images snake_case__ : Union[str, Any] = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) snake_case__ : List[Any] = 
np.array([0.6_5_7_8, 0.6_8_1_7, 0.6_9_7_2, 0.6_7_6_1, 0.6_8_5_6, 0.6_9_1_6, 0.6_4_2_8, 0.6_5_1_6, 0.6_3_0_1] ) assert np.abs(expected_slice - image_slice ).max() < 1E-3 def __a ( self ) -> Optional[Any]: '''simple docstring''' snake_case__ : Tuple = StableDiffusionInstructPixaPixPipeline.from_pretrained( 'timbrooks/instruct-pix2pix' , safety_checker=__UpperCamelCase ) snake_case__ : Tuple = DDIMScheduler.from_config(pipe.scheduler.config ) pipe.to(__UpperCamelCase ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) pipe.enable_attention_slicing() snake_case__ : Optional[int] = self.get_inputs() snake_case__ : Optional[int] = pipe(**__UpperCamelCase ).images snake_case__ : Tuple = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) snake_case__ : int = np.array([0.3_8_2_8, 0.3_8_3_4, 0.3_8_1_8, 0.3_7_9_2, 0.3_8_6_5, 0.3_7_5_2, 0.3_7_9_2, 0.3_8_4_7, 0.3_7_5_3] ) assert np.abs(expected_slice - image_slice ).max() < 1E-3 def __a ( self ) -> Union[str, Any]: '''simple docstring''' snake_case__ : int = 0 def callback_fn(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> None: snake_case__ : List[Any] = True nonlocal number_of_steps number_of_steps += 1 if step == 1: snake_case__ : Any = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 64) snake_case__ : int = latents[0, -3:, -3:, -1] snake_case__ : List[str] = np.array([-0.2_4_6_3, -0.4_6_4_4, -0.9_7_5_6, 1.5_1_7_6, 1.4_4_1_4, 0.7_8_6_6, 0.9_8_9_7, 0.8_5_2_1, 0.7_9_8_3] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2 elif step == 2: snake_case__ : Dict = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 64) snake_case__ : Dict = latents[0, -3:, -3:, -1] snake_case__ : Optional[Any] = np.array([-0.2_6_4_4, -0.4_6_2_6, -0.9_6_5_3, 1.5_1_7_6, 1.4_5_5_1, 0.7_6_8_6, 0.9_8_0_5, 0.8_4_5_2, 0.8_1_1_5] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2 snake_case__ : str = False snake_case__ : List[Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained( 'timbrooks/instruct-pix2pix' , safety_checker=__UpperCamelCase , torch_dtype=torch.floataa ) snake_case__ : int = pipe.to(__UpperCamelCase ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) pipe.enable_attention_slicing() snake_case__ : int = self.get_inputs() pipe(**__UpperCamelCase , callback=__UpperCamelCase , callback_steps=1 ) assert callback_fn.has_been_called assert number_of_steps == 3 def __a ( self ) -> Any: '''simple docstring''' torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() snake_case__ : str = StableDiffusionInstructPixaPixPipeline.from_pretrained( 'timbrooks/instruct-pix2pix' , safety_checker=__UpperCamelCase , torch_dtype=torch.floataa ) snake_case__ : Dict = pipe.to(__UpperCamelCase ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() snake_case__ : str = self.get_inputs() snake_case__ : Tuple = pipe(**__UpperCamelCase ) snake_case__ : List[Any] = torch.cuda.max_memory_allocated() # make sure that less than 2.2 GB is allocated assert mem_bytes < 2.2 * 10**9 def __a ( self ) -> int: '''simple docstring''' snake_case__ : int = self.get_inputs() # resize to resolution that is divisible by 8 but not 16 or 32 snake_case__ : Tuple = inputs['image'].resize((504, 504) ) snake_case__ : str = 'timbrooks/instruct-pix2pix' snake_case__ : Tuple = StableDiffusionInstructPixaPixPipeline.from_pretrained( __UpperCamelCase , 
safety_checker=__UpperCamelCase , ) pipe.to(__UpperCamelCase ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) pipe.enable_attention_slicing() snake_case__ : str = pipe(**__UpperCamelCase ) snake_case__ : List[Any] = output.images[0] snake_case__ : List[Any] = image[255:258, 383:386, -1] assert image.shape == (504, 504, 3) snake_case__ : List[str] = np.array([0.2_7_2_6, 0.2_5_2_9, 0.2_6_6_4, 0.2_6_5_5, 0.2_6_4_1, 0.2_6_4_2, 0.2_5_9_1, 0.2_6_4_9, 0.2_5_9_0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
699
1
import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, EulerAncestralDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionInstructPixaPixPipeline, UNetaDConditionModel, ) from diffusers.image_processor import VaeImageProcessor from diffusers.utils import floats_tensor, load_image, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class __snake_case ( _lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,unittest.TestCase ): __lowerCamelCase = StableDiffusionInstructPixaPixPipeline __lowerCamelCase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width""", """cross_attention_kwargs"""} __lowerCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS __lowerCamelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS __lowerCamelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS def __a ( self ) -> List[Any]: '''simple docstring''' torch.manual_seed(0 ) snake_case__ : Tuple = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , ) snake_case__ : Any = PNDMScheduler(skip_prk_steps=__UpperCamelCase ) torch.manual_seed(0 ) snake_case__ : Dict = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , ) torch.manual_seed(0 ) snake_case__ : int = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) snake_case__ : Tuple = CLIPTextModel(__UpperCamelCase ) snake_case__ : Optional[int] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) snake_case__ : Optional[int] = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'safety_checker': None, 'feature_extractor': None, } return components def __a ( self , __UpperCamelCase , __UpperCamelCase=0 ) -> Dict: '''simple docstring''' snake_case__ : Optional[Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCamelCase ) ).to(__UpperCamelCase ) snake_case__ : int = image.cpu().permute(0 , 2 , 3 , 1 )[0] snake_case__ : Union[str, Any] = Image.fromarray(np.uinta(__UpperCamelCase ) ).convert('RGB' ) if str(__UpperCamelCase ).startswith('mps' ): snake_case__ : str = torch.manual_seed(__UpperCamelCase ) else: snake_case__ : Dict = torch.Generator(device=__UpperCamelCase ).manual_seed(__UpperCamelCase ) snake_case__ : str = { 'prompt': 'A painting of a squirrel eating a burger', 'image': image, 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 6.0, 'image_guidance_scale': 1, 'output_type': 'numpy', } return inputs def __a ( self ) -> Tuple: '''simple docstring''' snake_case__ : Dict = 'cpu' # ensure determinism for the device-dependent torch.Generator snake_case__ : Optional[int] = 
self.get_dummy_components() snake_case__ : str = StableDiffusionInstructPixaPixPipeline(**__UpperCamelCase ) snake_case__ : Optional[int] = sd_pipe.to(__UpperCamelCase ) sd_pipe.set_progress_bar_config(disable=__UpperCamelCase ) snake_case__ : Tuple = self.get_dummy_inputs(__UpperCamelCase ) snake_case__ : List[str] = sd_pipe(**__UpperCamelCase ).images snake_case__ : List[str] = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) snake_case__ : str = np.array([0.7_5_2_6, 0.3_7_5_0, 0.4_5_4_7, 0.6_1_1_7, 0.5_8_6_6, 0.5_0_1_6, 0.4_3_2_7, 0.5_6_4_2, 0.4_8_1_5] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def __a ( self ) -> Any: '''simple docstring''' snake_case__ : Dict = 'cpu' # ensure determinism for the device-dependent torch.Generator snake_case__ : Union[str, Any] = self.get_dummy_components() snake_case__ : Optional[Any] = StableDiffusionInstructPixaPixPipeline(**__UpperCamelCase ) snake_case__ : List[Any] = sd_pipe.to(__UpperCamelCase ) sd_pipe.set_progress_bar_config(disable=__UpperCamelCase ) snake_case__ : Union[str, Any] = self.get_dummy_inputs(__UpperCamelCase ) snake_case__ : List[str] = 'french fries' snake_case__ : Optional[Any] = sd_pipe(**__UpperCamelCase , negative_prompt=__UpperCamelCase ) snake_case__ : Union[str, Any] = output.images snake_case__ : Tuple = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) snake_case__ : Any = np.array([0.7_5_1_1, 0.3_6_4_2, 0.4_5_5_3, 0.6_2_3_6, 0.5_7_9_7, 0.5_0_1_3, 0.4_3_4_3, 0.5_6_1_1, 0.4_8_3_1] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def __a ( self ) -> int: '''simple docstring''' snake_case__ : Optional[int] = 'cpu' # ensure determinism for the device-dependent torch.Generator snake_case__ : List[str] = self.get_dummy_components() snake_case__ : str = StableDiffusionInstructPixaPixPipeline(**__UpperCamelCase ) snake_case__ : str = sd_pipe.to(__UpperCamelCase ) sd_pipe.set_progress_bar_config(disable=__UpperCamelCase ) snake_case__ : Dict = self.get_dummy_inputs(__UpperCamelCase ) snake_case__ : Any = [inputs['prompt']] * 2 snake_case__ : Optional[int] = np.array(inputs['image'] ).astype(np.floataa ) / 2_5_5.0 snake_case__ : Optional[int] = torch.from_numpy(__UpperCamelCase ).unsqueeze(0 ).to(__UpperCamelCase ) snake_case__ : Any = image / 2 + 0.5 snake_case__ : Optional[Any] = image.permute(0 , 3 , 1 , 2 ) snake_case__ : List[Any] = image.repeat(2 , 1 , 1 , 1 ) snake_case__ : Optional[int] = sd_pipe(**__UpperCamelCase ).images snake_case__ : Union[str, Any] = image[-1, -3:, -3:, -1] assert image.shape == (2, 32, 32, 3) snake_case__ : List[Any] = np.array([0.5_8_1_2, 0.5_7_4_8, 0.5_2_2_2, 0.5_9_0_8, 0.5_6_9_5, 0.7_1_7_4, 0.6_8_0_4, 0.5_5_2_3, 0.5_5_7_9] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def __a ( self ) -> Union[str, Any]: '''simple docstring''' snake_case__ : Optional[int] = 'cpu' # ensure determinism for the device-dependent torch.Generator snake_case__ : Optional[int] = self.get_dummy_components() snake_case__ : Tuple = EulerAncestralDiscreteScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' ) snake_case__ : int = StableDiffusionInstructPixaPixPipeline(**__UpperCamelCase ) snake_case__ : List[str] = sd_pipe.to(__UpperCamelCase ) sd_pipe.set_progress_bar_config(disable=__UpperCamelCase ) snake_case__ : str = self.get_dummy_inputs(__UpperCamelCase ) snake_case__ : Any = sd_pipe(**__UpperCamelCase ).images snake_case__ : int = image[0, -3:, -3:, -1] snake_case__ : Tuple = 
[round(__UpperCamelCase , 4 ) for x in image_slice.flatten().tolist()] print(','.join([str(__UpperCamelCase ) for x in slice] ) ) assert image.shape == (1, 32, 32, 3) snake_case__ : List[Any] = np.array([0.7_4_1_7, 0.3_8_4_2, 0.4_7_3_2, 0.5_7_7_6, 0.5_8_9_1, 0.5_1_3_9, 0.4_0_5_2, 0.5_6_7_3, 0.4_9_8_6] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def __a ( self ) -> int: '''simple docstring''' super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) def __a ( self ) -> Any: '''simple docstring''' snake_case__ : Optional[int] = self.get_dummy_components() snake_case__ : int = StableDiffusionInstructPixaPixPipeline(**__UpperCamelCase ) snake_case__ : Union[str, Any] = VaeImageProcessor(do_resize=__UpperCamelCase , do_normalize=__UpperCamelCase ) snake_case__ : Optional[int] = pipe.to(__UpperCamelCase ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) snake_case__ : Optional[Any] = pipe(**self.get_dummy_inputs_by_type(__UpperCamelCase , input_image_type='pt' ) )[0] snake_case__ : Union[str, Any] = components['vae'] snake_case__ : str = self.get_dummy_inputs_by_type(__UpperCamelCase , input_image_type='pt' ) for image_param in self.image_latents_params: if image_param in inputs.keys(): snake_case__ : List[str] = vae.encode(inputs[image_param] ).latent_dist.mode() snake_case__ : Dict = pipe(**__UpperCamelCase )[0] snake_case__ : str = np.abs(out - out_latents_inputs ).max() self.assertLess(__UpperCamelCase , 1E-4 , 'passing latents as image input generate different result from passing image' ) @slow @require_torch_gpu class __snake_case ( unittest.TestCase ): def __a ( self ) -> List[str]: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def __a ( self , __UpperCamelCase=0 ) -> Dict: '''simple docstring''' snake_case__ : Optional[Any] = torch.manual_seed(__UpperCamelCase ) snake_case__ : List[str] = load_image( 'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg' ) snake_case__ : int = { 'prompt': 'turn him into a cyborg', 'image': image, 'generator': generator, 'num_inference_steps': 3, 'guidance_scale': 7.5, 'image_guidance_scale': 1.0, 'output_type': 'numpy', } return inputs def __a ( self ) -> Dict: '''simple docstring''' snake_case__ : Any = StableDiffusionInstructPixaPixPipeline.from_pretrained( 'timbrooks/instruct-pix2pix' , safety_checker=__UpperCamelCase ) pipe.to(__UpperCamelCase ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) pipe.enable_attention_slicing() snake_case__ : Tuple = self.get_inputs() snake_case__ : List[Any] = pipe(**__UpperCamelCase ).images snake_case__ : Union[str, Any] = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) snake_case__ : Dict = np.array([0.5_9_0_2, 0.6_0_1_5, 0.6_0_2_7, 0.5_9_8_3, 0.6_0_9_2, 0.6_0_6_1, 0.5_7_6_5, 0.5_7_8_5, 0.5_5_5_5] ) assert np.abs(expected_slice - image_slice ).max() < 1E-3 def __a ( self ) -> str: '''simple docstring''' snake_case__ : Any = StableDiffusionInstructPixaPixPipeline.from_pretrained( 'timbrooks/instruct-pix2pix' , safety_checker=__UpperCamelCase ) snake_case__ : Tuple = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.to(__UpperCamelCase ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) pipe.enable_attention_slicing() snake_case__ : Dict = self.get_inputs() snake_case__ : Dict = pipe(**__UpperCamelCase ).images snake_case__ : Union[str, Any] = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) snake_case__ : List[Any] = 
np.array([0.6_5_7_8, 0.6_8_1_7, 0.6_9_7_2, 0.6_7_6_1, 0.6_8_5_6, 0.6_9_1_6, 0.6_4_2_8, 0.6_5_1_6, 0.6_3_0_1] ) assert np.abs(expected_slice - image_slice ).max() < 1E-3 def __a ( self ) -> Optional[Any]: '''simple docstring''' snake_case__ : Tuple = StableDiffusionInstructPixaPixPipeline.from_pretrained( 'timbrooks/instruct-pix2pix' , safety_checker=__UpperCamelCase ) snake_case__ : Tuple = DDIMScheduler.from_config(pipe.scheduler.config ) pipe.to(__UpperCamelCase ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) pipe.enable_attention_slicing() snake_case__ : Optional[int] = self.get_inputs() snake_case__ : Optional[int] = pipe(**__UpperCamelCase ).images snake_case__ : Tuple = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) snake_case__ : int = np.array([0.3_8_2_8, 0.3_8_3_4, 0.3_8_1_8, 0.3_7_9_2, 0.3_8_6_5, 0.3_7_5_2, 0.3_7_9_2, 0.3_8_4_7, 0.3_7_5_3] ) assert np.abs(expected_slice - image_slice ).max() < 1E-3 def __a ( self ) -> Union[str, Any]: '''simple docstring''' snake_case__ : int = 0 def callback_fn(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> None: snake_case__ : List[Any] = True nonlocal number_of_steps number_of_steps += 1 if step == 1: snake_case__ : Any = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 64) snake_case__ : int = latents[0, -3:, -3:, -1] snake_case__ : List[str] = np.array([-0.2_4_6_3, -0.4_6_4_4, -0.9_7_5_6, 1.5_1_7_6, 1.4_4_1_4, 0.7_8_6_6, 0.9_8_9_7, 0.8_5_2_1, 0.7_9_8_3] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2 elif step == 2: snake_case__ : Dict = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 64) snake_case__ : Dict = latents[0, -3:, -3:, -1] snake_case__ : Optional[Any] = np.array([-0.2_6_4_4, -0.4_6_2_6, -0.9_6_5_3, 1.5_1_7_6, 1.4_5_5_1, 0.7_6_8_6, 0.9_8_0_5, 0.8_4_5_2, 0.8_1_1_5] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2 snake_case__ : str = False snake_case__ : List[Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained( 'timbrooks/instruct-pix2pix' , safety_checker=__UpperCamelCase , torch_dtype=torch.floataa ) snake_case__ : int = pipe.to(__UpperCamelCase ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) pipe.enable_attention_slicing() snake_case__ : int = self.get_inputs() pipe(**__UpperCamelCase , callback=__UpperCamelCase , callback_steps=1 ) assert callback_fn.has_been_called assert number_of_steps == 3 def __a ( self ) -> Any: '''simple docstring''' torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() snake_case__ : str = StableDiffusionInstructPixaPixPipeline.from_pretrained( 'timbrooks/instruct-pix2pix' , safety_checker=__UpperCamelCase , torch_dtype=torch.floataa ) snake_case__ : Dict = pipe.to(__UpperCamelCase ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() snake_case__ : str = self.get_inputs() snake_case__ : Tuple = pipe(**__UpperCamelCase ) snake_case__ : List[Any] = torch.cuda.max_memory_allocated() # make sure that less than 2.2 GB is allocated assert mem_bytes < 2.2 * 10**9 def __a ( self ) -> int: '''simple docstring''' snake_case__ : int = self.get_inputs() # resize to resolution that is divisible by 8 but not 16 or 32 snake_case__ : Tuple = inputs['image'].resize((504, 504) ) snake_case__ : str = 'timbrooks/instruct-pix2pix' snake_case__ : Tuple = StableDiffusionInstructPixaPixPipeline.from_pretrained( __UpperCamelCase , 
safety_checker=__UpperCamelCase , ) pipe.to(__UpperCamelCase ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) pipe.enable_attention_slicing() snake_case__ : str = pipe(**__UpperCamelCase ) snake_case__ : List[Any] = output.images[0] snake_case__ : List[Any] = image[255:258, 383:386, -1] assert image.shape == (504, 504, 3) snake_case__ : List[str] = np.array([0.2_7_2_6, 0.2_5_2_9, 0.2_6_6_4, 0.2_6_5_5, 0.2_6_4_1, 0.2_6_4_2, 0.2_5_9_1, 0.2_6_4_9, 0.2_5_9_0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
699
from .data_collator import (
    DataCollatorForLanguageModeling,
    DataCollatorForPermutationLanguageModeling,
    DataCollatorForSeqaSeq,
    DataCollatorForSOP,
    DataCollatorForTokenClassification,
    DataCollatorForWholeWordMask,
    DataCollatorWithPadding,
    DefaultDataCollator,
    default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
    DataProcessor,
    InputExample,
    InputFeatures,
    SingleSentenceClassificationProcessor,
    SquadExample,
    SquadFeatures,
    SquadVaProcessor,
    glue_convert_examples_to_features,
    glue_output_modes,
    glue_processors,
    glue_tasks_num_labels,
    squad_convert_examples_to_features,
    xnli_output_modes,
    xnli_processors,
    xnli_tasks_num_labels,
)
699
1
def UpperCamelCase__ ( A__ ) -> bool:
    # Compare against the rounded cube root: a raw float check fails for true
    # cubes, e.g. 27 ** (1 / 3) evaluates to 3.0000000000000004.
    snake_case__ : int = round(A__ ** (1 / 3))
    return snake_case__ * snake_case__ * snake_case__ == A__


if __name__ == "__main__":
    print(UpperCamelCase__(27))  # True
    print(UpperCamelCase__(4))  # False
699
from dataclasses import dataclass, field from typing import Optional from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser @dataclass class __snake_case : __lowerCamelCase = field( metadata={"""help""": """The output directory where the model will be written."""} ,) __lowerCamelCase = field( metadata={ """help""": ( """The encoder model checkpoint for weights initialization.""" """Don't set if you want to train an encoder model from scratch.""" ) } ,) __lowerCamelCase = field( metadata={ """help""": ( """The decoder model checkpoint for weights initialization.""" """Don't set if you want to train a decoder model from scratch.""" ) } ,) __lowerCamelCase = field( default=_lowerCamelCase ,metadata={"""help""": """Pretrained encoder config name or path if not the same as encoder_model_name"""} ) __lowerCamelCase = field( default=_lowerCamelCase ,metadata={"""help""": """Pretrained decoder config name or path if not the same as decoder_model_name"""} ) def UpperCamelCase__ ( ) -> Union[str, Any]: snake_case__ : str = HfArgumentParser((ModelArguments,) ) ((snake_case__) , ) : Dict = parser.parse_args_into_dataclasses() # Load pretrained model and tokenizer # Use explicit specified encoder config if model_args.encoder_config_name: snake_case__ : List[str] = AutoConfig.from_pretrained(model_args.encoder_config_name ) # Use pretrained encoder model's config else: snake_case__ : Optional[int] = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path ) # Use explicit specified decoder config if model_args.decoder_config_name: snake_case__ : Optional[Any] = AutoConfig.from_pretrained(model_args.decoder_config_name ) # Use pretrained decoder model's config else: snake_case__ : List[str] = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path ) # necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed snake_case__ : Any = True snake_case__ : Dict = True snake_case__ : Tuple = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained( encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path , decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path , encoder_config=A__ , decoder_config=A__ , ) # GPT2 only has bos/eos tokens but not decoder_start/pad tokens snake_case__ : Optional[Any] = decoder_config.decoder_start_token_id snake_case__ : Tuple = decoder_config.pad_token_id if decoder_start_token_id is None: snake_case__ : Optional[Any] = decoder_config.bos_token_id if pad_token_id is None: snake_case__ : int = decoder_config.eos_token_id # This is necessary to make Flax's generate() work snake_case__ : Union[str, Any] = decoder_config.eos_token_id snake_case__ : Optional[int] = decoder_start_token_id snake_case__ : int = pad_token_id snake_case__ : Tuple = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path ) snake_case__ : int = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path ) snake_case__ : Tuple = tokenizer.convert_ids_to_tokens(model.config.pad_token_id ) model.save_pretrained(model_args.output_dir ) image_processor.save_pretrained(model_args.output_dir ) tokenizer.save_pretrained(model_args.output_dir ) if __name__ == "__main__": main()
699
1
from collections import Counter from pathlib import Path from typing import Optional, Tuple import yaml class __snake_case ( yaml.SafeLoader ): def __a ( self , __UpperCamelCase ) -> Any: '''simple docstring''' snake_case__ : List[str] = [self.constructed_objects[key_node] for key_node, _ in node.value] snake_case__ : Optional[Any] = [tuple(__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else key for key in keys] snake_case__ : int = Counter(__UpperCamelCase ) snake_case__ : Optional[int] = [key for key in counter if counter[key] > 1] if duplicate_keys: raise TypeError(F"""Got duplicate yaml keys: {duplicate_keys}""" ) def __a ( self , __UpperCamelCase , __UpperCamelCase=False ) -> Tuple: '''simple docstring''' snake_case__ : int = super().construct_mapping(__UpperCamelCase , deep=__UpperCamelCase ) self._check_no_duplicates_on_constructed_node(__UpperCamelCase ) return mapping def UpperCamelCase__ ( A__ ) -> Tuple[Optional[str], str]: snake_case__ : Optional[int] = list(readme_content.splitlines() ) if full_content and full_content[0] == "---" and "---" in full_content[1:]: snake_case__ : List[Any] = full_content[1:].index('---' ) + 1 snake_case__ : Union[str, Any] = '\n'.join(full_content[1:sep_idx] ) return yamlblock, "\n".join(full_content[sep_idx + 1 :] ) return None, "\n".join(A__ ) class __snake_case ( _lowerCamelCase ): # class attributes __lowerCamelCase = {"""train_eval_index"""} # train-eval-index in the YAML metadata @classmethod def __a ( cls , __UpperCamelCase ) -> "DatasetMetadata": '''simple docstring''' with open(__UpperCamelCase , encoding='utf-8' ) as readme_file: snake_case__ , snake_case__ : Any = _split_yaml_from_readme(readme_file.read() ) if yaml_string is not None: return cls.from_yaml_string(__UpperCamelCase ) else: return cls() def __a ( self , __UpperCamelCase ) -> List[str]: '''simple docstring''' if path.exists(): with open(__UpperCamelCase , encoding='utf-8' ) as readme_file: snake_case__ : Tuple = readme_file.read() else: snake_case__ : Tuple = None snake_case__ : Union[str, Any] = self._to_readme(__UpperCamelCase ) with open(__UpperCamelCase , 'w' , encoding='utf-8' ) as readme_file: readme_file.write(__UpperCamelCase ) def __a ( self , __UpperCamelCase = None ) -> str: '''simple docstring''' if readme_content is not None: snake_case__ , snake_case__ : Dict = _split_yaml_from_readme(__UpperCamelCase ) snake_case__ : List[str] = '---\n' + self.to_yaml_string() + '---\n' + content else: snake_case__ : List[str] = '---\n' + self.to_yaml_string() + '---\n' return full_content @classmethod def __a ( cls , __UpperCamelCase ) -> "DatasetMetadata": '''simple docstring''' snake_case__ : Union[str, Any] = yaml.load(__UpperCamelCase , Loader=_NoDuplicateSafeLoader ) or {} # Convert the YAML keys to DatasetMetadata fields snake_case__ : Tuple = { (key.replace('-' , '_' ) if key.replace('-' , '_' ) in cls._FIELDS_WITH_DASHES else key): value for key, value in metadata_dict.items() } return cls(**__UpperCamelCase ) def __a ( self ) -> str: '''simple docstring''' return yaml.safe_dump( { (key.replace('_' , '-' ) if key in self._FIELDS_WITH_DASHES else key): value for key, value in self.items() } , sort_keys=__UpperCamelCase , allow_unicode=__UpperCamelCase , encoding='utf-8' , ).decode('utf-8' ) lowerCAmelCase__ : Any = { '''image-classification''': [], '''translation''': [], '''image-segmentation''': [], '''fill-mask''': [], '''automatic-speech-recognition''': [], '''token-classification''': [], '''sentence-similarity''': [], 
'''audio-classification''': [], '''question-answering''': [], '''summarization''': [], '''zero-shot-classification''': [], '''table-to-text''': [], '''feature-extraction''': [], '''other''': [], '''multiple-choice''': [], '''text-classification''': [], '''text-to-image''': [], '''text2text-generation''': [], '''zero-shot-image-classification''': [], '''tabular-classification''': [], '''tabular-regression''': [], '''image-to-image''': [], '''tabular-to-text''': [], '''unconditional-image-generation''': [], '''text-retrieval''': [], '''text-to-speech''': [], '''object-detection''': [], '''audio-to-audio''': [], '''text-generation''': [], '''conversational''': [], '''table-question-answering''': [], '''visual-question-answering''': [], '''image-to-text''': [], '''reinforcement-learning''': [], '''voice-activity-detection''': [], '''time-series-forecasting''': [], '''document-question-answering''': [], } if __name__ == "__main__": from argparse import ArgumentParser lowerCAmelCase__ : Dict = ArgumentParser(usage='''Validate the yaml metadata block of a README.md file.''') ap.add_argument('''readme_filepath''') lowerCAmelCase__ : Optional[Any] = ap.parse_args() lowerCAmelCase__ : Tuple = Path(args.readme_filepath) lowerCAmelCase__ : Dict = DatasetMetadata.from_readme(readme_filepath) print(dataset_metadata) dataset_metadata.to_readme(readme_filepath)
699
import logging import os from dataclasses import dataclass, field from typing import Dict, Optional import datasets import numpy as np import tensorflow as tf from transformers import ( AutoConfig, AutoTokenizer, EvalPrediction, HfArgumentParser, PreTrainedTokenizer, TFAutoModelForSequenceClassification, TFTrainer, TFTrainingArguments, ) from transformers.utils import logging as hf_logging hf_logging.set_verbosity_info() hf_logging.enable_default_handler() hf_logging.enable_explicit_format() def UpperCamelCase__ ( A__ , A__ , A__ , A__ , A__ , A__ = None , ) -> Optional[int]: snake_case__ : List[str] = {} if train_file is not None: snake_case__ : Tuple = [train_file] if eval_file is not None: snake_case__ : Dict = [eval_file] if test_file is not None: snake_case__ : str = [test_file] snake_case__ : Optional[Any] = datasets.load_dataset('csv' , data_files=A__ ) snake_case__ : Any = list(ds[list(files.keys() )[0]].features.keys() ) snake_case__ : Optional[Any] = features_name.pop(A__ ) snake_case__ : Optional[Any] = list(set(ds[list(files.keys() )[0]][label_name] ) ) snake_case__ : str = {label: i for i, label in enumerate(A__ )} snake_case__ : int = tokenizer.model_input_names snake_case__ : int = {} if len(A__ ) == 1: for k in files.keys(): snake_case__ : str = ds[k].map( lambda A__ : tokenizer.batch_encode_plus( example[features_name[0]] , truncation=A__ , max_length=A__ , padding='max_length' ) , batched=A__ , ) elif len(A__ ) == 2: for k in files.keys(): snake_case__ : Optional[int] = ds[k].map( lambda A__ : tokenizer.batch_encode_plus( (example[features_name[0]], example[features_name[1]]) , truncation=A__ , max_length=A__ , padding='max_length' , ) , batched=A__ , ) def gen_train(): for ex in transformed_ds[datasets.Split.TRAIN]: snake_case__ : int = {k: v for k, v in ex.items() if k in input_names} snake_case__ : Any = labelaid[ex[label_name]] yield (d, label) def gen_val(): for ex in transformed_ds[datasets.Split.VALIDATION]: snake_case__ : int = {k: v for k, v in ex.items() if k in input_names} snake_case__ : Union[str, Any] = labelaid[ex[label_name]] yield (d, label) def gen_test(): for ex in transformed_ds[datasets.Split.TEST]: snake_case__ : Dict = {k: v for k, v in ex.items() if k in input_names} snake_case__ : List[str] = labelaid[ex[label_name]] yield (d, label) snake_case__ : Any = ( tf.data.Dataset.from_generator( A__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.TRAIN in transformed_ds else None ) if train_ds is not None: snake_case__ : str = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) ) snake_case__ : Optional[int] = ( tf.data.Dataset.from_generator( A__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.VALIDATION in transformed_ds else None ) if val_ds is not None: snake_case__ : Optional[int] = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) ) snake_case__ : List[str] = ( tf.data.Dataset.from_generator( A__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.TEST in transformed_ds else None ) if test_ds is not None: snake_case__ : str = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) ) return train_ds, val_ds, test_ds, labelaid lowerCAmelCase__ : List[str] = 
logging.getLogger(__name__) @dataclass class __snake_case : __lowerCamelCase = field(metadata={"""help""": """Which column contains the label"""} ) __lowerCamelCase = field(default=_lowerCamelCase ,metadata={"""help""": """The path of the training file"""} ) __lowerCamelCase = field(default=_lowerCamelCase ,metadata={"""help""": """The path of the development file"""} ) __lowerCamelCase = field(default=_lowerCamelCase ,metadata={"""help""": """The path of the test file"""} ) __lowerCamelCase = field( default=128 ,metadata={ """help""": ( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } ,) __lowerCamelCase = field( default=_lowerCamelCase ,metadata={"""help""": """Overwrite the cached training and evaluation sets"""} ) @dataclass class __snake_case : __lowerCamelCase = field( metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} ) __lowerCamelCase = field( default=_lowerCamelCase ,metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} ) __lowerCamelCase = field( default=_lowerCamelCase ,metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} ) __lowerCamelCase = field(default=_lowerCamelCase ,metadata={"""help""": """Set this flag to use fast tokenization."""} ) # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script, # or just modify its tokenizer_config.json. __lowerCamelCase = field( default=_lowerCamelCase ,metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} ,) def UpperCamelCase__ ( ) -> Union[str, Any]: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. snake_case__ : int = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) ) snake_case__ , snake_case__ , snake_case__ : Dict = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use""" ' --overwrite_output_dir to overcome.' ) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO , ) logger.info( F"""n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, """ F"""16-bits training: {training_args.fpaa}""" ) logger.info(F"""Training/evaluation parameters {training_args}""" ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
snake_case__ : Dict = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) snake_case__ , snake_case__ , snake_case__ , snake_case__ : Dict = get_tfds( train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=A__ , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , ) snake_case__ : Dict = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(A__ ) , labelaid=A__ , idalabel={id: label for label, id in labelaid.items()} , finetuning_task='text-classification' , cache_dir=model_args.cache_dir , ) with training_args.strategy.scope(): snake_case__ : Optional[Any] = TFAutoModelForSequenceClassification.from_pretrained( model_args.model_name_or_path , from_pt=bool('.bin' in model_args.model_name_or_path ) , config=A__ , cache_dir=model_args.cache_dir , ) def compute_metrics(A__ ) -> Dict: snake_case__ : Optional[Any] = np.argmax(p.predictions , axis=1 ) return {"acc": (preds == p.label_ids).mean()} # Initialize our Trainer snake_case__ : Any = TFTrainer( model=A__ , args=A__ , train_dataset=A__ , eval_dataset=A__ , compute_metrics=A__ , ) # Training if training_args.do_train: trainer.train() trainer.save_model() tokenizer.save_pretrained(training_args.output_dir ) # Evaluation snake_case__ : Dict = {} if training_args.do_eval: logger.info('*** Evaluate ***' ) snake_case__ : Tuple = trainer.evaluate() snake_case__ : Any = os.path.join(training_args.output_dir , 'eval_results.txt' ) with open(A__ , 'w' ) as writer: logger.info('***** Eval results *****' ) for key, value in result.items(): logger.info(F""" {key} = {value}""" ) writer.write(F"""{key} = {value}\n""" ) results.update(A__ ) return results if __name__ == "__main__": main()
699
1
from argparse import ArgumentParser
from typing import Dict, Tuple

from . import BaseTransformersCLICommand


def UpperCamelCase__ ( A__ ) -> Tuple:
    return __snake_case(A__.model , A__.cache_dir , A__.force , A__.trust_remote_code )


class __snake_case ( BaseTransformersCLICommand ):
    @staticmethod
    def __a ( __UpperCamelCase ) -> Dict:
        '''simple docstring'''
        download_parser = __UpperCamelCase.add_parser('download' )
        download_parser.add_argument(
            '--cache-dir' , type=str , default=None , help='Path to location to store the models' )
        download_parser.add_argument(
            '--force' , action='store_true' , help='Force the model to be download even if already in cache-dir' )
        download_parser.add_argument(
            '--trust-remote-code' ,
            action='store_true' ,
            help='Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you\'ve reviewed the code as it will execute on your local machine' ,
        )
        download_parser.add_argument('model' , type=str , help='Name of the model to download' )
        download_parser.set_defaults(func=UpperCamelCase__ )

    def __init__( self , model , cache , force , trust_remote_code ) -> int:
        '''simple docstring'''
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code

    def __a ( self ) -> int:
        '''simple docstring'''
        from ..models.auto import AutoModel, AutoTokenizer

        AutoModel.from_pretrained(
            self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
        AutoTokenizer.from_pretrained(
            self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
699
from typing import List

import datasets
from datasets.tasks import AudioClassification

from ..folder_based_builder import folder_based_builder


lowerCAmelCase__ : List[Any] = datasets.utils.logging.get_logger(__name__)


class __snake_case ( folder_based_builder.FolderBasedBuilderConfig ):
    __lowerCamelCase = None
    __lowerCamelCase = None


class __snake_case ( folder_based_builder.FolderBasedBuilder ):
    __lowerCamelCase = datasets.Audio()
    __lowerCamelCase = """audio"""
    __lowerCamelCase = AudioFolderConfig
    __lowerCamelCase = 42  # definition at the bottom of the script
    __lowerCamelCase = AudioClassification(audio_column="""audio""" ,label_column="""label""" )


lowerCAmelCase__ : Tuple = [
    '''.aiff''',
    '''.au''',
    '''.avr''',
    '''.caf''',
    '''.flac''',
    '''.htk''',
    '''.svx''',
    '''.mat4''',
    '''.mat5''',
    '''.mpc2k''',
    '''.ogg''',
    '''.paf''',
    '''.pvf''',
    '''.raw''',
    '''.rf64''',
    '''.sd2''',
    '''.sds''',
    '''.ircam''',
    '''.voc''',
    '''.w64''',
    '''.wav''',
    '''.nist''',
    '''.wavex''',
    '''.wve''',
    '''.xi''',
    '''.mp3''',
    '''.opus''',
]
lowerCAmelCase__ : List[Any] = AUDIO_EXTENSIONS
699
1
import argparse import struct import unittest class __snake_case : def __init__( self , __UpperCamelCase ) -> None: '''simple docstring''' snake_case__ : Dict = data # Initialize hash values snake_case__ : Optional[Any] = [ 0x6A09_E667, 0xBB67_AE85, 0x3C6E_F372, 0xA54F_F53A, 0x510E_527F, 0x9B05_688C, 0x1F83_D9AB, 0x5BE0_CD19, ] # Initialize round constants snake_case__ : Optional[int] = [ 0x428A_2F98, 0x7137_4491, 0xB5C0_FBCF, 0xE9B5_DBA5, 0x3956_C25B, 0x59F1_11F1, 0x923F_82A4, 0xAB1C_5ED5, 0xD807_AA98, 0x1283_5B01, 0x2431_85BE, 0x550C_7DC3, 0x72BE_5D74, 0x80DE_B1FE, 0x9BDC_06A7, 0xC19B_F174, 0xE49B_69C1, 0xEFBE_4786, 0x0FC1_9DC6, 0x240C_A1CC, 0x2DE9_2C6F, 0x4A74_84AA, 0x5CB0_A9DC, 0x76F9_88DA, 0x983E_5152, 0xA831_C66D, 0xB003_27C8, 0xBF59_7FC7, 0xC6E0_0BF3, 0xD5A7_9147, 0x06CA_6351, 0x1429_2967, 0x27B7_0A85, 0x2E1B_2138, 0x4D2C_6DFC, 0x5338_0D13, 0x650A_7354, 0x766A_0ABB, 0x81C2_C92E, 0x9272_2C85, 0xA2BF_E8A1, 0xA81A_664B, 0xC24B_8B70, 0xC76C_51A3, 0xD192_E819, 0xD699_0624, 0xF40E_3585, 0x106A_A070, 0x19A4_C116, 0x1E37_6C08, 0x2748_774C, 0x34B0_BCB5, 0x391C_0CB3, 0x4ED8_AA4A, 0x5B9C_CA4F, 0x682E_6FF3, 0x748F_82EE, 0x78A5_636F, 0x84C8_7814, 0x8CC7_0208, 0x90BE_FFFA, 0xA450_6CEB, 0xBEF9_A3F7, 0xC671_78F2, ] snake_case__ : Optional[int] = self.preprocessing(self.data ) self.final_hash() @staticmethod def __a ( __UpperCamelCase ) -> bytes: '''simple docstring''' snake_case__ : List[str] = B'\x80' + (B'\x00' * (63 - (len(__UpperCamelCase ) + 8) % 64)) snake_case__ : List[Any] = struct.pack('>Q' , (len(__UpperCamelCase ) * 8) ) return data + padding + big_endian_integer def __a ( self ) -> None: '''simple docstring''' snake_case__ : Union[str, Any] = [ self.preprocessed_data[x : x + 64] for x in range(0 , len(self.preprocessed_data ) , 64 ) ] for block in self.blocks: # Convert the given block into a list of 4 byte integers snake_case__ : Tuple = list(struct.unpack('>16L' , __UpperCamelCase ) ) # add 48 0-ed integers words += [0] * 48 snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ : List[Any] = self.hashes for index in range(0 , 64 ): if index > 15: # modify the zero-ed indexes at the end of the array snake_case__ : List[str] = ( self.ror(words[index - 15] , 7 ) ^ self.ror(words[index - 15] , 18 ) ^ (words[index - 15] >> 3) ) snake_case__ : Any = ( self.ror(words[index - 2] , 17 ) ^ self.ror(words[index - 2] , 19 ) ^ (words[index - 2] >> 10) ) snake_case__ : List[str] = ( words[index - 16] + sa + words[index - 7] + sa ) % 0x1_0000_0000 # Compression snake_case__ : str = self.ror(__UpperCamelCase , 6 ) ^ self.ror(__UpperCamelCase , 11 ) ^ self.ror(__UpperCamelCase , 25 ) snake_case__ : List[str] = (e & f) ^ ((~e & 0xFFFF_FFFF) & g) snake_case__ : Optional[Any] = ( h + sa + ch + self.round_constants[index] + words[index] ) % 0x1_0000_0000 snake_case__ : Optional[int] = self.ror(__UpperCamelCase , 2 ) ^ self.ror(__UpperCamelCase , 13 ) ^ self.ror(__UpperCamelCase , 22 ) snake_case__ : Any = (a & b) ^ (a & c) ^ (b & c) snake_case__ : Optional[int] = (sa + maj) % 0x1_0000_0000 snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ : int = ( g, f, e, ((d + tempa) % 0x1_0000_0000), c, b, a, ((tempa + tempa) % 0x1_0000_0000), ) snake_case__ : Optional[Any] = [a, b, c, d, e, f, g, h] # Modify final values snake_case__ : Tuple = [ ((element + mutated_hash_values[index]) % 0x1_0000_0000) for index, element in enumerate(self.hashes ) ] snake_case__ : Optional[Any] = 
''.join([hex(value )[2:].zfill(8 ) for value in self.hashes] ) def __a ( self , value , rotations ) -> int: '''simple docstring''' return 0xFFFF_FFFF & (value << (32 - rotations)) | (value >> rotations) class __snake_case ( unittest.TestCase ): def __a ( self ) -> None: '''simple docstring''' import hashlib snake_case__ : List[Any] = bytes('Test String' , 'utf-8' ) self.assertEqual(SHAaaa(snake_case__ ).hash , hashlib.sha256(snake_case__ ).hexdigest() ) def UpperCamelCase__ ( ) -> None: import doctest doctest.testmod() parser : Optional[int] = argparse.ArgumentParser() parser.add_argument( '-s' , '--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , ) parser.add_argument( '-f' , '--file' , dest='input_file' , help='Hash contents of a file' ) args : List[Any] = parser.parse_args() hash_input : List[Any] = args.input_string # hash input should be a bytestring if args.input_file: with open(args.input_file , 'rb' ) as f: hash_input : Tuple = f.read() else: hash_input : Any = bytes(hash_input , 'utf-8' ) print(SHAaaa(hash_input ).hash ) if __name__ == "__main__": UpperCamelCase__()
699
import random import unittest import torch from diffusers import IFInpaintingPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class __snake_case ( _lowerCamelCase ,_lowerCamelCase ,unittest.TestCase ): __lowerCamelCase = IFInpaintingPipeline __lowerCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"""width""", """height"""} __lowerCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS __lowerCamelCase = PipelineTesterMixin.required_optional_params - {"""latents"""} def __a ( self ) -> Optional[Any]: '''simple docstring''' return self._get_dummy_components() def __a ( self , __UpperCamelCase , __UpperCamelCase=0 ) -> str: '''simple docstring''' if str(__UpperCamelCase ).startswith('mps' ): snake_case__ : int = torch.manual_seed(__UpperCamelCase ) else: snake_case__ : Union[str, Any] = torch.Generator(device=__UpperCamelCase ).manual_seed(__UpperCamelCase ) snake_case__ : Optional[int] = floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCamelCase ) ).to(__UpperCamelCase ) snake_case__ : Dict = floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCamelCase ) ).to(__UpperCamelCase ) snake_case__ : Optional[Any] = { 'prompt': 'A painting of a squirrel eating a burger', 'image': image, 'mask_image': mask_image, 'generator': generator, 'num_inference_steps': 2, 'output_type': 'numpy', } return inputs @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , ) def __a ( self ) -> List[Any]: '''simple docstring''' self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) def __a ( self ) -> Optional[int]: '''simple docstring''' self._test_save_load_optional_components() @unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' ) def __a ( self ) -> List[str]: '''simple docstring''' super().test_save_load_floataa(expected_max_diff=1E-1 ) def __a ( self ) -> List[str]: '''simple docstring''' self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 ) def __a ( self ) -> int: '''simple docstring''' self._test_save_load_local() def __a ( self ) -> List[str]: '''simple docstring''' self._test_inference_batch_single_identical( expected_max_diff=1E-2 , )
699
1
import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from timm.data import resolve_data_config from timm.data.transforms_factory import create_transform from transformers import ( BitConfig, ViTHybridConfig, ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel, ) from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase__ : str = logging.get_logger(__name__) def UpperCamelCase__ ( A__ , A__=False ) -> Union[str, Any]: snake_case__ : str = [] # fmt: off # stem: rename_keys.append(('cls_token', 'vit.embeddings.cls_token') ) rename_keys.append(('pos_embed', 'vit.embeddings.position_embeddings') ) rename_keys.append(('patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight') ) rename_keys.append(('patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias') ) # backbone rename_keys.append(('patch_embed.backbone.stem.conv.weight', 'vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight') ) rename_keys.append(('patch_embed.backbone.stem.norm.weight', 'vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight') ) rename_keys.append(('patch_embed.backbone.stem.norm.bias', 'vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias') ) for stage_idx in range(len(config.backbone_config.depths ) ): for layer_idx in range(config.backbone_config.depths[stage_idx] ): rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight""") ) rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight""") ) rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias""") ) rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight""") ) rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight""") ) rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias""") ) rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight""") ) rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight""") ) rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias""") ) 
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight""") ) rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight""") ) rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias""") ) # transformer encoder for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") ) rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") ) rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") ) rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") ) rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") ) rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") ) rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") ) rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") ) rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") ) rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") ) if base_model: # layernorm + pooler rename_keys.extend( [ ('norm.weight', 'layernorm.weight'), ('norm.bias', 'layernorm.bias'), ('pre_logits.fc.weight', 'pooler.dense.weight'), ('pre_logits.fc.bias', 'pooler.dense.bias'), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" snake_case__ : List[str] = [(pair[0], pair[1][4:]) if pair[1].startswith('vit' ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ('norm.weight', 'vit.layernorm.weight'), ('norm.bias', 'vit.layernorm.bias'), ('head.weight', 'classifier.weight'), ('head.bias', 'classifier.bias'), ] ) # fmt: on return rename_keys def UpperCamelCase__ ( A__ , A__ , A__=False ) -> Dict: for i in range(config.num_hidden_layers ): if base_model: snake_case__ : str = '' else: snake_case__ : str = 'vit.' 
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias) snake_case__ : Tuple = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" ) snake_case__ : Tuple = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict snake_case__ : Any = in_proj_weight[ : config.hidden_size, : ] snake_case__ : Union[str, Any] = in_proj_bias[: config.hidden_size] snake_case__ : int = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] snake_case__ : Tuple = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] snake_case__ : List[Any] = in_proj_weight[ -config.hidden_size :, : ] snake_case__ : Optional[Any] = in_proj_bias[-config.hidden_size :] def UpperCamelCase__ ( A__ ) -> Optional[Any]: snake_case__ : Dict = ['head.weight', 'head.bias'] for k in ignore_keys: state_dict.pop(A__ , A__ ) def UpperCamelCase__ ( A__ , A__ , A__ ) -> Optional[Any]: snake_case__ : str = dct.pop(A__ ) snake_case__ : Dict = val def UpperCamelCase__ ( ) -> Tuple: snake_case__ : List[str] = 'http://images.cocodataset.org/val2017/000000039769.jpg' snake_case__ : Union[str, Any] = Image.open(requests.get(A__ , stream=A__ ).raw ) return im @torch.no_grad() def UpperCamelCase__ ( A__ , A__ , A__=False ) -> Optional[Any]: snake_case__ : Optional[int] = BitConfig( global_padding='same' , layer_type='bottleneck' , depths=(3, 4, 9) , out_features=['stage3'] , embedding_dynamic_padding=A__ , ) snake_case__ : Any = ViTHybridConfig(backbone_config=A__ , image_size=384 , num_labels=1000 ) snake_case__ : List[Any] = False # load original model from timm snake_case__ : Any = timm.create_model(A__ , pretrained=A__ ) timm_model.eval() # load state_dict of original model, remove and rename some keys snake_case__ : Dict = timm_model.state_dict() if base_model: remove_classification_head_(A__ ) snake_case__ : Tuple = create_rename_keys(A__ , A__ ) for src, dest in rename_keys: rename_key(A__ , A__ , A__ ) read_in_q_k_v(A__ , A__ , A__ ) snake_case__ : Any = 'huggingface/label-files' snake_case__ : Union[str, Any] = 'imagenet-1k-id2label.json' snake_case__ : Dict = json.load(open(hf_hub_download(A__ , A__ , repo_type='dataset' ) , 'r' ) ) snake_case__ : Union[str, Any] = {int(A__ ): v for k, v in idalabel.items()} snake_case__ : Union[str, Any] = idalabel snake_case__ : Tuple = {v: k for k, v in idalabel.items()} # load HuggingFace model if vit_name[-5:] == "in21k": snake_case__ : Tuple = ViTHybridModel(A__ ).eval() else: snake_case__ : List[Any] = ViTHybridForImageClassification(A__ ).eval() model.load_state_dict(A__ ) # create image processor snake_case__ : int = create_transform(**resolve_data_config({} , model=A__ ) ) snake_case__ : List[Any] = transform.transforms snake_case__ : List[Any] = { 'bilinear': PILImageResampling.BILINEAR, 'bicubic': PILImageResampling.BICUBIC, 'nearest': PILImageResampling.NEAREST, } snake_case__ : Union[str, Any] = ViTHybridImageProcessor( do_resize=A__ , size={'shortest_edge': timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=A__ , crop_size={'height': timm_transforms[1].size[0], 'width': timm_transforms[1].size[1]} , do_normalize=A__ , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , ) snake_case__ : Union[str, Any] = prepare_img() snake_case__ : List[Any] = transform(A__ ).unsqueeze(0 ) snake_case__ : List[str] = processor(A__ , return_tensors='pt' ).pixel_values # verify pixel values assert 
torch.allclose(A__ , A__ ) # verify logits with torch.no_grad(): snake_case__ : str = model(A__ ) snake_case__ : List[str] = outputs.logits print('Predicted class:' , logits.argmax(-1 ).item() ) if base_model: snake_case__ : Optional[int] = timm_model.forward_features(A__ ) assert timm_pooled_output.shape == outputs.pooler_output.shape assert torch.allclose(A__ , outputs.pooler_output , atol=1e-3 ) else: snake_case__ : Optional[int] = timm_model(A__ ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(A__ , outputs.logits , atol=1e-3 ) print('Looks ok!' ) if pytorch_dump_folder_path is not None: Path(A__ ).mkdir(exist_ok=A__ ) print(F"""Saving model {vit_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(A__ ) print(F"""Saving processor to {pytorch_dump_folder_path}""" ) processor.save_pretrained(A__ ) if push_to_hub: print(F"""Pushing model and processor to the hub {vit_name}""" ) model.push_to_hub(F"""ybelkada/{vit_name}""" ) processor.push_to_hub(F"""ybelkada/{vit_name}""" ) if __name__ == "__main__": lowerCAmelCase__ : str = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--vit_name''', default='''vit_base_r50_s16_384''', type=str, help='''Name of the hybrid ViT timm model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether to upload the model to the HuggingFace hub.''' ) lowerCAmelCase__ : Optional[Any] = parser.parse_args() convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
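# Usage sketch (an addition, not part of the original script). Note the defs in
# this dump are all mangled to UpperCamelCase__, while the __main__ block calls
# the entry point as convert_vit_checkpoint — that is the name to restore. The
# script filename below is hypothetical; the flags and the default checkpoint
# name come from the argparse definitions above.
#
#   python convert_vit_hybrid_timm_to_pytorch.py \
#       --vit_name vit_base_r50_s16_384 \
#       --pytorch_dump_folder_path ./vit-hybrid-base-bit-384 \
#       --push_to_hub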
import unittest from transformers import BertGenerationTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin lowerCAmelCase__ : List[Any] = '''▁''' lowerCAmelCase__ : int = get_tests_dir('''fixtures/test_sentencepiece.model''') @require_sentencepiece class __snake_case ( _lowerCamelCase ,unittest.TestCase ): __lowerCamelCase = BertGenerationTokenizer __lowerCamelCase = False __lowerCamelCase = True def __a ( self ) -> Optional[int]: '''simple docstring''' super().setUp() snake_case__ : str = BertGenerationTokenizer(__UpperCamelCase , keep_accents=__UpperCamelCase ) tokenizer.save_pretrained(self.tmpdirname ) def __a ( self ) -> Optional[int]: '''simple docstring''' snake_case__ : List[str] = '<s>' snake_case__ : Dict = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCamelCase ) , __UpperCamelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCamelCase ) , __UpperCamelCase ) def __a ( self ) -> List[str]: '''simple docstring''' snake_case__ : Tuple = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '<unk>' ) self.assertEqual(vocab_keys[1] , '<s>' ) self.assertEqual(vocab_keys[-1] , '<pad>' ) self.assertEqual(len(__UpperCamelCase ) , 1002 ) def __a ( self ) -> int: '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 1000 ) def __a ( self ) -> Tuple: '''simple docstring''' snake_case__ : Optional[Any] = BertGenerationTokenizer(__UpperCamelCase , keep_accents=__UpperCamelCase ) snake_case__ : int = tokenizer.tokenize('This is a test' ) self.assertListEqual(__UpperCamelCase , ['▁This', '▁is', '▁a', '▁t', 'est'] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(__UpperCamelCase ) , [285, 46, 10, 170, 382] , ) snake_case__ : Any = tokenizer.tokenize('I was born in 92000, and this is falsé.' ) self.assertListEqual( __UpperCamelCase , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.', ] , ) snake_case__ : Optional[Any] = tokenizer.convert_tokens_to_ids(__UpperCamelCase ) self.assertListEqual( __UpperCamelCase , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , ) snake_case__ : int = tokenizer.convert_ids_to_tokens(__UpperCamelCase ) self.assertListEqual( __UpperCamelCase , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.', ] , ) @cached_property def __a ( self ) -> Dict: '''simple docstring''' return BertGenerationTokenizer.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' ) @slow def __a ( self ) -> Any: '''simple docstring''' snake_case__ : int = 'Hello World!' snake_case__ : Union[str, Any] = [18536, 2260, 101] self.assertListEqual(__UpperCamelCase , self.big_tokenizer.encode(__UpperCamelCase ) ) @slow def __a ( self ) -> Optional[int]: '''simple docstring''' snake_case__ : str = ( 'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . 
Also we will' ' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth' ) snake_case__ : List[Any] = [ 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172, ] self.assertListEqual(__UpperCamelCase , self.big_tokenizer.encode(__UpperCamelCase ) ) @require_torch @slow def __a ( self ) -> List[str]: '''simple docstring''' import torch from transformers import BertGenerationConfig, BertGenerationEncoder # Build sequence snake_case__ : Optional[int] = list(self.big_tokenizer.get_vocab().keys() )[:10] snake_case__ : Optional[int] = ' '.join(__UpperCamelCase ) snake_case__ : int = self.big_tokenizer.encode_plus(__UpperCamelCase , return_tensors='pt' , return_token_type_ids=__UpperCamelCase ) snake_case__ : Tuple = self.big_tokenizer.batch_encode_plus( [sequence + ' ' + sequence] , return_tensors='pt' , return_token_type_ids=__UpperCamelCase ) snake_case__ : Dict = BertGenerationConfig() snake_case__ : List[str] = BertGenerationEncoder(__UpperCamelCase ) assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size with torch.no_grad(): model(**__UpperCamelCase ) model(**__UpperCamelCase ) @slow def __a ( self ) -> Dict: '''simple docstring''' snake_case__ : Optional[int] = {'input_ids': [[39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114], [448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__UpperCamelCase , model_name='google/bert_for_seq_generation_L-24_bbc_encoder' , revision='c817d1fd1be2ffa69431227a1fe320544943d4db' , )
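# Standalone round-trip sketch (an addition, assumes network access): it mirrors
# what the slow test above asserts, using the same public checkpoint and the
# expected ids taken verbatim from that test.
from transformers import BertGenerationTokenizer

big_tokenizer = BertGenerationTokenizer.from_pretrained(
    "google/bert_for_seq_generation_L-24_bbc_encoder"
)
assert big_tokenizer.encode("Hello World!") == [18536, 2260, 101]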
import argparse import glob import logging import os from argparse import Namespace from importlib import import_module import numpy as np import torch from lightning_base import BaseTransformer, add_generic_args, generic_train from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score from torch.nn import CrossEntropyLoss from torch.utils.data import DataLoader, TensorDataset from utils_ner import TokenClassificationTask lowerCAmelCase__ : Union[str, Any] = logging.getLogger(__name__) class __snake_case ( _lowerCamelCase ): __lowerCamelCase = """token-classification""" def __init__( self , __UpperCamelCase ) -> List[str]: '''simple docstring''' if type(__UpperCamelCase ) == dict: snake_case__ : Dict = Namespace(**__UpperCamelCase ) snake_case__ : Any = import_module('tasks' ) try: snake_case__ : Tuple = getattr(__UpperCamelCase , hparams.task_type ) snake_case__ : TokenClassificationTask = token_classification_task_clazz() except AttributeError: raise ValueError( F"""Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. """ F"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""" ) snake_case__ : List[str] = self.token_classification_task.get_labels(hparams.labels ) snake_case__ : Tuple = CrossEntropyLoss().ignore_index super().__init__(__UpperCamelCase , len(self.labels ) , self.mode ) def __a ( self , **__UpperCamelCase ) -> List[str]: '''simple docstring''' return self.model(**__UpperCamelCase ) def __a ( self , __UpperCamelCase , __UpperCamelCase ) -> Tuple: '''simple docstring''' snake_case__ : int = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]} if self.config.model_type != "distilbert": snake_case__ : Union[str, Any] = ( batch[2] if self.config.model_type in ['bert', 'xlnet'] else None ) # XLM and RoBERTa don"t use token_type_ids snake_case__ : Optional[int] = self(**__UpperCamelCase ) snake_case__ : Any = outputs[0] # tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]} return {"loss": loss} def __a ( self ) -> Union[str, Any]: '''simple docstring''' snake_case__ : List[str] = self.hparams for mode in ["train", "dev", "test"]: snake_case__ : Any = self._feature_file(__UpperCamelCase ) if os.path.exists(__UpperCamelCase ) and not args.overwrite_cache: logger.info('Loading features from cached file %s' , __UpperCamelCase ) snake_case__ : List[str] = torch.load(__UpperCamelCase ) else: logger.info('Creating features from dataset file at %s' , args.data_dir ) snake_case__ : int = self.token_classification_task.read_examples_from_file(args.data_dir , __UpperCamelCase ) snake_case__ : Any = self.token_classification_task.convert_examples_to_features( __UpperCamelCase , self.labels , args.max_seq_length , self.tokenizer , cls_token_at_end=bool(self.config.model_type in ['xlnet'] ) , cls_token=self.tokenizer.cls_token , cls_token_segment_id=2 if self.config.model_type in ['xlnet'] else 0 , sep_token=self.tokenizer.sep_token , sep_token_extra=__UpperCamelCase , pad_on_left=bool(self.config.model_type in ['xlnet'] ) , pad_token=self.tokenizer.pad_token_id , pad_token_segment_id=self.tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , ) logger.info('Saving features into cached file %s' , __UpperCamelCase ) torch.save(__UpperCamelCase , __UpperCamelCase ) def __a ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = False ) -> DataLoader: '''simple docstring''' snake_case__ : Union[str, Any] = 
self._feature_file(__UpperCamelCase ) logger.info('Loading features from cached file %s' , __UpperCamelCase ) snake_case__ : str = torch.load(__UpperCamelCase ) snake_case__ : Union[str, Any] = torch.tensor([f.input_ids for f in features] , dtype=torch.long ) snake_case__ : Optional[int] = torch.tensor([f.attention_mask for f in features] , dtype=torch.long ) if features[0].token_type_ids is not None: snake_case__ : str = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long ) else: snake_case__ : Tuple = torch.tensor([0 for f in features] , dtype=torch.long ) # HACK(we will not use this anymore soon) snake_case__ : Union[str, Any] = torch.tensor([f.label_ids for f in features] , dtype=torch.long ) return DataLoader( TensorDataset(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) , batch_size=__UpperCamelCase ) def __a ( self , __UpperCamelCase , __UpperCamelCase ) -> List[str]: '''simple docstring''' """Compute validation""" "" snake_case__ : List[Any] = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]} if self.config.model_type != "distilbert": snake_case__ : Tuple = ( batch[2] if self.config.model_type in ['bert', 'xlnet'] else None ) # XLM and RoBERTa don"t use token_type_ids snake_case__ : Optional[Any] = self(**__UpperCamelCase ) snake_case__ , snake_case__ : str = outputs[:2] snake_case__ : Optional[Any] = logits.detach().cpu().numpy() snake_case__ : Union[str, Any] = inputs['labels'].detach().cpu().numpy() return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids} def __a ( self , __UpperCamelCase ) -> Optional[Any]: '''simple docstring''' snake_case__ : str = torch.stack([x['val_loss'] for x in outputs] ).mean() snake_case__ : Optional[Any] = np.concatenate([x['pred'] for x in outputs] , axis=0 ) snake_case__ : Union[str, Any] = np.argmax(__UpperCamelCase , axis=2 ) snake_case__ : List[str] = np.concatenate([x['target'] for x in outputs] , axis=0 ) snake_case__ : Optional[int] = dict(enumerate(self.labels ) ) snake_case__ : Dict = [[] for _ in range(out_label_ids.shape[0] )] snake_case__ : int = [[] for _ in range(out_label_ids.shape[0] )] for i in range(out_label_ids.shape[0] ): for j in range(out_label_ids.shape[1] ): if out_label_ids[i, j] != self.pad_token_label_id: out_label_list[i].append(label_map[out_label_ids[i][j]] ) preds_list[i].append(label_map[preds[i][j]] ) snake_case__ : List[Any] = { 'val_loss': val_loss_mean, 'accuracy_score': accuracy_score(__UpperCamelCase , __UpperCamelCase ), 'precision': precision_score(__UpperCamelCase , __UpperCamelCase ), 'recall': recall_score(__UpperCamelCase , __UpperCamelCase ), 'f1': fa_score(__UpperCamelCase , __UpperCamelCase ), } snake_case__ : Optional[Any] = dict(results.items() ) snake_case__ : str = results return ret, preds_list, out_label_list def __a ( self , __UpperCamelCase ) -> List[str]: '''simple docstring''' snake_case__ , snake_case__ , snake_case__ : List[Any] = self._eval_end(__UpperCamelCase ) snake_case__ : Any = ret['log'] return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs} def __a ( self , __UpperCamelCase ) -> Union[str, Any]: '''simple docstring''' snake_case__ , snake_case__ , snake_case__ : Union[str, Any] = self._eval_end(__UpperCamelCase ) # Converting to the dict required by pl # https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\ # pytorch_lightning/trainer/logging.py#L139 snake_case__ : Dict = ret['log'] # `val_loss` is the key returned by `self._eval_end()` but actually 
refers to `test_loss` return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs} @staticmethod def __a ( __UpperCamelCase , __UpperCamelCase ) -> Union[str, Any]: '''simple docstring''' BaseTransformer.add_model_specific_args(__UpperCamelCase , __UpperCamelCase ) parser.add_argument( '--task_type' , default='NER' , type=__UpperCamelCase , help='Task type to fine tune in training (e.g. NER, POS, etc)' ) parser.add_argument( '--max_seq_length' , default=128 , type=__UpperCamelCase , help=( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) , ) parser.add_argument( '--labels' , default='' , type=__UpperCamelCase , help='Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.' , ) parser.add_argument( '--gpus' , default=0 , type=__UpperCamelCase , help='The number of GPUs allocated for this, it is by default 0 meaning none' , ) parser.add_argument( '--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets' ) return parser if __name__ == "__main__": lowerCAmelCase__ : str = argparse.ArgumentParser() add_generic_args(parser, os.getcwd()) lowerCAmelCase__ : List[str] = NERTransformer.add_model_specific_args(parser, os.getcwd()) lowerCAmelCase__ : Union[str, Any] = parser.parse_args() lowerCAmelCase__ : str = NERTransformer(args) lowerCAmelCase__ : Union[str, Any] = generic_train(model, args) if args.do_predict: # See https://github.com/huggingface/transformers/issues/3159 # pl use this default format to create a checkpoint: # https://github.com/PyTorchLightning/pytorch-lightning/blob/master\ # /pytorch_lightning/callbacks/model_checkpoint.py#L322 lowerCAmelCase__ : List[str] = sorted(glob.glob(os.path.join(args.output_dir, '''checkpoint-epoch=*.ckpt'''), recursive=True)) lowerCAmelCase__ : Optional[Any] = model.load_from_checkpoint(checkpoints[-1]) trainer.test(model)
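# Invocation sketch (an addition): the module above is launched through the
# lightning_base helpers, combining generic flags (data_dir, output_dir,
# do_predict, ...) with the task flags registered in add_model_specific_args.
# The script name and dataset layout are assumptions. Also note that seqeval
# exports its metric as f1_score; the "fa_score" import above looks like a
# mangled form of that name.
#
#   python run_ner.py \
#       --data_dir ./conll2003 \
#       --labels ./conll2003/labels.txt \
#       --task_type NER \
#       --max_seq_length 128 \
#       --output_dir ./ner-output \
#       --gpus 1 \
#       --do_predict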
import random import torch from huggingface_hub import HfApi from diffusers import UNetaDModel lowerCAmelCase__ : List[str] = HfApi() lowerCAmelCase__ : str = {} # fmt: off lowerCAmelCase__ : int = torch.tensor([ -0.75_15, -1.68_83, 0.24_20, 0.03_00, 0.63_47, 1.34_33, -1.17_43, -3.74_67, 1.23_42, -2.24_85, 0.46_36, 0.80_76, -0.79_91, 0.39_69, 0.84_98, 0.91_89, -1.88_87, -3.35_22, 0.76_39, 0.20_40, 0.62_71, -2.71_48, -1.63_16, 3.08_39, 0.31_86, 0.27_21, -0.97_59, -1.24_61, 2.62_57, 1.35_57 ]) lowerCAmelCase__ : Dict = torch.tensor([ -2.36_39, -2.53_44, 0.00_54, -0.66_74, 1.59_90, 1.01_58, 0.31_24, -2.14_36, 1.87_95, -2.54_29, -0.15_66, -0.39_73, 1.24_90, 2.64_47, 1.22_83, -0.52_08, -2.81_54, -3.51_19, 2.38_38, 1.20_33, 1.72_01, -2.12_56, -1.45_76, 2.79_48, 2.42_04, -0.97_52, -1.25_46, 0.80_27, 3.27_58, 3.13_65 ]) lowerCAmelCase__ : Dict = torch.tensor([ -0.65_31, -0.68_91, -0.31_72, -0.53_75, -0.91_40, -0.53_67, -0.11_75, -0.78_69, -0.38_08, -0.45_13, -0.20_98, -0.00_83, 0.31_83, 0.51_40, 0.22_47, -0.13_04, -0.13_02, -0.28_02, -0.20_84, -0.20_25, -0.49_67, -0.48_73, -0.08_61, 0.69_25, 0.02_50, 0.12_90, -0.15_43, 0.63_16, 1.04_60, 1.49_43 ]) lowerCAmelCase__ : List[str] = torch.tensor([ 0.09_11, 0.11_07, 0.01_82, 0.04_35, -0.08_05, -0.06_08, 0.03_81, 0.21_72, -0.02_80, 0.13_27, -0.02_99, -0.02_55, -0.00_50, -0.11_70, -0.10_46, 0.03_09, 0.13_67, 0.17_28, -0.05_33, -0.07_48, -0.05_34, 0.16_24, 0.03_84, -0.18_05, -0.07_07, 0.06_42, 0.02_20, -0.01_34, -0.13_33, -0.15_05 ]) lowerCAmelCase__ : Union[str, Any] = torch.tensor([ 0.13_21, 0.13_37, 0.04_40, 0.06_22, -0.05_91, -0.03_70, 0.05_03, 0.21_33, -0.01_77, 0.14_15, -0.01_16, -0.01_12, 0.00_44, -0.09_80, -0.07_89, 0.03_95, 0.15_02, 0.17_85, -0.04_88, -0.05_14, -0.04_04, 0.15_39, 0.04_54, -0.15_59, -0.06_65, 0.06_59, 0.03_83, -0.00_05, -0.12_66, -0.13_86 ]) lowerCAmelCase__ : List[Any] = torch.tensor([ 0.11_54, 0.12_18, 0.03_07, 0.05_26, -0.07_11, -0.05_41, 0.03_66, 0.20_78, -0.02_67, 0.13_17, -0.02_26, -0.01_93, -0.00_14, -0.10_55, -0.09_02, 0.03_30, 0.13_91, 0.17_09, -0.05_62, -0.06_93, -0.05_60, 0.14_82, 0.03_81, -0.16_83, -0.06_81, 0.06_61, 0.03_31, -0.00_46, -0.12_68, -0.14_31 ]) lowerCAmelCase__ : Optional[Any] = torch.tensor([ 0.11_92, 0.12_40, 0.04_14, 0.06_06, -0.05_57, -0.04_12, 0.04_30, 0.20_42, -0.02_00, 0.13_85, -0.01_15, -0.01_32, 0.00_17, -0.09_65, -0.08_02, 0.03_98, 0.14_33, 0.17_47, -0.04_58, -0.05_33, -0.04_07, 0.15_45, 0.04_19, -0.15_74, -0.06_45, 0.06_26, 0.03_41, -0.00_10, -0.11_99, -0.13_90 ]) lowerCAmelCase__ : List[str] = torch.tensor([ 0.10_75, 0.10_74, 0.02_05, 0.04_31, -0.07_74, -0.06_07, 0.02_98, 0.20_42, -0.03_20, 0.12_67, -0.02_81, -0.02_50, -0.00_64, -0.10_91, -0.09_46, 0.02_90, 0.13_28, 0.16_50, -0.05_80, -0.07_38, -0.05_86, 0.14_40, 0.03_37, -0.17_46, -0.07_12, 0.06_05, 0.02_50, -0.00_99, -0.13_16, -0.14_73 ]) lowerCAmelCase__ : List[str] = torch.tensor([ -1.45_72, -2.04_81, -0.04_14, -0.60_05, 1.41_36, 0.58_48, 0.40_28, -2.73_30, 1.22_12, -2.12_28, 0.21_55, 0.40_39, 0.76_62, 2.05_35, 0.74_77, -0.32_43, -2.17_58, -2.76_48, 1.69_47, 0.70_26, 1.23_38, -1.60_78, -0.86_82, 2.28_10, 1.85_74, -0.57_18, -0.55_86, -0.01_86, 2.34_15, 2.12_51]) lowerCAmelCase__ : List[Any] = torch.tensor([ -1.36_90, -1.97_20, -0.40_90, -0.69_66, 1.46_60, 0.99_38, -0.13_85, -2.73_24, 0.77_36, -1.89_17, 0.29_23, 0.42_93, 0.16_93, 1.41_12, 1.18_87, -0.31_81, -2.21_60, -2.63_81, 1.31_70, 0.81_63, 0.92_40, -1.65_44, -0.60_99, 2.52_59, 1.64_30, -0.90_90, -0.93_92, -0.01_26, 2.42_68, 2.32_66 ]) lowerCAmelCase__ : Tuple = torch.tensor([ -1.35_25, 
-1.96_28, -0.39_56, -0.68_60, 1.46_64, 1.00_14, -0.12_59, -2.72_12, 0.77_72, -1.88_11, 0.29_96, 0.43_88, 0.17_04, 1.40_29, 1.17_01, -0.30_27, -2.20_53, -2.62_87, 1.33_50, 0.81_31, 0.92_74, -1.62_92, -0.60_98, 2.51_31, 1.65_05, -0.89_58, -0.92_98, -0.01_51, 2.42_57, 2.33_55 ]) lowerCAmelCase__ : List[str] = torch.tensor([ -2.05_85, -2.78_97, -0.28_50, -0.89_40, 1.90_52, 0.57_02, 0.63_45, -3.89_59, 1.59_32, -3.23_19, 0.19_74, 0.02_87, 1.75_66, 2.65_43, 0.83_87, -0.53_51, -3.27_36, -4.33_75, 2.90_29, 1.63_90, 1.46_40, -2.17_01, -1.90_13, 2.93_41, 3.49_81, -0.62_55, -1.16_44, -0.15_91, 3.70_97, 3.20_66 ]) lowerCAmelCase__ : Dict = torch.tensor([ -2.31_39, -2.55_94, -0.01_97, -0.67_85, 1.70_01, 1.16_06, 0.30_75, -2.17_40, 1.80_71, -2.56_30, -0.09_26, -0.38_11, 1.21_16, 2.62_46, 1.27_31, -0.53_98, -2.81_53, -3.61_40, 2.38_93, 1.32_62, 1.62_58, -2.18_56, -1.32_67, 2.83_95, 2.37_79, -1.06_23, -1.24_68, 0.89_59, 3.33_67, 3.22_43 ]) lowerCAmelCase__ : Dict = torch.tensor([ -2.06_28, -2.76_67, -0.20_89, -0.82_63, 2.05_39, 0.59_92, 0.64_95, -3.83_36, 1.60_25, -3.28_17, 0.17_21, -0.06_33, 1.75_16, 2.70_39, 0.81_00, -0.59_08, -3.21_13, -4.43_43, 2.92_57, 1.36_32, 1.55_62, -2.14_89, -1.98_94, 3.05_60, 3.33_96, -0.73_28, -1.04_17, 0.03_83, 3.70_93, 3.23_43 ]) lowerCAmelCase__ : Any = torch.tensor([ -1.45_74, -2.05_69, -0.04_73, -0.61_17, 1.40_18, 0.57_69, 0.41_29, -2.73_44, 1.22_41, -2.13_97, 0.20_00, 0.39_37, 0.76_16, 2.04_53, 0.73_24, -0.33_91, -2.17_46, -2.77_44, 1.69_63, 0.69_21, 1.21_87, -1.61_72, -0.88_77, 2.24_39, 1.84_71, -0.58_39, -0.56_05, -0.04_64, 2.32_50, 2.12_19 ]) # fmt: on lowerCAmelCase__ : Any = api.list_models(filter='''diffusers''') for mod in models: if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256": lowerCAmelCase__ : List[str] = '''/home/patrick/google_checkpoints/''' + mod.modelId.split('''/''')[-1] print(F'''Started running {mod.modelId}!!!''') if mod.modelId.startswith('''CompVis'''): lowerCAmelCase__ : int = UNetaDModel.from_pretrained(local_checkpoint, subfolder='''unet''') else: lowerCAmelCase__ : str = UNetaDModel.from_pretrained(local_checkpoint) torch.manual_seed(0) random.seed(0) lowerCAmelCase__ : Any = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size) lowerCAmelCase__ : List[str] = torch.tensor([10] * noise.shape[0]) with torch.no_grad(): lowerCAmelCase__ : int = model(noise, time_step).sample assert torch.allclose( logits[0, 0, 0, :30], results['''_'''.join('''_'''.join(mod.modelId.split('''/''')).split('''-'''))], atol=1E-3 ) print(F'''{mod.modelId} has passed successfully!!!''')
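# Single-model spot check (an addition) instead of the full sweep above. Note
# that diffusers exports the class as UNet2DModel; the "UNetaDModel" spelling
# above looks mangled. The checkpoint id is one example of a "google" diffusers
# model the loop would pick up.
import torch
from diffusers import UNet2DModel

model = UNet2DModel.from_pretrained("google/ddpm-cifar10-32")
torch.manual_seed(0)
noise = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
with torch.no_grad():
    logits = model(noise, torch.tensor([10] * noise.shape[0])).sample
print(logits[0, 0, 0, :30])  # compare by eye against the matching expected slice above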
import copy import inspect import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import VideoMAEConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEForPreTraining, VideoMAEForVideoClassification, VideoMAEModel, ) from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from transformers import VideoMAEImageProcessor class __snake_case : def __init__( self , __UpperCamelCase , __UpperCamelCase=13 , __UpperCamelCase=10 , __UpperCamelCase=3 , __UpperCamelCase=2 , __UpperCamelCase=2 , __UpperCamelCase=2 , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=32 , __UpperCamelCase=5 , __UpperCamelCase=4 , __UpperCamelCase=37 , __UpperCamelCase="gelu" , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=10 , __UpperCamelCase=0.0_2 , __UpperCamelCase=0.9 , __UpperCamelCase=None , ) -> int: '''simple docstring''' snake_case__ : List[Any] = parent snake_case__ : Union[str, Any] = batch_size snake_case__ : Any = image_size snake_case__ : Union[str, Any] = num_channels snake_case__ : Union[str, Any] = patch_size snake_case__ : List[Any] = tubelet_size snake_case__ : List[str] = num_frames snake_case__ : Dict = is_training snake_case__ : Optional[Any] = use_labels snake_case__ : Dict = hidden_size snake_case__ : Optional[Any] = num_hidden_layers snake_case__ : int = num_attention_heads snake_case__ : Tuple = intermediate_size snake_case__ : Dict = hidden_act snake_case__ : Dict = hidden_dropout_prob snake_case__ : Dict = attention_probs_dropout_prob snake_case__ : Optional[Any] = type_sequence_label_size snake_case__ : Optional[int] = initializer_range snake_case__ : List[str] = mask_ratio snake_case__ : Tuple = scope # in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame snake_case__ : str = (image_size // patch_size) ** 2 snake_case__ : Dict = (num_frames // tubelet_size) * self.num_patches_per_frame # use this variable to define bool_masked_pos snake_case__ : List[Any] = int(mask_ratio * self.seq_length ) def __a ( self ) -> Tuple: '''simple docstring''' snake_case__ : Union[str, Any] = floats_tensor( [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] ) snake_case__ : Union[str, Any] = None if self.use_labels: snake_case__ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) snake_case__ : List[Any] = self.get_config() return config, pixel_values, labels def __a ( self ) -> Optional[int]: '''simple docstring''' return VideoMAEConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , tubelet_size=self.tubelet_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , 
is_decoder=__UpperCamelCase , initializer_range=self.initializer_range , ) def __a ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> int: '''simple docstring''' snake_case__ : Any = VideoMAEModel(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() snake_case__ : Dict = model(__UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __a ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> List[Any]: '''simple docstring''' snake_case__ : Union[str, Any] = VideoMAEForPreTraining(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() # important: each video needs to have the same number of masked patches # hence we define a single mask, which we then repeat for each example in the batch snake_case__ : Any = torch.ones((self.num_masks,) ) snake_case__ : str = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] ) snake_case__ : Union[str, Any] = mask.expand(self.batch_size , -1 ).bool() snake_case__ : str = model(__UpperCamelCase , __UpperCamelCase ) # model only returns predictions for masked patches snake_case__ : str = mask.sum().item() snake_case__ : Dict = 3 * self.tubelet_size * self.patch_size**2 self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels) ) def __a ( self ) -> Any: '''simple docstring''' snake_case__ : List[Any] = self.prepare_config_and_inputs() snake_case__ , snake_case__ , snake_case__ : Optional[int] = config_and_inputs snake_case__ : List[str] = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class __snake_case ( _lowerCamelCase ,_lowerCamelCase ,unittest.TestCase ): __lowerCamelCase = ( (VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else () ) __lowerCamelCase = ( {"""feature-extraction""": VideoMAEModel, """video-classification""": VideoMAEForVideoClassification} if is_torch_available() else {} ) __lowerCamelCase = False __lowerCamelCase = False __lowerCamelCase = False __lowerCamelCase = False def __a ( self ) -> List[Any]: '''simple docstring''' snake_case__ : Optional[Any] = VideoMAEModelTester(self ) snake_case__ : Optional[Any] = ConfigTester(self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase , hidden_size=37 ) def __a ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=False ) -> Optional[Any]: '''simple docstring''' snake_case__ : List[Any] = copy.deepcopy(__UpperCamelCase ) if model_class == VideoMAEForPreTraining: # important: each video needs to have the same number of masked patches # hence we define a single mask, which we then repeat for each example in the batch snake_case__ : int = torch.ones((self.model_tester.num_masks,) ) snake_case__ : Optional[int] = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] ) snake_case__ : List[Any] = mask.expand(self.model_tester.batch_size , -1 ).bool() snake_case__ : List[str] = bool_masked_pos.to(__UpperCamelCase ) if return_labels: if model_class in [ *get_values(__UpperCamelCase ), ]: snake_case__ : int = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__UpperCamelCase ) return inputs_dict def __a ( self ) -> List[Any]: '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason='VideoMAE does not use inputs_embeds' ) def __a ( self ) -> str: '''simple docstring''' pass def __a ( self ) -> int: '''simple docstring''' snake_case__ , snake_case__ : 
Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case__ : Dict = model_class(__UpperCamelCase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) snake_case__ : Dict = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__UpperCamelCase , nn.Linear ) ) def __a ( self ) -> Union[str, Any]: '''simple docstring''' snake_case__ , snake_case__ : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case__ : str = model_class(__UpperCamelCase ) snake_case__ : Union[str, Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic snake_case__ : str = [*signature.parameters.keys()] snake_case__ : Optional[Any] = ['pixel_values'] self.assertListEqual(arg_names[:1] , __UpperCamelCase ) def __a ( self ) -> List[Any]: '''simple docstring''' snake_case__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCamelCase ) def __a ( self ) -> Union[str, Any]: '''simple docstring''' snake_case__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*__UpperCamelCase ) @slow def __a ( self ) -> List[str]: '''simple docstring''' for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case__ : Optional[int] = VideoMAEModel.from_pretrained(__UpperCamelCase ) self.assertIsNotNone(__UpperCamelCase ) def __a ( self ) -> int: '''simple docstring''' if not self.has_attentions: pass else: snake_case__ , snake_case__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() snake_case__ : List[str] = True for model_class in self.all_model_classes: snake_case__ : Optional[int] = self.model_tester.seq_length - self.model_tester.num_masks snake_case__ : int = ( num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length ) snake_case__ : List[Any] = True snake_case__ : Dict = False snake_case__ : Dict = True snake_case__ : Tuple = model_class(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() with torch.no_grad(): snake_case__ : List[Any] = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) ) snake_case__ : List[Any] = outputs.attentions self.assertEqual(len(__UpperCamelCase ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] snake_case__ : Union[str, Any] = True snake_case__ : Union[str, Any] = model_class(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() with torch.no_grad(): snake_case__ : Optional[int] = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) ) snake_case__ : Optional[Any] = outputs.attentions self.assertEqual(len(__UpperCamelCase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , ) snake_case__ : Dict = len(__UpperCamelCase ) # Check attention is always last and order is fine snake_case__ : int = True snake_case__ : Optional[Any] = True snake_case__ : List[str] = model_class(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() with torch.no_grad(): snake_case__ : List[Any] = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) ) self.assertEqual(out_len + 1 , len(__UpperCamelCase ) ) snake_case__ : Union[str, Any] = outputs.attentions 
self.assertEqual(len(__UpperCamelCase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , ) def __a ( self ) -> Optional[int]: '''simple docstring''' def check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): snake_case__ : List[Any] = model_class(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() with torch.no_grad(): snake_case__ : List[Any] = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) ) snake_case__ : Optional[Any] = outputs.hidden_states snake_case__ : Optional[int] = self.model_tester.num_hidden_layers + 1 self.assertEqual(len(__UpperCamelCase ) , __UpperCamelCase ) snake_case__ : Tuple = self.model_tester.seq_length - self.model_tester.num_masks snake_case__ : Optional[int] = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) snake_case__ , snake_case__ : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case__ : List[str] = True check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] snake_case__ : str = True check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' ) def __a ( self ) -> List[str]: '''simple docstring''' pass def UpperCamelCase__ ( ) -> int: snake_case__ : List[str] = hf_hub_download( repo_id='hf-internal-testing/spaghetti-video' , filename='eating_spaghetti.npy' , repo_type='dataset' ) snake_case__ : Optional[Any] = np.load(A__ ) return list(A__ ) @require_torch @require_vision class __snake_case ( unittest.TestCase ): @cached_property def __a ( self ) -> Optional[Any]: '''simple docstring''' return ( VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] ) if is_vision_available() else None ) @slow def __a ( self ) -> Optional[int]: '''simple docstring''' snake_case__ : Optional[Any] = VideoMAEForVideoClassification.from_pretrained('MCG-NJU/videomae-base-finetuned-kinetics' ).to( __UpperCamelCase ) snake_case__ : str = self.default_image_processor snake_case__ : Optional[int] = prepare_video() snake_case__ : Optional[Any] = image_processor(__UpperCamelCase , return_tensors='pt' ).to(__UpperCamelCase ) # forward pass with torch.no_grad(): snake_case__ : Optional[int] = model(**__UpperCamelCase ) # verify the logits snake_case__ : List[Any] = torch.Size((1, 400) ) self.assertEqual(outputs.logits.shape , __UpperCamelCase ) snake_case__ : List[Any] = torch.tensor([0.3_6_6_9, -0.0_6_8_8, -0.2_4_2_1] ).to(__UpperCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCamelCase , atol=1E-4 ) ) @slow def __a ( self ) -> List[str]: '''simple docstring''' snake_case__ : Dict = VideoMAEForPreTraining.from_pretrained('MCG-NJU/videomae-base-short' ).to(__UpperCamelCase ) snake_case__ : str = self.default_image_processor snake_case__ : Optional[int] = prepare_video() snake_case__ : Tuple = image_processor(__UpperCamelCase , return_tensors='pt' ).to(__UpperCamelCase ) # add boolean mask, indicating which patches to mask snake_case__ : Optional[Any] = hf_hub_download(repo_id='hf-internal-testing/bool-masked-pos' , 
filename='bool_masked_pos.pt' ) snake_case__ : Dict = torch.load(__UpperCamelCase ) # forward pass with torch.no_grad(): snake_case__ : Tuple = model(**__UpperCamelCase ) # verify the logits snake_case__ : List[str] = torch.Size([1, 1408, 1536] ) snake_case__ : List[Any] = torch.tensor( [[0.7_9_9_4, 0.9_6_1_2, 0.8_5_0_8], [0.7_4_0_1, 0.8_9_5_8, 0.8_3_0_2], [0.5_8_6_2, 0.7_4_6_8, 0.7_3_2_5]] , device=__UpperCamelCase ) self.assertEqual(outputs.logits.shape , __UpperCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , __UpperCamelCase , atol=1E-4 ) ) # verify the loss (`config.norm_pix_loss` = `True`) snake_case__ : int = torch.tensor([0.5_1_4_2] , device=__UpperCamelCase ) self.assertTrue(torch.allclose(outputs.loss , __UpperCamelCase , atol=1E-4 ) ) # verify the loss (`config.norm_pix_loss` = `False`) snake_case__ : Tuple = VideoMAEForPreTraining.from_pretrained('MCG-NJU/videomae-base-short' , norm_pix_loss=__UpperCamelCase ).to( __UpperCamelCase ) with torch.no_grad(): snake_case__ : Dict = model(**__UpperCamelCase ) snake_case__ : List[Any] = torch.tensor(torch.tensor([0.6_4_6_9] ) , device=__UpperCamelCase ) self.assertTrue(torch.allclose(outputs.loss , __UpperCamelCase , atol=1E-4 ) )
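# Inference sketch (an addition) mirroring the slow video-classification test
# above outside the unittest harness. The checkpoint, normalization values, and
# the (1, 400) logits shape come from that test; the rest is assumed usage.
import torch
from transformers import VideoMAEForVideoClassification, VideoMAEImageProcessor

processor = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
model = VideoMAEForVideoClassification.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics")
video = prepare_video()  # hypothetical name for the frame-loading helper defined above
inputs = processor(video, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # torch.Size((1, 400))
print("Predicted class:", model.config.id2label[logits.argmax(-1).item()])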
import warnings

from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor


logger = logging.get_logger(__name__)


class PerceiverFeatureExtractor(PerceiverImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        # Deprecation shim: warn with FutureWarning (the mangled original passed
        # a nonsense argument here), then defer everything to the image processor.
        warnings.warn(
            "The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of"
            " Transformers. Please use PerceiverImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
import os import unittest from transformers import FunnelTokenizer, FunnelTokenizerFast from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class __snake_case ( _lowerCamelCase ,unittest.TestCase ): __lowerCamelCase = FunnelTokenizer __lowerCamelCase = FunnelTokenizerFast __lowerCamelCase = True __lowerCamelCase = True def __a ( self ) -> int: '''simple docstring''' super().setUp() snake_case__ : List[Any] = [ '<unk>', '<cls>', '<sep>', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest', ] snake_case__ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) def __a ( self , **__UpperCamelCase ) -> List[str]: '''simple docstring''' return FunnelTokenizer.from_pretrained(self.tmpdirname , **__UpperCamelCase ) def __a ( self , **__UpperCamelCase ) -> int: '''simple docstring''' return FunnelTokenizerFast.from_pretrained(self.tmpdirname , **__UpperCamelCase ) def __a ( self , __UpperCamelCase ) -> Union[str, Any]: '''simple docstring''' snake_case__ : Union[str, Any] = 'UNwant\u00E9d,running' snake_case__ : Tuple = 'unwanted, running' return input_text, output_text def __a ( self ) -> List[str]: '''simple docstring''' snake_case__ : Optional[int] = self.tokenizer_class(self.vocab_file ) snake_case__ : List[Any] = tokenizer.tokenize('UNwant\u00E9d,running' ) self.assertListEqual(__UpperCamelCase , ['un', '##want', '##ed', ',', 'runn', '##ing'] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCamelCase ) , [7, 4, 5, 10, 8, 9] ) def __a ( self ) -> Any: '''simple docstring''' snake_case__ : List[str] = self.get_tokenizers(do_lower_case=__UpperCamelCase ) for tokenizer in tokenizers: snake_case__ : int = tokenizer('UNwant\u00E9d,running' ) snake_case__ : List[Any] = len(inputs['input_ids'] ) - 1 self.assertListEqual(inputs['token_type_ids'] , [2] + [0] * sentence_len ) snake_case__ : Tuple = tokenizer('UNwant\u00E9d,running' , 'UNwant\u00E9d,running' ) self.assertListEqual(inputs['token_type_ids'] , [2] + [0] * sentence_len + [1] * sentence_len )
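# Quick sketch (an addition) of the segment-id behaviour the last test checks:
# Funnel prepends segment id 2 for its <cls> token before the usual 0/1 ids.
# The checkpoint name is an assumption (any Funnel tokenizer should behave alike).
from transformers import FunnelTokenizer

tok = FunnelTokenizer.from_pretrained("funnel-transformer/small")
enc = tok("UNwanted,running")
print(enc["token_type_ids"])  # starts with 2, then 0s for the single sentence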
import io import itertools import json from dataclasses import dataclass from typing import Optional import pyarrow as pa import pyarrow.json as paj import datasets from datasets.table import table_cast from datasets.utils.file_utils import readline lowerCAmelCase__ : List[Any] = datasets.utils.logging.get_logger(__name__) @dataclass class __snake_case ( datasets.BuilderConfig ): __lowerCamelCase = None __lowerCamelCase = "utf-8" __lowerCamelCase = None __lowerCamelCase = None __lowerCamelCase = True # deprecated __lowerCamelCase = None # deprecated __lowerCamelCase = 10 << 20 # 10MB __lowerCamelCase = None class __snake_case ( datasets.ArrowBasedBuilder ): __lowerCamelCase = JsonConfig def __a ( self ) -> Optional[Any]: '''simple docstring''' if self.config.block_size is not None: logger.warning('The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead' ) snake_case__ : str = self.config.block_size if self.config.use_threads is not True: logger.warning( 'The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.' ) if self.config.newlines_in_values is not None: raise ValueError('The JSON loader parameter `newlines_in_values` is no longer supported' ) return datasets.DatasetInfo(features=self.config.features ) def __a ( self , __UpperCamelCase ) -> Dict: '''simple docstring''' if not self.config.data_files: raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" ) snake_case__ : int = dl_manager.download_and_extract(self.config.data_files ) if isinstance(__UpperCamelCase , (str, list, tuple) ): snake_case__ : Any = data_files if isinstance(__UpperCamelCase , __UpperCamelCase ): snake_case__ : Optional[Any] = [files] snake_case__ : List[str] = [dl_manager.iter_files(__UpperCamelCase ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'files': files} )] snake_case__ : List[Any] = [] for split_name, files in data_files.items(): if isinstance(__UpperCamelCase , __UpperCamelCase ): snake_case__ : List[Any] = [files] snake_case__ : Any = [dl_manager.iter_files(__UpperCamelCase ) for file in files] splits.append(datasets.SplitGenerator(name=__UpperCamelCase , gen_kwargs={'files': files} ) ) return splits def __a ( self , __UpperCamelCase ) -> pa.Table: '''simple docstring''' if self.config.features is not None: # adding missing columns for column_name in set(self.config.features ) - set(pa_table.column_names ): snake_case__ : List[Any] = self.config.features.arrow_schema.field(__UpperCamelCase ).type snake_case__ : List[str] = pa_table.append_column(__UpperCamelCase , pa.array([None] * len(__UpperCamelCase ) , type=__UpperCamelCase ) ) # more expensive cast to support nested structures with keys in a different order # allows str <-> int/float or str to Audio for example snake_case__ : List[str] = table_cast(__UpperCamelCase , self.config.features.arrow_schema ) return pa_table def __a ( self , __UpperCamelCase ) -> int: '''simple docstring''' for file_idx, file in enumerate(itertools.chain.from_iterable(__UpperCamelCase ) ): # If the file is one json object and if we need to look at the list of items in one specific field if self.config.field is not None: with open(__UpperCamelCase , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f: snake_case__ : Union[str, Any] = json.load(__UpperCamelCase ) # We keep only the field we are interested in snake_case__ : Tuple = dataset[self.config.field] # We accept two format: a list 
of dicts or a dict of lists if isinstance(__UpperCamelCase , (list, tuple) ): snake_case__ : List[Any] = set().union(*[row.keys() for row in dataset] ) snake_case__ : List[Any] = {col: [row.get(__UpperCamelCase ) for row in dataset] for col in keys} else: snake_case__ : List[Any] = dataset snake_case__ : Dict = pa.Table.from_pydict(__UpperCamelCase ) yield file_idx, self._cast_table(__UpperCamelCase ) # If the file has one json object per line else: with open(__UpperCamelCase , 'rb' ) as f: snake_case__ : Optional[int] = 0 # Use block_size equal to the chunk size divided by 32 to leverage multithreading # Set a default minimum value of 16kB if the chunk size is really small snake_case__ : Tuple = max(self.config.chunksize // 32 , 16 << 10 ) snake_case__ : Optional[Any] = ( self.config.encoding_errors if self.config.encoding_errors is not None else 'strict' ) while True: snake_case__ : Optional[int] = f.read(self.config.chunksize ) if not batch: break # Finish current line try: batch += f.readline() except (AttributeError, io.UnsupportedOperation): batch += readline(__UpperCamelCase ) # PyArrow only accepts utf-8 encoded bytes if self.config.encoding != "utf-8": snake_case__ : int = batch.decode(self.config.encoding , errors=__UpperCamelCase ).encode('utf-8' ) try: while True: try: snake_case__ : List[str] = paj.read_json( io.BytesIO(__UpperCamelCase ) , read_options=paj.ReadOptions(block_size=__UpperCamelCase ) ) break except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e: if ( isinstance(__UpperCamelCase , pa.ArrowInvalid ) and "straddling" not in str(__UpperCamelCase ) or block_size > len(__UpperCamelCase ) ): raise else: # Increase the block size in case it was too small. # The block size will be reset for the next file. logger.debug( F"""Batch of {len(__UpperCamelCase )} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.""" ) block_size *= 2 except pa.ArrowInvalid as e: try: with open( __UpperCamelCase , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f: snake_case__ : Tuple = json.load(__UpperCamelCase ) except json.JSONDecodeError: logger.error(F"""Failed to read file '{file}' with error {type(__UpperCamelCase )}: {e}""" ) raise e # If possible, parse the file as a list of json objects and exit the loop if isinstance(__UpperCamelCase , __UpperCamelCase ): # list is the only sequence type supported in JSON try: snake_case__ : str = set().union(*[row.keys() for row in dataset] ) snake_case__ : Union[str, Any] = {col: [row.get(__UpperCamelCase ) for row in dataset] for col in keys} snake_case__ : List[str] = pa.Table.from_pydict(__UpperCamelCase ) except (pa.ArrowInvalid, AttributeError) as e: logger.error(F"""Failed to read file '{file}' with error {type(__UpperCamelCase )}: {e}""" ) raise ValueError(F"""Not able to read records in the JSON file at {file}.""" ) from None yield file_idx, self._cast_table(__UpperCamelCase ) break else: logger.error(F"""Failed to read file '{file}' with error {type(__UpperCamelCase )}: {e}""" ) raise ValueError( F"""Not able to read records in the JSON file at {file}. """ F"""You should probably indicate the field of the JSON file containing your records. """ F"""This JSON file contain the following fields: {str(list(dataset.keys() ) )}. """ F"""Select the correct one and provide it as `field='XXX'` to the dataset loading method. 
""" ) from None # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(__UpperCamelCase ) batch_idx += 1
from itertools import product


def total_frequency_distribution(sides_number: int, dice_number: int) -> list[int]:
    """Count, for every possible total, how many dice rolls produce it."""
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)

    min_face_number = 1
    face_numbers = range(min_face_number, max_face_number + 1)
    for dice_numbers in product(face_numbers, repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1

    return totals_frequencies


def solution() -> float:
    """Probability that Peter (nine 4-sided dice) beats Colin (six 6-sided dice)."""
    peter_totals_frequencies = total_frequency_distribution(sides_number=4, dice_number=9)
    colin_totals_frequencies = total_frequency_distribution(sides_number=6, dice_number=6)

    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total, max_peter_total + 1):
        # Peter wins every pairing of this total with a strictly smaller Colin total.
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total]
        )

    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number
    rounded_peter_win_probability = round(peter_win_probability, ndigits=7)
    return rounded_peter_win_probability


if __name__ == "__main__":
    print(f"{solution() = }")
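# Cross-check sketch (an addition, not in the original file): a quick Monte
# Carlo estimate should land near the exact value computed above (the published
# Project Euler 205 answer is 0.5731441).
import random


def monte_carlo_estimate(trials: int = 100_000) -> float:
    wins = 0
    for _ in range(trials):
        peter = sum(random.randint(1, 4) for _ in range(9))
        colin = sum(random.randint(1, 6) for _ in range(6))
        wins += peter > colin
    return wins / trials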
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase__ : Any = {'''configuration_xglm''': ['''XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XGLMConfig''']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ : str = ['''XGLMTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ : Dict = ['''XGLMTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ : Optional[int] = [ '''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''', '''XGLMForCausalLM''', '''XGLMModel''', '''XGLMPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ : Dict = [ '''FlaxXGLMForCausalLM''', '''FlaxXGLMModel''', '''FlaxXGLMPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ : Dict = [ '''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFXGLMForCausalLM''', '''TFXGLMModel''', '''TFXGLMPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xglm import XGLMTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xglm_fast import XGLMTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, TFXGLMPreTrainedModel, ) else: import sys lowerCAmelCase__ : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
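# Behaviour sketch (an addition): with the _LazyModule indirection above,
# touching a single attribute imports only the submodule that defines it, so
# the config can be loaded without pulling in the torch/TF/Flax modeling files.
from transformers.models.xglm import XGLMConfig

config = XGLMConfig()  # built from configuration_xglm defaults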
def join(separator: str, separated: list[str]) -> str:
    """Concatenate the given strings with the separator between them.

    >>> join("", ["a", "b", "c", "d"])
    'abcd'
    >>> join("#", ["a", "b", "c", "d"])
    'a#b#c#d'
    """
    joined = ""
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase, str):
            raise Exception("join() accepts only strings to be joined")
        joined += word_or_phrase + separator
    return joined.strip(separator)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
from __future__ import annotations import random # Maximum size of the population. Bigger could be faster but is more memory expensive. lowerCAmelCase__ : Dict = 2_00 # Number of elements selected in every generation of evolution. The selection takes # place from best to worst of that generation and must be smaller than N_POPULATION. lowerCAmelCase__ : List[str] = 50 # Probability that an element of a generation can mutate, changing one of its genes. # This will guarantee that all genes will be used during evolution. lowerCAmelCase__ : List[str] = 0.4 # Just a seed to improve randomness required by the algorithm. random.seed(random.randint(0, 10_00)) def UpperCamelCase__ ( A__ , A__ ) -> tuple[str, float]: snake_case__ : Tuple = len([g for position, g in enumerate(A__ ) if g == main_target[position]] ) return (item, float(A__ )) def UpperCamelCase__ ( A__ , A__ ) -> tuple[str, str]: snake_case__ : str = random.randint(0 , len(A__ ) - 1 ) snake_case__ : int = parent_a[:random_slice] + parent_a[random_slice:] snake_case__ : Any = parent_a[:random_slice] + parent_a[random_slice:] return (child_a, child_a) def UpperCamelCase__ ( A__ , A__ ) -> str: snake_case__ : List[Any] = list(A__ ) if random.uniform(0 , 1 ) < MUTATION_PROBABILITY: snake_case__ : Optional[Any] = random.choice(A__ ) return "".join(A__ ) def UpperCamelCase__ ( A__ , A__ , A__ , ) -> list[str]: snake_case__ : Tuple = [] # Generate more children proportionally to the fitness score. snake_case__ : Optional[Any] = int(parent_a[1] * 100 ) + 1 snake_case__ : str = 10 if child_n >= 10 else child_n for _ in range(A__ ): snake_case__ : Any = population_score[random.randint(0 , A__ )][0] snake_case__ , snake_case__ : int = crossover(parent_a[0] , A__ ) # Append new string to the population list. pop.append(mutate(A__ , A__ ) ) pop.append(mutate(A__ , A__ ) ) return pop def UpperCamelCase__ ( A__ , A__ , A__ = True ) -> tuple[int, int, str]: # Verify if N_POPULATION is bigger than N_SELECTED if N_POPULATION < N_SELECTED: snake_case__ : Union[str, Any] = F"""{N_POPULATION} must be bigger than {N_SELECTED}""" raise ValueError(A__ ) # Verify that the target contains no genes besides the ones inside genes variable. snake_case__ : Tuple = sorted({c for c in target if c not in genes} ) if not_in_genes_list: snake_case__ : int = F"""{not_in_genes_list} is not in genes list, evolution cannot converge""" raise ValueError(A__ ) # Generate random starting population. snake_case__ : Union[str, Any] = [] for _ in range(A__ ): population.append(''.join([random.choice(A__ ) for i in range(len(A__ ) )] ) ) # Just some logs to know what the algorithms is doing. snake_case__ , snake_case__ : str = 0, 0 # This loop will end when we find a perfect match for our target. while True: generation += 1 total_population += len(A__ ) # Random population created. Now it's time to evaluate. # Adding a bit of concurrency can make everything faster, # # import concurrent.futures # population_score: list[tuple[str, float]] = [] # with concurrent.futures.ThreadPoolExecutor( # max_workers=NUM_WORKERS) as executor: # futures = {executor.submit(evaluate, item) for item in population} # concurrent.futures.wait(futures) # population_score = [item.result() for item in futures] # # but with a simple algorithm like this, it will probably be slower. # We just need to call evaluate for every item inside the population. snake_case__ : List[Any] = [evaluate(A__ , A__ ) for item in population] # Check if there is a matching evolution. 
snake_case__ : int = sorted(A__ , key=lambda A__ : x[1] , reverse=A__ ) if population_score[0][0] == target: return (generation, total_population, population_score[0][0]) # Print the best result every 10 generation. # Just to know that the algorithm is working. if debug and generation % 10 == 0: print( F"""\nGeneration: {generation}""" F"""\nTotal Population:{total_population}""" F"""\nBest score: {population_score[0][1]}""" F"""\nBest string: {population_score[0][0]}""" ) # Flush the old population, keeping some of the best evolutions. # Keeping this avoid regression of evolution. snake_case__ : Optional[int] = population[: int(N_POPULATION / 3 )] population.clear() population.extend(A__ ) # Normalize population score to be between 0 and 1. snake_case__ : str = [ (item, score / len(A__ )) for item, score in population_score ] # This is selection for i in range(A__ ): population.extend(select(population_score[int(A__ )] , A__ , A__ ) ) # Check if the population has already reached the maximum value and if so, # break the cycle. If this check is disabled, the algorithm will take # forever to compute large strings, but will also calculate small strings in # a far fewer generations. if len(A__ ) > N_POPULATION: break if __name__ == "__main__": lowerCAmelCase__ : str = ( '''This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!''' ) lowerCAmelCase__ : Optional[Any] = list( ''' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm''' '''nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\''' ) lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__ : List[str] = basic(target_str, genes_list) print( F'''\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}''' )
699
1
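A readable sketch of the joiner in the code field above, with the obfuscated A__ parameters read as a separator string and a list of strings (both names are assumptions):

def join(separator: str, separated: list[str]) -> str:
    joined = ""
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase, str):
            raise Exception("join() accepts only strings to be joined")
        joined += word_or_phrase + separator
    # str.strip(separator) removes separator characters from both ends,
    # which drops the trailing separator added by the loop
    return joined.strip(separator)

assert join(" ", ["You", "are", "amazing!"]) == "You are amazing!"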
import inspect import unittest from transformers import ConvNextConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class __snake_case : def __init__( self , __UpperCamelCase , __UpperCamelCase=13 , __UpperCamelCase=32 , __UpperCamelCase=3 , __UpperCamelCase=4 , __UpperCamelCase=[10, 20, 30, 40] , __UpperCamelCase=[2, 2, 3, 2] , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=37 , __UpperCamelCase="gelu" , __UpperCamelCase=10 , __UpperCamelCase=0.0_2 , __UpperCamelCase=["stage2", "stage3", "stage4"] , __UpperCamelCase=[2, 3, 4] , __UpperCamelCase=None , ) -> int: '''simple docstring''' snake_case__ : List[str] = parent snake_case__ : Optional[int] = batch_size snake_case__ : Dict = image_size snake_case__ : str = num_channels snake_case__ : Tuple = num_stages snake_case__ : Optional[Any] = hidden_sizes snake_case__ : int = depths snake_case__ : Tuple = is_training snake_case__ : Dict = use_labels snake_case__ : List[Any] = intermediate_size snake_case__ : Union[str, Any] = hidden_act snake_case__ : Optional[Any] = num_labels snake_case__ : Dict = initializer_range snake_case__ : List[Any] = out_features snake_case__ : List[Any] = out_indices snake_case__ : Any = scope def __a ( self ) -> Union[str, Any]: '''simple docstring''' snake_case__ : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) snake_case__ : Union[str, Any] = None if self.use_labels: snake_case__ : str = ids_tensor([self.batch_size] , self.num_labels ) snake_case__ : Dict = self.get_config() return config, pixel_values, labels def __a ( self ) -> int: '''simple docstring''' return ConvNextConfig( num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=__UpperCamelCase , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , ) def __a ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Optional[Any]: '''simple docstring''' snake_case__ : Any = ConvNextModel(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() snake_case__ : List[str] = model(__UpperCamelCase ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def __a ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Union[str, Any]: '''simple docstring''' snake_case__ : str = ConvNextForImageClassification(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() snake_case__ : Optional[Any] = model(__UpperCamelCase , labels=__UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __a ( self , __UpperCamelCase 
, __UpperCamelCase , __UpperCamelCase ) -> Tuple: '''simple docstring''' snake_case__ : Dict = ConvNextBackbone(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() snake_case__ : List[Any] = model(__UpperCamelCase ) # verify hidden states self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None snake_case__ : List[str] = None snake_case__ : int = ConvNextBackbone(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() snake_case__ : Optional[Any] = model(__UpperCamelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def __a ( self ) -> Optional[int]: '''simple docstring''' snake_case__ : Optional[Any] = self.prepare_config_and_inputs() snake_case__ , snake_case__ , snake_case__ : str = config_and_inputs snake_case__ : Any = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class __snake_case ( _lowerCamelCase ,_lowerCamelCase ,unittest.TestCase ): __lowerCamelCase = ( ( ConvNextModel, ConvNextForImageClassification, ConvNextBackbone, ) if is_torch_available() else () ) __lowerCamelCase = ( {"""feature-extraction""": ConvNextModel, """image-classification""": ConvNextForImageClassification} if is_torch_available() else {} ) __lowerCamelCase = True __lowerCamelCase = False __lowerCamelCase = False __lowerCamelCase = False __lowerCamelCase = False def __a ( self ) -> Dict: '''simple docstring''' snake_case__ : Any = ConvNextModelTester(self ) snake_case__ : Union[str, Any] = ConfigTester(self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase , hidden_size=37 ) def __a ( self ) -> str: '''simple docstring''' self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def __a ( self ) -> Tuple: '''simple docstring''' return @unittest.skip(reason='ConvNext does not use inputs_embeds' ) def __a ( self ) -> Any: '''simple docstring''' pass @unittest.skip(reason='ConvNext does not support input and output embeddings' ) def __a ( self ) -> Dict: '''simple docstring''' pass @unittest.skip(reason='ConvNext does not use feedforward chunking' ) def __a ( self ) -> str: '''simple docstring''' pass def __a ( self ) -> Any: '''simple docstring''' snake_case__ , snake_case__ : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case__ : Union[str, Any] = model_class(__UpperCamelCase ) snake_case__ : Optional[int] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic snake_case__ : Optional[Any] = [*signature.parameters.keys()] snake_case__ : List[Any] = 
['pixel_values'] self.assertListEqual(arg_names[:1] , __UpperCamelCase ) def __a ( self ) -> int: '''simple docstring''' snake_case__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCamelCase ) def __a ( self ) -> List[Any]: '''simple docstring''' snake_case__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*__UpperCamelCase ) def __a ( self ) -> Union[str, Any]: '''simple docstring''' def check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): snake_case__ : Tuple = model_class(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() with torch.no_grad(): snake_case__ : Optional[int] = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) ) snake_case__ : Optional[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states snake_case__ : int = self.model_tester.num_stages self.assertEqual(len(__UpperCamelCase ) , expected_num_stages + 1 ) # ConvNext's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) snake_case__ , snake_case__ : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case__ : List[str] = True check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] snake_case__ : List[str] = True check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) def __a ( self ) -> Optional[int]: '''simple docstring''' snake_case__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__UpperCamelCase ) @slow def __a ( self ) -> int: '''simple docstring''' for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case__ : Dict = ConvNextModel.from_pretrained(__UpperCamelCase ) self.assertIsNotNone(__UpperCamelCase ) def UpperCamelCase__ ( ) -> Union[str, Any]: snake_case__ : Dict = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class __snake_case ( unittest.TestCase ): @cached_property def __a ( self ) -> Union[str, Any]: '''simple docstring''' return AutoImageProcessor.from_pretrained('facebook/convnext-tiny-224' ) if is_vision_available() else None @slow def __a ( self ) -> Dict: '''simple docstring''' snake_case__ : Optional[int] = ConvNextForImageClassification.from_pretrained('facebook/convnext-tiny-224' ).to(__UpperCamelCase ) snake_case__ : Optional[Any] = self.default_image_processor snake_case__ : List[str] = prepare_img() snake_case__ : str = image_processor(images=__UpperCamelCase , return_tensors='pt' ).to(__UpperCamelCase ) # forward pass with torch.no_grad(): snake_case__ : Any = model(**__UpperCamelCase ) # verify the logits snake_case__ : List[Any] = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , __UpperCamelCase ) snake_case__ : Union[str, Any] = torch.tensor([-0.0_2_6_0, -0.4_7_3_9, 0.1_9_1_1] ).to(__UpperCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCamelCase , atol=1E-4 ) ) @require_torch class __snake_case ( unittest.TestCase ,_lowerCamelCase ): __lowerCamelCase = (ConvNextBackbone,) if is_torch_available() else () __lowerCamelCase = 
ConvNextConfig __lowerCamelCase = False def __a ( self ) -> Any: '''simple docstring''' snake_case__ : List[str] = ConvNextModelTester(self )
699
from __future__ import annotations from collections.abc import Iterator from typing import Generic, TypeVar lowerCAmelCase__ : Optional[int] = TypeVar('''T''') class __snake_case ( Generic[T] ): def __init__( self , __UpperCamelCase ) -> Any: '''simple docstring''' snake_case__ : Optional[int] = data snake_case__ : Node[T] | None = None def __str__( self ) -> str: '''simple docstring''' return F"""{self.data}""" class __snake_case ( Generic[T] ): def __init__( self ) -> None: '''simple docstring''' snake_case__ : Node[T] | None = None def __iter__( self ) -> Iterator[T]: '''simple docstring''' snake_case__ : str = self.top while node: yield node.data snake_case__ : Dict = node.next def __str__( self ) -> str: '''simple docstring''' return "->".join([str(__UpperCamelCase ) for item in self] ) def __len__( self ) -> int: '''simple docstring''' return len(tuple(iter(self ) ) ) def __a ( self ) -> bool: '''simple docstring''' return self.top is None def __a ( self , __UpperCamelCase ) -> None: '''simple docstring''' snake_case__ : str = Node(__UpperCamelCase ) if not self.is_empty(): snake_case__ : List[str] = self.top snake_case__ : Tuple = node def __a ( self ) -> T: '''simple docstring''' if self.is_empty(): raise IndexError('pop from empty stack' ) assert isinstance(self.top , __UpperCamelCase ) snake_case__ : List[str] = self.top snake_case__ : Union[str, Any] = self.top.next return pop_node.data def __a ( self ) -> T: '''simple docstring''' if self.is_empty(): raise IndexError('peek from empty stack' ) assert self.top is not None return self.top.data def __a ( self ) -> None: '''simple docstring''' snake_case__ : Any = None if __name__ == "__main__": from doctest import testmod testmod()
699
1
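The integration test above pins logits from the facebook/convnext-tiny-224 checkpoint; a minimal standalone classification sketch along the same lines (the image path is illustrative, and the checkpoint download needs network access):

import torch
from PIL import Image
from transformers import AutoImageProcessor, ConvNextForImageClassification

processor = AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224")
model = ConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224")

image = Image.open("cat.jpg")  # any RGB image; the path is illustrative
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])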
import warnings from ...utils import logging from .image_processing_imagegpt import ImageGPTImageProcessor lowerCAmelCase__ : Union[str, Any] = logging.get_logger(__name__) class __snake_case ( _lowerCamelCase ): def __init__( self , *__UpperCamelCase , **__UpperCamelCase ) -> None: '''simple docstring''' warnings.warn( 'The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.' ' Please use ImageGPTImageProcessor instead.' , __UpperCamelCase , ) super().__init__(*__UpperCamelCase , **__UpperCamelCase )
699
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCAmelCase__ : Dict = logging.get_logger(__name__) lowerCAmelCase__ : int = { '''sail/poolformer_s12''': '''https://huggingface.co/sail/poolformer_s12/resolve/main/config.json''', # See all PoolFormer models at https://huggingface.co/models?filter=poolformer } class __snake_case ( _lowerCamelCase ): __lowerCamelCase = """poolformer""" def __init__( self , __UpperCamelCase=3 , __UpperCamelCase=16 , __UpperCamelCase=16 , __UpperCamelCase=3 , __UpperCamelCase=4.0 , __UpperCamelCase=[2, 2, 6, 2] , __UpperCamelCase=[64, 128, 320, 512] , __UpperCamelCase=[7, 3, 3, 3] , __UpperCamelCase=[4, 2, 2, 2] , __UpperCamelCase=[2, 1, 1, 1] , __UpperCamelCase=4 , __UpperCamelCase=0.0 , __UpperCamelCase="gelu" , __UpperCamelCase=True , __UpperCamelCase=1E-5 , __UpperCamelCase=0.0_2 , **__UpperCamelCase , ) -> Any: '''simple docstring''' snake_case__ : List[str] = num_channels snake_case__ : Dict = patch_size snake_case__ : Optional[int] = stride snake_case__ : str = padding snake_case__ : List[str] = pool_size snake_case__ : List[Any] = hidden_sizes snake_case__ : List[Any] = mlp_ratio snake_case__ : Union[str, Any] = depths snake_case__ : Dict = patch_sizes snake_case__ : Dict = strides snake_case__ : Dict = num_encoder_blocks snake_case__ : Union[str, Any] = drop_path_rate snake_case__ : List[str] = hidden_act snake_case__ : Optional[Any] = use_layer_scale snake_case__ : int = layer_scale_init_value snake_case__ : Dict = initializer_range super().__init__(**__UpperCamelCase ) class __snake_case ( _lowerCamelCase ): __lowerCamelCase = version.parse("""1.11""" ) @property def __a ( self ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ] ) @property def __a ( self ) -> float: '''simple docstring''' return 2E-3
699
1
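The ImageGPTFeatureExtractor shim above follows a common deprecation pattern: subclass the replacement class and emit a warning on construction. A generic sketch of that pattern, assuming the obfuscated warning category is FutureWarning as in the upstream transformers shim:

import warnings

class NewImageProcessor:
    def __init__(self, *args, **kwargs):
        pass

class OldFeatureExtractor(NewImageProcessor):
    """Deprecated alias that forwards everything to its replacement."""

    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldFeatureExtractor is deprecated and will be removed; "
            "use NewImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)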
def UpperCamelCase__ ( A__ ) -> str: # noqa: E741 snake_case__ : List[str] = len(A__ ) snake_case__ : List[str] = 0 snake_case__ : Optional[Any] = [0] * n snake_case__ : Optional[int] = [False] * n snake_case__ : Dict = [False] * n def dfs(A__ , A__ , A__ , A__ ): if parent == root: out_edge_count += 1 snake_case__ : int = True snake_case__ : int = at for to in l[at]: if to == parent: pass elif not visited[to]: snake_case__ : Optional[int] = dfs(A__ , A__ , A__ , A__ ) snake_case__ : Tuple = min(low[at] , low[to] ) # AP found via bridge if at < low[to]: snake_case__ : Tuple = True # AP found via cycle if at == low[to]: snake_case__ : List[Any] = True else: snake_case__ : Tuple = min(low[at] , A__ ) return out_edge_count for i in range(A__ ): if not visited[i]: snake_case__ : List[str] = 0 snake_case__ : str = dfs(A__ , A__ , -1 , A__ ) snake_case__ : List[Any] = out_edge_count > 1 for x in range(len(A__ ) ): if is_art[x] is True: print(A__ ) # Adjacency list of graph lowerCAmelCase__ : int = { 0: [1, 2], 1: [0, 2], 2: [0, 1, 3, 5], 3: [2, 4], 4: [3], 5: [2, 6, 8], 6: [5, 7], 7: [6, 8], 8: [5, 7], } compute_ap(data)
699
import numpy as np import qiskit def UpperCamelCase__ ( A__ = 8 , A__ = None ) -> str: snake_case__ : Optional[int] = np.random.default_rng(seed=A__ ) # Roughly 25% of the qubits will contribute to the key. # So we take more than we need. snake_case__ : Tuple = 6 * key_len # Measurement basis for Alice's qubits. snake_case__ : Tuple = rng.integers(2 , size=A__ ) # The set of states Alice will prepare. snake_case__ : List[str] = rng.integers(2 , size=A__ ) # Measurement basis for Bob's qubits. snake_case__ : List[Any] = rng.integers(2 , size=A__ ) # Quantum Circuit to simulate BB84 snake_case__ : Any = qiskit.QuantumCircuit(A__ , name='BB84' ) # Alice prepares her qubits according to rules above. for index, _ in enumerate(A__ ): if alice_state[index] == 1: bbaa_circ.x(A__ ) if alice_basis[index] == 1: bbaa_circ.h(A__ ) bbaa_circ.barrier() # Bob measures the received qubits according to rules above. for index, _ in enumerate(A__ ): if bob_basis[index] == 1: bbaa_circ.h(A__ ) bbaa_circ.barrier() bbaa_circ.measure_all() # Simulate the quantum circuit. snake_case__ : List[str] = qiskit.Aer.get_backend('aer_simulator' ) # We only need to run one shot because the key is unique. # Multiple shots will produce the same key. snake_case__ : Optional[Any] = qiskit.execute(A__ , A__ , shots=1 , seed_simulator=A__ ) # Returns the result of measurement. snake_case__ : Union[str, Any] = job.result().get_counts(A__ ).most_frequent() # Extracting the generated key from the simulation results. # Only keep measurement results where Alice and Bob chose the same basis. snake_case__ : Optional[Any] = ''.join( [ result_bit for alice_basis_bit, bob_basis_bit, result_bit in zip( A__ , A__ , A__ ) if alice_basis_bit == bob_basis_bit ] ) # Get final key. Pad with 0 if too short, otherwise truncate. snake_case__ : Tuple = gen_key[:key_len] if len(A__ ) >= key_len else gen_key.ljust(A__ , '0' ) return key if __name__ == "__main__": print(F'''The generated key is : {bbaa(8, seed=0)}''') from doctest import testmod testmod()
699
1
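The sifting step in the BB84 snippet keeps only positions where Alice's and Bob's randomly chosen bases agree, which is roughly half of them on average; a qiskit-free sketch of that classical post-processing with illustrative arrays:

import numpy as np

rng = np.random.default_rng(seed=0)
num_qubits = 48
alice_basis = rng.integers(2, size=num_qubits)
bob_basis = rng.integers(2, size=num_qubits)
measured_bits = rng.integers(2, size=num_qubits)  # stand-in for the circuit's output

sifted_key = "".join(
    str(bit)
    for a, b, bit in zip(alice_basis, bob_basis, measured_bits)
    if a == b  # keep only positions where the bases agree
)
print(f"{len(sifted_key)} of {num_qubits} bits survive sifting")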
from __future__ import annotations import math def UpperCamelCase__ ( A__ , A__ , A__ , A__ , A__ ) -> int: if depth < 0: raise ValueError('Depth cannot be less than 0' ) if not scores: raise ValueError('Scores cannot be empty' ) if depth == height: return scores[node_index] return ( max( minimax(depth + 1 , node_index * 2 , A__ , A__ , A__ ) , minimax(depth + 1 , node_index * 2 + 1 , A__ , A__ , A__ ) , ) if is_max else min( minimax(depth + 1 , node_index * 2 , A__ , A__ , A__ ) , minimax(depth + 1 , node_index * 2 + 1 , A__ , A__ , A__ ) , ) ) def UpperCamelCase__ ( ) -> None: snake_case__ : Union[str, Any] = [90, 23, 6, 33, 21, 65, 123, 3_4423] snake_case__ : Dict = math.log(len(A__ ) , 2 ) print(F"""Optimal value : {minimax(0 , 0 , A__ , A__ , A__ )}""" ) if __name__ == "__main__": import doctest doctest.testmod() main()
699
def UpperCamelCase__ ( A__ , A__ , A__ ) -> int: if exponent == 1: return base if exponent % 2 == 0: snake_case__ : Dict = _modexpt(A__ , exponent // 2 , A__ ) % modulo_value return (x * x) % modulo_value else: return (base * _modexpt(A__ , exponent - 1 , A__ )) % modulo_value def UpperCamelCase__ ( A__ = 1777 , A__ = 1855 , A__ = 8 ) -> int: snake_case__ : Tuple = base for _ in range(1 , A__ ): snake_case__ : Any = _modexpt(A__ , A__ , 10**digits ) return result if __name__ == "__main__": print(F'''{solution() = }''')
699
1
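The recursive _modexpt above is square-and-multiply modular exponentiation, so Python's three-argument pow computes the same value and makes a handy cross-check; a de-obfuscated sketch:

def modexpt(base: int, exponent: int, modulo_value: int) -> int:
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        # square the result of the half-exponent
        x = modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    # odd exponent: peel off one factor of base
    return (base * modexpt(base, exponent - 1, modulo_value)) % modulo_value

assert modexpt(1777, 1855, 10**8) == pow(1777, 1855, 10**8)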
import contextlib import importlib import io import unittest import transformers # Try to import everything from transformers to ensure every object can be loaded. from transformers import * # noqa F406 from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available if is_torch_available(): from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification if is_tf_available(): from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification if is_flax_available(): from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification lowerCAmelCase__ : List[str] = DUMMY_UNKNOWN_IDENTIFIER # An actual model hosted on huggingface.co lowerCAmelCase__ : int = '''main''' # Default branch name lowerCAmelCase__ : Dict = '''f2c752cfc5c0ab6f4bdec59acea69eefbee381c2''' # One particular commit (not the top of `main`) lowerCAmelCase__ : Optional[Any] = '''aaaaaaa''' # This commit does not exist, so we should 404. lowerCAmelCase__ : List[Any] = '''d9e9f15bc825e4b2c9249e9578f884bbcb5e3684''' # Sha-1 of config.json on the top of `main`, for checking purposes lowerCAmelCase__ : List[str] = '''4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3''' @contextlib.contextmanager def UpperCamelCase__ ( ) -> List[str]: print('Welcome!' ) yield print('Bye!' ) @contextlib.contextmanager def UpperCamelCase__ ( ) -> Optional[Any]: print('Bonjour!' ) yield print('Au revoir!' ) class __snake_case ( unittest.TestCase ): def __a ( self ) -> Union[str, Any]: '''simple docstring''' assert transformers.__spec__ is not None assert importlib.util.find_spec('transformers' ) is not None class __snake_case ( unittest.TestCase ): @unittest.mock.patch('sys.stdout' , new_callable=io.StringIO ) def __a ( self , __UpperCamelCase ) -> Dict: '''simple docstring''' with ContextManagers([] ): print('Transformers are awesome!' ) # The print statement adds a new line at the end of the output self.assertEqual(mock_stdout.getvalue() , 'Transformers are awesome!\n' ) @unittest.mock.patch('sys.stdout' , new_callable=io.StringIO ) def __a ( self , __UpperCamelCase ) -> str: '''simple docstring''' with ContextManagers([context_en()] ): print('Transformers are awesome!' ) # The output should be wrapped with an English welcome and goodbye self.assertEqual(mock_stdout.getvalue() , 'Welcome!\nTransformers are awesome!\nBye!\n' ) @unittest.mock.patch('sys.stdout' , new_callable=io.StringIO ) def __a ( self , __UpperCamelCase ) -> Union[str, Any]: '''simple docstring''' with ContextManagers([context_fr(), context_en()] ): print('Transformers are awesome!' 
) # The output should be wrapped with an English and French welcome and goodbye self.assertEqual(mock_stdout.getvalue() , 'Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n' ) @require_torch def __a ( self ) -> List[str]: '''simple docstring''' self.assertEqual(find_labels(__UpperCamelCase ) , ['labels'] ) self.assertEqual(find_labels(__UpperCamelCase ) , ['labels', 'next_sentence_label'] ) self.assertEqual(find_labels(__UpperCamelCase ) , ['start_positions', 'end_positions'] ) class __snake_case ( _lowerCamelCase ): pass self.assertEqual(find_labels(__UpperCamelCase ) , ['labels'] ) @require_tf def __a ( self ) -> int: '''simple docstring''' self.assertEqual(find_labels(__UpperCamelCase ) , ['labels'] ) self.assertEqual(find_labels(__UpperCamelCase ) , ['labels', 'next_sentence_label'] ) self.assertEqual(find_labels(__UpperCamelCase ) , ['start_positions', 'end_positions'] ) class __snake_case ( _lowerCamelCase ): pass self.assertEqual(find_labels(__UpperCamelCase ) , ['labels'] ) @require_flax def __a ( self ) -> Optional[int]: '''simple docstring''' self.assertEqual(find_labels(__UpperCamelCase ) , [] ) self.assertEqual(find_labels(__UpperCamelCase ) , [] ) self.assertEqual(find_labels(__UpperCamelCase ) , [] ) class __snake_case ( _lowerCamelCase ): pass self.assertEqual(find_labels(__UpperCamelCase ) , [] )
699
# tests directory-specific settings - this file is run automatically # by pytest before any tests are run import sys import warnings from os.path import abspath, dirname, join # allow having multiple repository checkouts and not needing to remember to rerun # 'pip install -e .[dev]' when switching between checkouts and running tests. lowerCAmelCase__ : Tuple = abspath(join(dirname(dirname(__file__)), '''src''')) sys.path.insert(1, git_repo_path) # silence FutureWarning warnings in tests since often we can't act on them until # they become normal warnings - i.e. the tests still need to test the current functionality warnings.simplefilter(action='''ignore''', category=FutureWarning) def UpperCamelCase__ ( A__ ) -> Optional[Any]: from diffusers.utils.testing_utils import pytest_addoption_shared pytest_addoption_shared(A__ ) def UpperCamelCase__ ( A__ ) -> Optional[Any]: from diffusers.utils.testing_utils import pytest_terminal_summary_main snake_case__ : Union[str, Any] = terminalreporter.config.getoption('--make-reports' ) if make_reports: pytest_terminal_summary_main(A__ , id=A__ )
699
1
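ContextManagers, exercised above, enters a list of context managers in order and unwinds them in reverse, much like contextlib.ExitStack; a minimal stand-in reproducing the nested French/English output from the test:

import contextlib

@contextlib.contextmanager
def greeter(hello: str, goodbye: str):
    print(hello)
    yield
    print(goodbye)

with contextlib.ExitStack() as stack:
    for cm in [greeter("Bonjour!", "Au revoir!"), greeter("Welcome!", "Bye!")]:
        stack.enter_context(cm)
    print("Transformers are awesome!")
# Prints: Bonjour! Welcome! Transformers are awesome! Bye! Au revoir!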
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels from .squad import SquadExample, SquadFeatures, SquadVaProcessor, squad_convert_examples_to_features from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
699
def UpperCamelCase__ ( A__ ) -> list[int]: if length <= 0 or not isinstance(A__ , A__ ): raise ValueError('Length must be a positive integer.' ) return [n * (2 * n - 1) for n in range(A__ )] if __name__ == "__main__": print(hexagonal_numbers(length=5)) print(hexagonal_numbers(length=10))
699
1
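The hexagonal numbers h_n = n(2n - 1) generated above are exactly the odd-indexed triangular numbers T_(2n-1); a quick cross-check:

def hexagonal_numbers(length: int) -> list[int]:
    return [n * (2 * n - 1) for n in range(length)]

def triangular(k: int) -> int:
    return k * (k + 1) // 2

# h_n == T_(2n - 1) for n >= 1 (both sequences run 1, 6, 15, 28, ...)
assert all(
    h == triangular(2 * n - 1)
    for n, h in enumerate(hexagonal_numbers(10))
    if n > 0
)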
import os import random import sys from . import cryptomath_module as cryptoMath # noqa: N812 from . import rabin_miller as rabinMiller # noqa: N812 def UpperCamelCase__ ( ) -> None: print('Making key files...' ) make_key_files('rsa' , 1024 ) print('Key files generation successful.' ) def UpperCamelCase__ ( A__ ) -> tuple[tuple[int, int], tuple[int, int]]: print('Generating prime p...' ) snake_case__ : Dict = rabinMiller.generate_large_prime(A__ ) print('Generating prime q...' ) snake_case__ : Optional[Any] = rabinMiller.generate_large_prime(A__ ) snake_case__ : Dict = p * q print('Generating e that is relatively prime to (p - 1) * (q - 1)...' ) while True: snake_case__ : Optional[Any] = random.randrange(2 ** (key_size - 1) , 2 ** (key_size) ) if cryptoMath.gcd(A__ , (p - 1) * (q - 1) ) == 1: break print('Calculating d that is mod inverse of e...' ) snake_case__ : int = cryptoMath.find_mod_inverse(A__ , (p - 1) * (q - 1) ) snake_case__ : Optional[Any] = (n, e) snake_case__ : List[Any] = (n, d) return (public_key, private_key) def UpperCamelCase__ ( A__ , A__ ) -> None: if os.path.exists(F"""{name}_pubkey.txt""" ) or os.path.exists(F"""{name}_privkey.txt""" ): print('\nWARNING:' ) print( F"""\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n""" 'Use a different name or delete these files and re-run this program.' ) sys.exit() snake_case__ , snake_case__ : List[str] = generate_key(A__ ) print(F"""\nWriting public key to file {name}_pubkey.txt...""" ) with open(F"""{name}_pubkey.txt""" , 'w' ) as out_file: out_file.write(F"""{key_size},{public_key[0]},{public_key[1]}""" ) print(F"""Writing private key to file {name}_privkey.txt...""" ) with open(F"""{name}_privkey.txt""" , 'w' ) as out_file: out_file.write(F"""{key_size},{private_key[0]},{private_key[1]}""" ) if __name__ == "__main__": main()
699
import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( ConditionalDetrConfig, ConditionalDetrForObjectDetection, ConditionalDetrForSegmentation, ConditionalDetrImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase__ : Dict = logging.get_logger(__name__) # here we list all keys to be renamed (original name on the left, our name on the right) lowerCAmelCase__ : Optional[Any] = [] for i in range(6): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', F'''encoder.layers.{i}.self_attn.out_proj.weight''') ) rename_keys.append( (F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight''')) rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias''')) rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight''')) rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias''')) rename_keys.append( (F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias''')) rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight''')) rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias''')) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( (F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''decoder.layers.{i}.self_attn.out_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''') ) rename_keys.append( ( F'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''', F'''decoder.layers.{i}.encoder_attn.out_proj.weight''', ) ) rename_keys.append( ( F'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''', F'''decoder.layers.{i}.encoder_attn.out_proj.bias''', ) ) rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight''')) rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias''')) rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight''')) rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''') ) 
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight''')) rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias''')) # q, k, v projections in self/cross-attention in decoder for conditional DETR rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', F'''decoder.layers.{i}.sa_qcontent_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', F'''decoder.layers.{i}.sa_kcontent_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', F'''decoder.layers.{i}.sa_qpos_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', F'''decoder.layers.{i}.sa_kpos_proj.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.weight''', F'''decoder.layers.{i}.sa_v_proj.weight''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', F'''decoder.layers.{i}.ca_qcontent_proj.weight''') ) # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight")) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', F'''decoder.layers.{i}.ca_kcontent_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', F'''decoder.layers.{i}.ca_kpos_proj.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.weight''', F'''decoder.layers.{i}.ca_v_proj.weight''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', F'''decoder.layers.{i}.ca_qpos_sine_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', F'''decoder.layers.{i}.sa_qcontent_proj.bias''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', F'''decoder.layers.{i}.sa_kcontent_proj.bias''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', F'''decoder.layers.{i}.sa_qpos_proj.bias''')) rename_keys.append((F'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', F'''decoder.layers.{i}.sa_kpos_proj.bias''')) rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.bias''', F'''decoder.layers.{i}.sa_v_proj.bias''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', F'''decoder.layers.{i}.ca_qcontent_proj.bias''') ) # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias")) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', F'''decoder.layers.{i}.ca_kcontent_proj.bias''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', F'''decoder.layers.{i}.ca_kpos_proj.bias''')) rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.bias''', F'''decoder.layers.{i}.ca_v_proj.bias''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', F'''decoder.layers.{i}.ca_qpos_sine_proj.bias''') ) # convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads # for conditional DETR, also convert reference point head and query scale MLP rename_keys.extend( [ ('''input_proj.weight''', '''input_projection.weight'''), ('''input_proj.bias''', '''input_projection.bias'''), ('''query_embed.weight''', '''query_position_embeddings.weight'''), 
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''), ('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''), ('''class_embed.weight''', '''class_labels_classifier.weight'''), ('''class_embed.bias''', '''class_labels_classifier.bias'''), ('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''), ('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''), ('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''), ('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''), ('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''), ('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''), ('''transformer.decoder.ref_point_head.layers.0.weight''', '''decoder.ref_point_head.layers.0.weight'''), ('''transformer.decoder.ref_point_head.layers.0.bias''', '''decoder.ref_point_head.layers.0.bias'''), ('''transformer.decoder.ref_point_head.layers.1.weight''', '''decoder.ref_point_head.layers.1.weight'''), ('''transformer.decoder.ref_point_head.layers.1.bias''', '''decoder.ref_point_head.layers.1.bias'''), ('''transformer.decoder.query_scale.layers.0.weight''', '''decoder.query_scale.layers.0.weight'''), ('''transformer.decoder.query_scale.layers.0.bias''', '''decoder.query_scale.layers.0.bias'''), ('''transformer.decoder.query_scale.layers.1.weight''', '''decoder.query_scale.layers.1.weight'''), ('''transformer.decoder.query_scale.layers.1.bias''', '''decoder.query_scale.layers.1.bias'''), ('''transformer.decoder.layers.0.ca_qpos_proj.weight''', '''decoder.layers.0.ca_qpos_proj.weight'''), ('''transformer.decoder.layers.0.ca_qpos_proj.bias''', '''decoder.layers.0.ca_qpos_proj.bias'''), ] ) def UpperCamelCase__ ( A__ , A__ , A__ ) -> List[str]: snake_case__ : int = state_dict.pop(A__ ) snake_case__ : Union[str, Any] = val def UpperCamelCase__ ( A__ ) -> int: snake_case__ : List[Any] = OrderedDict() for key, value in state_dict.items(): if "backbone.0.body" in key: snake_case__ : Any = key.replace('backbone.0.body' , 'backbone.conv_encoder.model' ) snake_case__ : Optional[int] = value else: snake_case__ : Optional[int] = value return new_state_dict def UpperCamelCase__ ( A__ , A__=False ) -> Optional[int]: snake_case__ : Optional[int] = '' if is_panoptic: snake_case__ : Tuple = 'conditional_detr.' 
# first: transformer encoder for i in range(6 ): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) snake_case__ : int = state_dict.pop(F"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" ) snake_case__ : str = state_dict.pop(F"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" ) # next, add query, keys and values (in that order) to the state dict snake_case__ : Union[str, Any] = in_proj_weight[:256, :] snake_case__ : Union[str, Any] = in_proj_bias[:256] snake_case__ : Union[str, Any] = in_proj_weight[256:512, :] snake_case__ : Optional[Any] = in_proj_bias[256:512] snake_case__ : List[str] = in_proj_weight[-256:, :] snake_case__ : Tuple = in_proj_bias[-256:] def UpperCamelCase__ ( ) -> Tuple: snake_case__ : int = 'http://images.cocodataset.org/val2017/000000039769.jpg' snake_case__ : str = Image.open(requests.get(A__ , stream=A__ ).raw ) return im @torch.no_grad() def UpperCamelCase__ ( A__ , A__ ) -> str: snake_case__ : List[Any] = ConditionalDetrConfig() # set backbone and dilation attributes if "resnet101" in model_name: snake_case__ : Any = 'resnet101' if "dc5" in model_name: snake_case__ : Any = True snake_case__ : int = 'panoptic' in model_name if is_panoptic: snake_case__ : str = 250 else: snake_case__ : Union[str, Any] = 91 snake_case__ : Optional[int] = 'huggingface/label-files' snake_case__ : Optional[Any] = 'coco-detection-id2label.json' snake_case__ : str = json.load(open(hf_hub_download(A__ , A__ , repo_type='dataset' ) , 'r' ) ) snake_case__ : List[Any] = {int(A__ ): v for k, v in idalabel.items()} snake_case__ : Any = idalabel snake_case__ : int = {v: k for k, v in idalabel.items()} # load image processor snake_case__ : List[Any] = 'coco_panoptic' if is_panoptic else 'coco_detection' snake_case__ : List[Any] = ConditionalDetrImageProcessor(format=A__ ) # prepare image snake_case__ : List[str] = prepare_img() snake_case__ : Any = image_processor(images=A__ , return_tensors='pt' ) snake_case__ : Dict = encoding['pixel_values'] logger.info(F"""Converting model {model_name}...""" ) # load original model from torch hub snake_case__ : Any = torch.hub.load('DeppMeng/ConditionalDETR' , A__ , pretrained=A__ ).eval() snake_case__ : Tuple = conditional_detr.state_dict() # rename keys for src, dest in rename_keys: if is_panoptic: snake_case__ : List[Any] = 'conditional_detr.' + src rename_key(A__ , A__ , A__ ) snake_case__ : Dict = rename_backbone_keys(A__ ) # query, key and value matrices need special treatment read_in_q_k_v(A__ , is_panoptic=A__ ) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them snake_case__ : Optional[int] = 'conditional_detr.model.' if is_panoptic else 'model.' 
for key in state_dict.copy().keys(): if is_panoptic: if ( key.startswith('conditional_detr' ) and not key.startswith('class_labels_classifier' ) and not key.startswith('bbox_predictor' ) ): snake_case__ : List[Any] = state_dict.pop(A__ ) snake_case__ : Optional[int] = val elif "class_labels_classifier" in key or "bbox_predictor" in key: snake_case__ : str = state_dict.pop(A__ ) snake_case__ : List[Any] = val elif key.startswith('bbox_attention' ) or key.startswith('mask_head' ): continue else: snake_case__ : Union[str, Any] = state_dict.pop(A__ ) snake_case__ : Dict = val else: if not key.startswith('class_labels_classifier' ) and not key.startswith('bbox_predictor' ): snake_case__ : List[Any] = state_dict.pop(A__ ) snake_case__ : Optional[int] = val # finally, create HuggingFace model and load state dict snake_case__ : Union[str, Any] = ConditionalDetrForSegmentation(A__ ) if is_panoptic else ConditionalDetrForObjectDetection(A__ ) model.load_state_dict(A__ ) model.eval() model.push_to_hub(repo_id=A__ , organization='DepuMeng' , commit_message='Add model' ) # verify our conversion snake_case__ : Tuple = conditional_detr(A__ ) snake_case__ : str = model(A__ ) assert torch.allclose(outputs.logits , original_outputs['pred_logits'] , atol=1e-4 ) assert torch.allclose(outputs.pred_boxes , original_outputs['pred_boxes'] , atol=1e-4 ) if is_panoptic: assert torch.allclose(outputs.pred_masks , original_outputs['pred_masks'] , atol=1e-4 ) # Save model and image processor logger.info(F"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" ) Path(A__ ).mkdir(exist_ok=A__ ) model.save_pretrained(A__ ) image_processor.save_pretrained(A__ ) if __name__ == "__main__": lowerCAmelCase__ : Any = argparse.ArgumentParser() parser.add_argument( '''--model_name''', default='''conditional_detr_resnet50''', type=str, help='''Name of the CONDITIONAL_DETR model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.''' ) lowerCAmelCase__ : int = parser.parse_args() convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
699
1
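Keys from the generator above satisfy e*d ≡ 1 (mod (p-1)(q-1)), so textbook RSA encryption and decryption invert each other; a toy roundtrip with deliberately tiny primes (illustration only, never a usable key size):

p, q = 61, 53
n = p * q                              # 3233
e = 17                                 # coprime with (p - 1) * (q - 1) = 3120
d = pow(e, -1, (p - 1) * (q - 1))      # modular inverse, Python 3.8+

message = 42
ciphertext = pow(message, e, n)
assert pow(ciphertext, d, n) == message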
import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, StableDiffusionAttendAndExcitePipeline, UNetaDConditionModel, ) from diffusers.utils import load_numpy, skip_mps, slow from diffusers.utils.testing_utils import require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin lowerCAmelCase__ : str = False @skip_mps class __snake_case ( _lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,unittest.TestCase ): __lowerCamelCase = StableDiffusionAttendAndExcitePipeline __lowerCamelCase = False __lowerCamelCase = TEXT_TO_IMAGE_PARAMS __lowerCamelCase = TEXT_TO_IMAGE_BATCH_PARAMS.union({"""token_indices"""} ) __lowerCamelCase = TEXT_TO_IMAGE_IMAGE_PARAMS __lowerCamelCase = TEXT_TO_IMAGE_IMAGE_PARAMS @classmethod def __a ( cls ) -> Any: '''simple docstring''' super().setUpClass() torch.use_deterministic_algorithms(__UpperCamelCase ) @classmethod def __a ( cls ) -> Any: '''simple docstring''' super().tearDownClass() torch.use_deterministic_algorithms(__UpperCamelCase ) def __a ( self ) -> int: '''simple docstring''' torch.manual_seed(0 ) snake_case__ : str = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=__UpperCamelCase , ) snake_case__ : Dict = DDIMScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' , clip_sample=__UpperCamelCase , set_alpha_to_one=__UpperCamelCase , ) torch.manual_seed(0 ) snake_case__ : Tuple = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , ) torch.manual_seed(0 ) snake_case__ : List[str] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='gelu' , projection_dim=512 , ) snake_case__ : Tuple = CLIPTextModel(__UpperCamelCase ) snake_case__ : Union[str, Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) snake_case__ : Any = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'safety_checker': None, 'feature_extractor': None, } return components def __a ( self , __UpperCamelCase , __UpperCamelCase=0 ) -> Dict: '''simple docstring''' if str(__UpperCamelCase ).startswith('mps' ): snake_case__ : str = torch.manual_seed(__UpperCamelCase ) else: snake_case__ : Any = torch.Generator(device=__UpperCamelCase ).manual_seed(__UpperCamelCase ) snake_case__ : List[str] = { 'prompt': 'a cat and a frog', 'token_indices': [2, 5], 'generator': generator, 'num_inference_steps': 1, 'guidance_scale': 6.0, 'output_type': 'numpy', 'max_iter_to_alter': 2, 'thresholds': {0: 0.7}, } return inputs def __a ( self ) -> Dict: '''simple docstring''' snake_case__ : List[Any] = 'cpu' snake_case__ : int = self.get_dummy_components() snake_case__ : Tuple = self.pipeline_class(**__UpperCamelCase ) 
pipe.to(__UpperCamelCase ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) snake_case__ : Union[str, Any] = self.get_dummy_inputs(__UpperCamelCase ) snake_case__ : List[Any] = pipe(**__UpperCamelCase ).images snake_case__ : List[Any] = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 64, 64, 3) ) snake_case__ : Dict = np.array( [0.6_3_9_0_5_3_6_4, 0.6_2_8_9_7_3_0_7, 0.4_8_5_9_9_0_1_7, 0.5_1_3_3_6_2_4, 0.5_5_5_0_0_4_8, 0.4_5_7_6_9_5_1_6, 0.5_0_3_2_6_9_7_3, 0.5_0_2_3_1_3_9, 0.4_5_3_8_4_4_9_6] ) snake_case__ : Optional[Any] = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(__UpperCamelCase , 1E-3 ) def __a ( self ) -> str: '''simple docstring''' super().test_cpu_offload_forward_pass(expected_max_diff=5E-4 ) def __a ( self ) -> Dict: '''simple docstring''' self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def __a ( self ) -> Union[str, Any]: '''simple docstring''' self._test_inference_batch_single_identical(batch_size=2 , expected_max_diff=7E-4 ) def __a ( self ) -> List[Any]: '''simple docstring''' super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 ) def __a ( self ) -> List[str]: '''simple docstring''' super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5E-4 ) def __a ( self ) -> List[str]: '''simple docstring''' super().test_save_load_local(expected_max_difference=5E-4 ) def __a ( self ) -> List[str]: '''simple docstring''' super().test_save_load_optional_components(expected_max_difference=4E-4 ) @require_torch_gpu @slow class __snake_case ( unittest.TestCase ): @classmethod def __a ( cls ) -> List[str]: '''simple docstring''' super().setUpClass() torch.use_deterministic_algorithms(__UpperCamelCase ) @classmethod def __a ( cls ) -> Optional[Any]: '''simple docstring''' super().tearDownClass() torch.use_deterministic_algorithms(__UpperCamelCase ) def __a ( self ) -> Optional[Any]: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def __a ( self ) -> int: '''simple docstring''' snake_case__ : Union[str, Any] = torch.manual_seed(51 ) snake_case__ : Dict = StableDiffusionAttendAndExcitePipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4' , safety_checker=__UpperCamelCase , torch_dtype=torch.floataa ) pipe.to('cuda' ) snake_case__ : Optional[int] = 'a painting of an elephant with glasses' snake_case__ : Any = [5, 7] snake_case__ : List[Any] = pipe( prompt=__UpperCamelCase , token_indices=__UpperCamelCase , guidance_scale=7.5 , generator=__UpperCamelCase , num_inference_steps=5 , max_iter_to_alter=5 , output_type='numpy' , ).images[0] snake_case__ : List[str] = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy' ) assert np.abs((expected_image - image).max() ) < 5E-1
699
from collections import namedtuple lowerCAmelCase__ : Union[str, Any] = namedtuple('''from_to''', '''from_ to''') lowerCAmelCase__ : Tuple = { '''cubicmeter''': from_to(1, 1), '''litre''': from_to(0.0_01, 10_00), '''kilolitre''': from_to(1, 1), '''gallon''': from_to(0.0_04_54, 2_64.1_72), '''cubicyard''': from_to(0.7_64_55, 1.3_07_95), '''cubicfoot''': from_to(0.0_28, 35.31_47), '''cup''': from_to(0.0_00_23_65_88, 42_26.75), } def UpperCamelCase__ ( A__ , A__ , A__ ) -> float: if from_type not in METRIC_CONVERSION: raise ValueError( F"""Invalid 'from_type' value: {from_type!r} Supported values are:\n""" + ', '.join(A__ ) ) if to_type not in METRIC_CONVERSION: raise ValueError( F"""Invalid 'to_type' value: {to_type!r}. Supported values are:\n""" + ', '.join(A__ ) ) return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to if __name__ == "__main__": import doctest doctest.testmod()
699
1
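The converter above routes every value through cubic metres: value * from_ (of the source unit) * to (of the target unit); for example, litres to gallons using the table's constants:

from collections import namedtuple

from_to = namedtuple("from_to", "from_ to")
litre = from_to(0.001, 1000)         # 1 L = 0.001 m^3; 1 m^3 = 1000 L
gallon = from_to(0.00454, 264.172)   # constants as given in the table above

# 10 litres -> cubic metres -> gallons
print(10 * litre.from_ * gallon.to)  # ~2.64 gallons, per the table's constants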
import random import unittest import numpy as np import torch from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionUpscalePipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class __snake_case ( _lowerCamelCase ,unittest.TestCase ): # TODO: is there an appropriate internal test set? __lowerCamelCase = """ssube/stable-diffusion-x4-upscaler-onnx""" def __a ( self , __UpperCamelCase=0 ) -> Tuple: '''simple docstring''' snake_case__ : str = floats_tensor((1, 3, 128, 128) , rng=random.Random(__UpperCamelCase ) ) snake_case__ : Union[str, Any] = torch.manual_seed(__UpperCamelCase ) snake_case__ : Union[str, Any] = { 'prompt': 'A painting of a squirrel eating a burger', 'image': image, 'generator': generator, 'num_inference_steps': 3, 'guidance_scale': 7.5, 'output_type': 'numpy', } return inputs def __a ( self ) -> Optional[Any]: '''simple docstring''' snake_case__ : str = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) snake_case__ : Optional[Any] = self.get_dummy_inputs() snake_case__ : List[str] = pipe(**__UpperCamelCase ).images snake_case__ : Any = image[0, -3:, -3:, -1].flatten() # started as 128, should now be 512 assert image.shape == (1, 512, 512, 3) snake_case__ : List[str] = np.array( [0.6_9_7_4_7_8_2, 0.6_8_9_0_2_0_9_3, 0.7_0_1_3_5_8_8_5, 0.7_5_8_3_6_1_8, 0.7_8_0_4_5_4_5, 0.7_8_5_4_9_1_2, 0.7_8_6_6_7_4_2_6, 0.7_8_7_4_3_8_6_3, 0.7_8_0_7_0_2_2_3] ) assert np.abs(image_slice - expected_slice ).max() < 1E-1 def __a ( self ) -> Dict: '''simple docstring''' snake_case__ : Tuple = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' ) snake_case__ : Dict = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=__UpperCamelCase ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) snake_case__ : List[Any] = self.get_dummy_inputs() snake_case__ : Dict = pipe(**__UpperCamelCase ).images snake_case__ : int = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) snake_case__ : Optional[int] = np.array( [0.6_8_9_8_8_9_2, 0.5_9_2_4_0_5_5_6, 0.5_2_4_9_9_5_2_7, 0.5_8_8_6_6_2_1_5, 0.5_2_2_5_8_2_3_5, 0.5_2_5_7_2_7_1_5, 0.6_2_4_1_4_4_7_3, 0.6_1_7_4_3_8_7, 0.6_2_1_4_9_6_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def __a ( self ) -> Dict: '''simple docstring''' snake_case__ : Optional[int] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' ) snake_case__ : str = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) snake_case__ : int = self.get_dummy_inputs() snake_case__ : Any = pipe(**__UpperCamelCase ).images snake_case__ : Union[str, Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) snake_case__ : Optional[int] = np.array( [0.7_6_5_9_2_7_8, 0.7_6_4_3_7_6_6_4, 0.7_5_5_7_9_1_0_7, 0.7_6_9_1_1_1_6, 0.7_7_6_6_6_9_8_6, 0.7_7_2_7_6_7_2, 0.7_7_5_8_6_6_4, 0.7_8_1_2_2_2_6, 0.7_6_9_4_2_5_1_5] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def __a ( self ) -> Optional[int]: '''simple 
docstring''' snake_case__ : Any = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' ) snake_case__ : Optional[Any] = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) snake_case__ : Union[str, Any] = self.get_dummy_inputs() snake_case__ : int = pipe(**__UpperCamelCase ).images snake_case__ : Tuple = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) snake_case__ : int = np.array( [0.6_9_7_4_7_8_2, 0.6_8_9_0_2_0_9_3, 0.7_0_1_3_5_8_8_5, 0.7_5_8_3_6_1_8, 0.7_8_0_4_5_4_5, 0.7_8_5_4_9_1_2, 0.7_8_6_6_7_4_2_6, 0.7_8_7_4_3_8_6_3, 0.7_8_0_7_0_2_2_3] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def __a ( self ) -> Optional[int]: '''simple docstring''' snake_case__ : Dict = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' ) snake_case__ : str = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) snake_case__ : str = self.get_dummy_inputs() snake_case__ : Any = pipe(**__UpperCamelCase ).images snake_case__ : Optional[int] = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) snake_case__ : Any = np.array( [0.7_7_4_2_4_4_9_6, 0.7_7_3_6_0_1, 0.7_6_4_5_2_8_8, 0.7_7_6_9_5_9_8, 0.7_7_7_2_7_3_9, 0.7_7_3_8_6_8_8, 0.7_8_1_8_7_2_3_3, 0.7_7_8_7_9_5_8_4, 0.7_6_7_0_4_3] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 @nightly @require_onnxruntime @require_torch_gpu class __snake_case ( unittest.TestCase ): @property def __a ( self ) -> Dict: '''simple docstring''' return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def __a ( self ) -> str: '''simple docstring''' snake_case__ : List[str] = ort.SessionOptions() snake_case__ : Dict = False return options def __a ( self ) -> Optional[int]: '''simple docstring''' snake_case__ : str = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/img2img/sketch-mountains-input.jpg' ) snake_case__ : List[str] = init_image.resize((128, 128) ) # using the PNDM scheduler by default snake_case__ : Union[str, Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained( 'ssube/stable-diffusion-x4-upscaler-onnx' , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) snake_case__ : Union[str, Any] = 'A fantasy landscape, trending on artstation' snake_case__ : Union[str, Any] = torch.manual_seed(0 ) snake_case__ : str = pipe( prompt=__UpperCamelCase , image=__UpperCamelCase , guidance_scale=7.5 , num_inference_steps=10 , generator=__UpperCamelCase , output_type='np' , ) snake_case__ : Tuple = output.images snake_case__ : Optional[Any] = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 512, 3) snake_case__ : Optional[Any] = np.array([0.4_8_8_3, 0.4_9_4_7, 0.4_9_8_0, 0.4_9_7_5, 0.4_9_8_2, 0.4_9_8_0, 0.5_0_0_0, 0.5_0_0_6, 0.4_9_7_2] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2 def __a ( self ) -> Tuple: '''simple docstring''' snake_case__ : Tuple = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/img2img/sketch-mountains-input.jpg' ) snake_case__ : Optional[Any] = init_image.resize((128, 128) ) snake_case__ : Dict = 
LMSDiscreteScheduler.from_pretrained( 'ssube/stable-diffusion-x4-upscaler-onnx' , subfolder='scheduler' ) snake_case__ : str = OnnxStableDiffusionUpscalePipeline.from_pretrained( 'ssube/stable-diffusion-x4-upscaler-onnx' , scheduler=__UpperCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) snake_case__ : int = 'A fantasy landscape, trending on artstation' snake_case__ : str = torch.manual_seed(0 ) snake_case__ : List[Any] = pipe( prompt=__UpperCamelCase , image=__UpperCamelCase , guidance_scale=7.5 , num_inference_steps=20 , generator=__UpperCamelCase , output_type='np' , ) snake_case__ : Union[str, Any] = output.images snake_case__ : Dict = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 512, 3) snake_case__ : Union[str, Any] = np.array( [0.5_0_1_7_3_7_5_3, 0.5_0_2_2_3_3_5_6, 0.5_0_2_0_3_9, 0.5_0_2_3_3_0_3_6, 0.5_0_2_3_7_2_5, 0.5_0_2_2_6_0_1, 0.5_0_1_8_7_5_8, 0.5_0_2_3_4_0_8_5, 0.5_0_2_4_1_5_6_6] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
699
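The upscale tests above reduce to a short standalone inference snippet. A minimal sketch, using only calls that appear in the tests (the checkpoint id, the CPUExecutionProvider, the 128x128 input size, and the numpy output type); the local image path is a hypothetical stand-in for the test fixtures:

from PIL import Image
from diffusers import OnnxStableDiffusionUpscalePipeline

pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
    "ssube/stable-diffusion-x4-upscaler-onnx", provider="CPUExecutionProvider"
)
low_res = Image.open("input.png").resize((128, 128))  # hypothetical local file
result = pipe(
    prompt="A painting of a squirrel eating a burger",
    image=low_res,
    num_inference_steps=3,
    guidance_scale=7.5,
    output_type="numpy",
)
print(result.images[0].shape)  # (512, 512, 3): the 4x upscale the tests assert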
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging lowerCAmelCase__ : Tuple = logging.get_logger(__name__) lowerCAmelCase__ : Union[str, Any] = '''▁''' lowerCAmelCase__ : List[Any] = {'''vocab_file''': '''sentencepiece.bpe.model'''} lowerCAmelCase__ : Optional[Any] = { '''vocab_file''': { '''facebook/xglm-564M''': '''https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model''', } } lowerCAmelCase__ : str = { '''facebook/xglm-564M''': 20_48, } class __snake_case ( _lowerCamelCase ): __lowerCamelCase = VOCAB_FILES_NAMES __lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP __lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowerCamelCase = ["""input_ids""", """attention_mask"""] def __init__( self , __UpperCamelCase , __UpperCamelCase="<s>" , __UpperCamelCase="</s>" , __UpperCamelCase="</s>" , __UpperCamelCase="<s>" , __UpperCamelCase="<unk>" , __UpperCamelCase="<pad>" , __UpperCamelCase = None , **__UpperCamelCase , ) -> None: '''simple docstring''' snake_case__ : Any = {} if sp_model_kwargs is None else sp_model_kwargs # Compatibility with the original tokenizer snake_case__ : Tuple = 7 snake_case__ : Dict = [F"""<madeupword{i}>""" for i in range(self.num_madeup_words )] snake_case__ : Union[str, Any] = kwargs.get('additional_special_tokens' , [] ) kwargs["additional_special_tokens"] += [ word for word in madeup_words if word not in kwargs["additional_special_tokens"] ] super().__init__( bos_token=__UpperCamelCase , eos_token=__UpperCamelCase , unk_token=__UpperCamelCase , sep_token=__UpperCamelCase , cls_token=__UpperCamelCase , pad_token=__UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCamelCase , ) snake_case__ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(__UpperCamelCase ) ) snake_case__ : Optional[Any] = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab snake_case__ : Tuple = 1 # Mimic fairseq token-to-id alignment for the first 4 token snake_case__ : Tuple = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3} snake_case__ : List[Any] = len(self.sp_model ) snake_case__ : Optional[Any] = {F"""<madeupword{i}>""": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )} self.fairseq_tokens_to_ids.update(__UpperCamelCase ) snake_case__ : Union[str, Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self ) -> List[Any]: '''simple docstring''' snake_case__ : Union[str, Any] = self.__dict__.copy() snake_case__ : Optional[Any] = None snake_case__ : Tuple = self.sp_model.serialized_model_proto() return state def __setstate__( self , __UpperCamelCase ) -> Union[str, Any]: '''simple docstring''' snake_case__ : Union[str, Any] = d # for backward compatibility if not hasattr(self , 'sp_model_kwargs' ): snake_case__ : Any = {} snake_case__ : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def __a ( self , __UpperCamelCase , __UpperCamelCase = None ) -> List[int]: '''simple docstring''' if token_ids_a is None: return [self.sep_token_id] + token_ids_a snake_case__ : str = [self.sep_token_id] return sep + token_ids_a + sep + sep + token_ids_a def __a ( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = False ) -> List[int]: '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__UpperCamelCase , token_ids_a=__UpperCamelCase , already_has_special_tokens=__UpperCamelCase ) if token_ids_a is None: return [1] + ([0] * len(__UpperCamelCase )) return [1] + ([0] * len(__UpperCamelCase )) + [1, 1] + ([0] * len(__UpperCamelCase )) def __a ( self , __UpperCamelCase , __UpperCamelCase = None ) -> List[int]: '''simple docstring''' snake_case__ : int = [self.sep_token_id] if token_ids_a is None: return len(sep + token_ids_a ) * [0] return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0] @property def __a ( self ) -> Tuple: '''simple docstring''' return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words def __a ( self ) -> Union[str, Any]: '''simple docstring''' snake_case__ : int = {self.convert_ids_to_tokens(__UpperCamelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __a ( self , __UpperCamelCase ) -> List[str]: '''simple docstring''' return self.sp_model.encode(__UpperCamelCase , out_type=__UpperCamelCase ) def __a ( self , __UpperCamelCase ) -> Union[str, Any]: '''simple docstring''' if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] snake_case__ : Optional[Any] = self.sp_model.PieceToId(__UpperCamelCase ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def __a ( self , __UpperCamelCase ) -> Dict: '''simple docstring''' if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def __a ( self , __UpperCamelCase ) -> int: '''simple docstring''' snake_case__ : int = ''.join(__UpperCamelCase ).replace(__UpperCamelCase , ' ' ).strip() return out_string def __a ( self , __UpperCamelCase , __UpperCamelCase = None ) -> Tuple[str]: '''simple docstring''' if not os.path.isdir(__UpperCamelCase ): 
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return snake_case__ : List[str] = os.path.join( __UpperCamelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCamelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __UpperCamelCase ) elif not os.path.isfile(self.vocab_file ): with open(__UpperCamelCase , 'wb' ) as fi: snake_case__ : Any = self.sp_model.serialized_model_proto() fi.write(__UpperCamelCase ) return (out_vocab_file,)
699
1
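The tokenizer above mimics fairseq's token-to-id alignment by shifting every sentencepiece id by an offset of 1 and pinning the four control tokens to ids 0-3. A minimal sketch of that behaviour, assuming the class is exposed upstream as XGLMTokenizer (the facebook/xglm-564M checkpoint in the vocab map suggests this):

from transformers import XGLMTokenizer

tok = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
# The pinned control-token ids from fairseq_tokens_to_ids above:
assert tok.convert_tokens_to_ids("<s>") == 0
assert tok.convert_tokens_to_ids("<pad>") == 1
assert tok.convert_tokens_to_ids("</s>") == 2
assert tok.convert_tokens_to_ids("<unk>") == 3
# Single sequences are framed as [sep] + ids, per build_inputs_with_special_tokens:
ids = tok.build_inputs_with_special_tokens([10, 11])
assert ids == [tok.sep_token_id, 10, 11]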
import importlib import shutil import threading import warnings from typing import List import fsspec import fsspec.asyn from . import compression from .hffilesystem import HfFileSystem lowerCAmelCase__ : Optional[Any] = importlib.util.find_spec('''s3fs''') is not None if _has_safs: from .safilesystem import SaFileSystem # noqa: F401 lowerCAmelCase__ : List[compression.BaseCompressedFileFileSystem] = [ compression.BzaFileSystem, compression.GzipFileSystem, compression.LzaFileSystem, compression.XzFileSystem, compression.ZstdFileSystem, ] # Register custom filesystems for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]: if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class: warnings.warn(F'''A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.''') fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True) def UpperCamelCase__ ( A__ ) -> str: if "://" in dataset_path: snake_case__ : Optional[int] = dataset_path.split('://' )[1] return dataset_path def UpperCamelCase__ ( A__ ) -> bool: if fs is not None and fs.protocol != "file": return True else: return False def UpperCamelCase__ ( A__ , A__ , A__ ) -> List[str]: snake_case__ : Any = not is_remote_filesystem(A__ ) if is_local: # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory shutil.move(fs._strip_protocol(A__ ) , fs._strip_protocol(A__ ) ) else: fs.mv(A__ , A__ , recursive=A__ ) def UpperCamelCase__ ( ) -> None: if hasattr(fsspec.asyn , 'reset_lock' ): # for future fsspec>2022.05.0 fsspec.asyn.reset_lock() else: snake_case__ : Optional[Any] = None snake_case__ : Union[str, Any] = None snake_case__ : List[str] = threading.Lock()
699
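The filesystem helpers above are small enough to demonstrate in a few lines. In upstream datasets releases this module exposes them as extract_path_from_uri and is_remote_filesystem (the second name is referenced inside the move helper above; the first is an assumption about the exported name):

import fsspec
from datasets.filesystems import extract_path_from_uri, is_remote_filesystem

print(extract_path_from_uri("s3://my-bucket/my-dataset"))  # "my-bucket/my-dataset"
print(extract_path_from_uri("/local/path"))                # unchanged: no "://"
print(is_remote_filesystem(fsspec.filesystem("file")))     # False: protocol is "file"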
import collections from typing import List, Optional, Union from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging from ..bert.tokenization_bert_fast import BertTokenizerFast from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer lowerCAmelCase__ : Any = logging.get_logger(__name__) lowerCAmelCase__ : List[Any] = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''} lowerCAmelCase__ : Any = { '''vocab_file''': { '''facebook/dpr-ctx_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt''' ), '''facebook/dpr-ctx_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''facebook/dpr-ctx_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json''' ), '''facebook/dpr-ctx_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json''' ), }, } lowerCAmelCase__ : Any = { '''vocab_file''': { '''facebook/dpr-question_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt''' ), '''facebook/dpr-question_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''facebook/dpr-question_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json''' ), '''facebook/dpr-question_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json''' ), }, } lowerCAmelCase__ : Tuple = { '''vocab_file''': { '''facebook/dpr-reader-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt''' ), '''facebook/dpr-reader-multiset-base''': ( '''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''facebook/dpr-reader-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json''' ), '''facebook/dpr-reader-multiset-base''': ( '''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json''' ), }, } lowerCAmelCase__ : Dict = { '''facebook/dpr-ctx_encoder-single-nq-base''': 5_12, '''facebook/dpr-ctx_encoder-multiset-base''': 5_12, } lowerCAmelCase__ : Union[str, Any] = { '''facebook/dpr-question_encoder-single-nq-base''': 5_12, '''facebook/dpr-question_encoder-multiset-base''': 5_12, } lowerCAmelCase__ : Optional[Any] = { '''facebook/dpr-reader-single-nq-base''': 5_12, '''facebook/dpr-reader-multiset-base''': 5_12, } lowerCAmelCase__ : Tuple = { '''facebook/dpr-ctx_encoder-single-nq-base''': {'''do_lower_case''': True}, '''facebook/dpr-ctx_encoder-multiset-base''': {'''do_lower_case''': True}, } lowerCAmelCase__ : Any = { '''facebook/dpr-question_encoder-single-nq-base''': {'''do_lower_case''': True}, '''facebook/dpr-question_encoder-multiset-base''': {'''do_lower_case''': True}, } lowerCAmelCase__ : List[str] = { '''facebook/dpr-reader-single-nq-base''': {'''do_lower_case''': True}, '''facebook/dpr-reader-multiset-base''': {'''do_lower_case''': True}, } class __snake_case ( _lowerCamelCase ): __lowerCamelCase = 
VOCAB_FILES_NAMES __lowerCamelCase = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP __lowerCamelCase = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowerCamelCase = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION __lowerCamelCase = DPRContextEncoderTokenizer class __snake_case ( _lowerCamelCase ): __lowerCamelCase = VOCAB_FILES_NAMES __lowerCamelCase = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP __lowerCamelCase = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowerCamelCase = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION __lowerCamelCase = DPRQuestionEncoderTokenizer lowerCAmelCase__ : Tuple = collections.namedtuple( '''DPRSpanPrediction''', ['''span_score''', '''relevance_score''', '''doc_id''', '''start_index''', '''end_index''', '''text'''] ) lowerCAmelCase__ : List[Any] = collections.namedtuple('''DPRReaderOutput''', ['''start_logits''', '''end_logits''', '''relevance_logits''']) lowerCAmelCase__ : int = r''' Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`. It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers), using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)` with the format: [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids> Args: questions (`str` or `List[str]`): The questions to be encoded. You can specify one question for many passages. In this case, the question will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in `titles` or `texts`. titles (`str` or `List[str]`): The passages titles to be encoded. This can be a string or a list of strings if there are several passages. texts (`str` or `List[str]`): The passages texts to be encoded. This can be a string or a list of strings if there are several passages. padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`): Activates and controls padding. Accepts the following values: - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different lengths). truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`): Activates and controls truncation. Accepts the following values: - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided. - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. 
This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size). max_length (`int`, *optional*): Controls the maximum length to use by one of the truncation/padding parameters. If left unset or set to `None`, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated. return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors instead of list of python integers. Acceptable values are: - `\'tf\'`: Return TensorFlow `tf.constant` objects. - `\'pt\'`: Return PyTorch `torch.Tensor` objects. - `\'np\'`: Return Numpy `np.ndarray` objects. return_attention_mask (`bool`, *optional*): Whether or not to return the attention mask. If not set, will return the attention mask according to the specific tokenizer\'s default, defined by the `return_outputs` attribute. [What are attention masks?](../glossary#attention-mask) Return: `Dict[str, List[List[int]]]`: A dictionary with the following keys: - `input_ids`: List of token ids to be fed to a model. - `attention_mask`: List of indices specifying which tokens should be attended to by the model. ''' @add_start_docstrings(_lowerCamelCase ) class __snake_case : def __call__( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = False , __UpperCamelCase = False , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , **__UpperCamelCase , ) -> BatchEncoding: '''simple docstring''' if titles is None and texts is None: return super().__call__( __UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase , max_length=__UpperCamelCase , return_tensors=__UpperCamelCase , return_attention_mask=__UpperCamelCase , **__UpperCamelCase , ) elif titles is None or texts is None: snake_case__ : Optional[Any] = titles if texts is None else texts return super().__call__( __UpperCamelCase , __UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase , max_length=__UpperCamelCase , return_tensors=__UpperCamelCase , return_attention_mask=__UpperCamelCase , **__UpperCamelCase , ) snake_case__ : int = titles if not isinstance(__UpperCamelCase , __UpperCamelCase ) else [titles] snake_case__ : Optional[int] = texts if not isinstance(__UpperCamelCase , __UpperCamelCase ) else [texts] snake_case__ : List[Any] = len(__UpperCamelCase ) snake_case__ : str = questions if not isinstance(__UpperCamelCase , __UpperCamelCase ) else [questions] * n_passages assert len(__UpperCamelCase ) == len( __UpperCamelCase ), F"""There should be as many titles than texts but got {len(__UpperCamelCase )} titles and {len(__UpperCamelCase )} texts.""" snake_case__ : Optional[int] = super().__call__(__UpperCamelCase , __UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase )['input_ids'] snake_case__ : Optional[Any] = super().__call__(__UpperCamelCase , add_special_tokens=__UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase )['input_ids'] snake_case__ : Union[str, Any] = { 'input_ids': [ (encoded_question_and_title + encoded_text)[:max_length] if max_length is not None and truncation else encoded_question_and_title + encoded_text for encoded_question_and_title, 
encoded_text in zip(__UpperCamelCase , __UpperCamelCase ) ] } if return_attention_mask is not False: snake_case__ : List[Any] = [] for input_ids in encoded_inputs["input_ids"]: attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] ) snake_case__ : Union[str, Any] = attention_mask return self.pad(__UpperCamelCase , padding=__UpperCamelCase , max_length=__UpperCamelCase , return_tensors=__UpperCamelCase ) def __a ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = 16 , __UpperCamelCase = 64 , __UpperCamelCase = 4 , ) -> List[DPRSpanPrediction]: '''simple docstring''' snake_case__ : Optional[Any] = reader_input['input_ids'] snake_case__ , snake_case__ , snake_case__ : Any = reader_output[:3] snake_case__ : List[str] = len(__UpperCamelCase ) snake_case__ : Tuple = sorted(range(__UpperCamelCase ) , reverse=__UpperCamelCase , key=relevance_logits.__getitem__ ) snake_case__ : List[DPRReaderOutput] = [] for doc_id in sorted_docs: snake_case__ : Tuple = list(input_ids[doc_id] ) # assuming question & title information is at the beginning of the sequence snake_case__ : int = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id if sequence_ids[-1] == self.pad_token_id: snake_case__ : Union[str, Any] = sequence_ids.index(self.pad_token_id ) else: snake_case__ : str = len(__UpperCamelCase ) snake_case__ : Dict = self._get_best_spans( start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=__UpperCamelCase , top_spans=__UpperCamelCase , ) for start_index, end_index in best_spans: start_index += passage_offset end_index += passage_offset nbest_spans_predictions.append( DPRSpanPrediction( span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=__UpperCamelCase , start_index=__UpperCamelCase , end_index=__UpperCamelCase , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) ) if len(__UpperCamelCase ) >= num_spans: break return nbest_spans_predictions[:num_spans] def __a ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , ) -> List[DPRSpanPrediction]: '''simple docstring''' snake_case__ : Any = [] for start_index, start_score in enumerate(__UpperCamelCase ): for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ): scores.append(((start_index, start_index + answer_length), start_score + end_score) ) snake_case__ : str = sorted(__UpperCamelCase , key=lambda __UpperCamelCase : x[1] , reverse=__UpperCamelCase ) snake_case__ : Any = [] for (start_index, end_index), score in scores: assert start_index <= end_index, F"""Wrong span indices: [{start_index}:{end_index}]""" snake_case__ : str = end_index - start_index + 1 assert length <= max_answer_length, F"""Span is too long: {length} > {max_answer_length}""" if any( start_index <= prev_start_index <= prev_end_index <= end_index or prev_start_index <= start_index <= end_index <= prev_end_index for (prev_start_index, prev_end_index) in chosen_span_intervals ): continue chosen_span_intervals.append((start_index, end_index) ) if len(__UpperCamelCase ) == top_spans: break return chosen_span_intervals @add_end_docstrings(_lowerCamelCase ) class __snake_case ( _lowerCamelCase ,_lowerCamelCase ): __lowerCamelCase = VOCAB_FILES_NAMES __lowerCamelCase = READER_PRETRAINED_VOCAB_FILES_MAP __lowerCamelCase = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowerCamelCase = 
READER_PRETRAINED_INIT_CONFIGURATION __lowerCamelCase = ["""input_ids""", """attention_mask"""] __lowerCamelCase = DPRReaderTokenizer
699
1
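The reader tokenizer's docstring above pins down the row format: [CLS] question [SEP] title [SEP] text, one row per passage, with a single question duplicated across passages. A minimal sketch under those rules, assuming the fast class is exposed as DPRReaderTokenizerFast (the file wires DPRReaderTokenizer in as its slow counterpart) and using made-up passages:

from transformers import DPRReaderTokenizerFast

tokenizer = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
encoded = tokenizer(
    questions="What is the capital of France?",
    titles=["Paris", "France"],  # one title per passage
    texts=["Paris is the capital of France.", "France is a country in Europe."],
    padding=True,
    return_tensors="pt",
)
print(encoded["input_ids"].shape)  # (n_passages=2, sequence_length)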
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCAmelCase__ : Dict = logging.get_logger(__name__) lowerCAmelCase__ : int = { '''sail/poolformer_s12''': '''https://huggingface.co/sail/poolformer_s12/resolve/main/config.json''', # See all PoolFormer models at https://huggingface.co/models?filter=poolformer } class __snake_case ( _lowerCamelCase ): __lowerCamelCase = """poolformer""" def __init__( self , __UpperCamelCase=3 , __UpperCamelCase=16 , __UpperCamelCase=16 , __UpperCamelCase=3 , __UpperCamelCase=4.0 , __UpperCamelCase=[2, 2, 6, 2] , __UpperCamelCase=[64, 128, 320, 512] , __UpperCamelCase=[7, 3, 3, 3] , __UpperCamelCase=[4, 2, 2, 2] , __UpperCamelCase=[2, 1, 1, 1] , __UpperCamelCase=4 , __UpperCamelCase=0.0 , __UpperCamelCase="gelu" , __UpperCamelCase=True , __UpperCamelCase=1E-5 , __UpperCamelCase=0.0_2 , **__UpperCamelCase , ) -> Any: '''simple docstring''' snake_case__ : List[str] = num_channels snake_case__ : Dict = patch_size snake_case__ : Optional[int] = stride snake_case__ : str = padding snake_case__ : List[str] = pool_size snake_case__ : List[Any] = hidden_sizes snake_case__ : List[Any] = mlp_ratio snake_case__ : Union[str, Any] = depths snake_case__ : Dict = patch_sizes snake_case__ : Dict = strides snake_case__ : Dict = num_encoder_blocks snake_case__ : Union[str, Any] = drop_path_rate snake_case__ : List[str] = hidden_act snake_case__ : Optional[Any] = use_layer_scale snake_case__ : int = layer_scale_init_value snake_case__ : Dict = initializer_range super().__init__(**__UpperCamelCase ) class __snake_case ( _lowerCamelCase ): __lowerCamelCase = version.parse("""1.11""" ) @property def __a ( self ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ] ) @property def __a ( self ) -> float: '''simple docstring''' return 2E-3
699
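The configuration defaults above are easiest to read back from an instance. A quick sketch, assuming the classes stand in for the upstream PoolFormerConfig and its ONNX companion (the model_type string is "poolformer"):

from transformers import PoolFormerConfig

config = PoolFormerConfig()   # all defaults from __init__ above
print(config.hidden_sizes)    # [64, 128, 320, 512]
print(config.depths)          # [2, 2, 6, 2]
print(config.mlp_ratio)       # 4.0
# The ONNX config declares one dynamic pixel_values input (batch, num_channels,
# height, width) and a validation tolerance of 2e-3 via its atol property.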
import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, EulerAncestralDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionInstructPixaPixPipeline, UNetaDConditionModel, ) from diffusers.image_processor import VaeImageProcessor from diffusers.utils import floats_tensor, load_image, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class __snake_case ( _lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,unittest.TestCase ): __lowerCamelCase = StableDiffusionInstructPixaPixPipeline __lowerCamelCase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width""", """cross_attention_kwargs"""} __lowerCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS __lowerCamelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS __lowerCamelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS def __a ( self ) -> List[Any]: '''simple docstring''' torch.manual_seed(0 ) snake_case__ : Tuple = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , ) snake_case__ : Any = PNDMScheduler(skip_prk_steps=__UpperCamelCase ) torch.manual_seed(0 ) snake_case__ : Dict = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , ) torch.manual_seed(0 ) snake_case__ : int = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) snake_case__ : Tuple = CLIPTextModel(__UpperCamelCase ) snake_case__ : Optional[int] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) snake_case__ : Optional[int] = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'safety_checker': None, 'feature_extractor': None, } return components def __a ( self , __UpperCamelCase , __UpperCamelCase=0 ) -> Dict: '''simple docstring''' snake_case__ : Optional[Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCamelCase ) ).to(__UpperCamelCase ) snake_case__ : int = image.cpu().permute(0 , 2 , 3 , 1 )[0] snake_case__ : Union[str, Any] = Image.fromarray(np.uinta(__UpperCamelCase ) ).convert('RGB' ) if str(__UpperCamelCase ).startswith('mps' ): snake_case__ : str = torch.manual_seed(__UpperCamelCase ) else: snake_case__ : Dict = torch.Generator(device=__UpperCamelCase ).manual_seed(__UpperCamelCase ) snake_case__ : str = { 'prompt': 'A painting of a squirrel eating a burger', 'image': image, 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 6.0, 'image_guidance_scale': 1, 'output_type': 'numpy', } return inputs def __a ( self ) -> Tuple: '''simple docstring''' snake_case__ : Dict = 'cpu' # ensure determinism for the device-dependent torch.Generator snake_case__ : Optional[int] = 
self.get_dummy_components() snake_case__ : str = StableDiffusionInstructPixaPixPipeline(**__UpperCamelCase ) snake_case__ : Optional[int] = sd_pipe.to(__UpperCamelCase ) sd_pipe.set_progress_bar_config(disable=__UpperCamelCase ) snake_case__ : Tuple = self.get_dummy_inputs(__UpperCamelCase ) snake_case__ : List[str] = sd_pipe(**__UpperCamelCase ).images snake_case__ : List[str] = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) snake_case__ : str = np.array([0.7_5_2_6, 0.3_7_5_0, 0.4_5_4_7, 0.6_1_1_7, 0.5_8_6_6, 0.5_0_1_6, 0.4_3_2_7, 0.5_6_4_2, 0.4_8_1_5] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def __a ( self ) -> Any: '''simple docstring''' snake_case__ : Dict = 'cpu' # ensure determinism for the device-dependent torch.Generator snake_case__ : Union[str, Any] = self.get_dummy_components() snake_case__ : Optional[Any] = StableDiffusionInstructPixaPixPipeline(**__UpperCamelCase ) snake_case__ : List[Any] = sd_pipe.to(__UpperCamelCase ) sd_pipe.set_progress_bar_config(disable=__UpperCamelCase ) snake_case__ : Union[str, Any] = self.get_dummy_inputs(__UpperCamelCase ) snake_case__ : List[str] = 'french fries' snake_case__ : Optional[Any] = sd_pipe(**__UpperCamelCase , negative_prompt=__UpperCamelCase ) snake_case__ : Union[str, Any] = output.images snake_case__ : Tuple = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) snake_case__ : Any = np.array([0.7_5_1_1, 0.3_6_4_2, 0.4_5_5_3, 0.6_2_3_6, 0.5_7_9_7, 0.5_0_1_3, 0.4_3_4_3, 0.5_6_1_1, 0.4_8_3_1] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def __a ( self ) -> int: '''simple docstring''' snake_case__ : Optional[int] = 'cpu' # ensure determinism for the device-dependent torch.Generator snake_case__ : List[str] = self.get_dummy_components() snake_case__ : str = StableDiffusionInstructPixaPixPipeline(**__UpperCamelCase ) snake_case__ : str = sd_pipe.to(__UpperCamelCase ) sd_pipe.set_progress_bar_config(disable=__UpperCamelCase ) snake_case__ : Dict = self.get_dummy_inputs(__UpperCamelCase ) snake_case__ : Any = [inputs['prompt']] * 2 snake_case__ : Optional[int] = np.array(inputs['image'] ).astype(np.floataa ) / 2_5_5.0 snake_case__ : Optional[int] = torch.from_numpy(__UpperCamelCase ).unsqueeze(0 ).to(__UpperCamelCase ) snake_case__ : Any = image / 2 + 0.5 snake_case__ : Optional[Any] = image.permute(0 , 3 , 1 , 2 ) snake_case__ : List[Any] = image.repeat(2 , 1 , 1 , 1 ) snake_case__ : Optional[int] = sd_pipe(**__UpperCamelCase ).images snake_case__ : Union[str, Any] = image[-1, -3:, -3:, -1] assert image.shape == (2, 32, 32, 3) snake_case__ : List[Any] = np.array([0.5_8_1_2, 0.5_7_4_8, 0.5_2_2_2, 0.5_9_0_8, 0.5_6_9_5, 0.7_1_7_4, 0.6_8_0_4, 0.5_5_2_3, 0.5_5_7_9] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def __a ( self ) -> Union[str, Any]: '''simple docstring''' snake_case__ : Optional[int] = 'cpu' # ensure determinism for the device-dependent torch.Generator snake_case__ : Optional[int] = self.get_dummy_components() snake_case__ : Tuple = EulerAncestralDiscreteScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' ) snake_case__ : int = StableDiffusionInstructPixaPixPipeline(**__UpperCamelCase ) snake_case__ : List[str] = sd_pipe.to(__UpperCamelCase ) sd_pipe.set_progress_bar_config(disable=__UpperCamelCase ) snake_case__ : str = self.get_dummy_inputs(__UpperCamelCase ) snake_case__ : Any = sd_pipe(**__UpperCamelCase ).images snake_case__ : int = image[0, -3:, -3:, -1] snake_case__ : Tuple = 
[round(__UpperCamelCase , 4 ) for x in image_slice.flatten().tolist()] print(','.join([str(__UpperCamelCase ) for x in slice] ) ) assert image.shape == (1, 32, 32, 3) snake_case__ : List[Any] = np.array([0.7_4_1_7, 0.3_8_4_2, 0.4_7_3_2, 0.5_7_7_6, 0.5_8_9_1, 0.5_1_3_9, 0.4_0_5_2, 0.5_6_7_3, 0.4_9_8_6] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def __a ( self ) -> int: '''simple docstring''' super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) def __a ( self ) -> Any: '''simple docstring''' snake_case__ : Optional[int] = self.get_dummy_components() snake_case__ : int = StableDiffusionInstructPixaPixPipeline(**__UpperCamelCase ) snake_case__ : Union[str, Any] = VaeImageProcessor(do_resize=__UpperCamelCase , do_normalize=__UpperCamelCase ) snake_case__ : Optional[int] = pipe.to(__UpperCamelCase ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) snake_case__ : Optional[Any] = pipe(**self.get_dummy_inputs_by_type(__UpperCamelCase , input_image_type='pt' ) )[0] snake_case__ : Union[str, Any] = components['vae'] snake_case__ : str = self.get_dummy_inputs_by_type(__UpperCamelCase , input_image_type='pt' ) for image_param in self.image_latents_params: if image_param in inputs.keys(): snake_case__ : List[str] = vae.encode(inputs[image_param] ).latent_dist.mode() snake_case__ : Dict = pipe(**__UpperCamelCase )[0] snake_case__ : str = np.abs(out - out_latents_inputs ).max() self.assertLess(__UpperCamelCase , 1E-4 , 'passing latents as image input generate different result from passing image' ) @slow @require_torch_gpu class __snake_case ( unittest.TestCase ): def __a ( self ) -> List[str]: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def __a ( self , __UpperCamelCase=0 ) -> Dict: '''simple docstring''' snake_case__ : Optional[Any] = torch.manual_seed(__UpperCamelCase ) snake_case__ : List[str] = load_image( 'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg' ) snake_case__ : int = { 'prompt': 'turn him into a cyborg', 'image': image, 'generator': generator, 'num_inference_steps': 3, 'guidance_scale': 7.5, 'image_guidance_scale': 1.0, 'output_type': 'numpy', } return inputs def __a ( self ) -> Dict: '''simple docstring''' snake_case__ : Any = StableDiffusionInstructPixaPixPipeline.from_pretrained( 'timbrooks/instruct-pix2pix' , safety_checker=__UpperCamelCase ) pipe.to(__UpperCamelCase ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) pipe.enable_attention_slicing() snake_case__ : Tuple = self.get_inputs() snake_case__ : List[Any] = pipe(**__UpperCamelCase ).images snake_case__ : Union[str, Any] = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) snake_case__ : Dict = np.array([0.5_9_0_2, 0.6_0_1_5, 0.6_0_2_7, 0.5_9_8_3, 0.6_0_9_2, 0.6_0_6_1, 0.5_7_6_5, 0.5_7_8_5, 0.5_5_5_5] ) assert np.abs(expected_slice - image_slice ).max() < 1E-3 def __a ( self ) -> str: '''simple docstring''' snake_case__ : Any = StableDiffusionInstructPixaPixPipeline.from_pretrained( 'timbrooks/instruct-pix2pix' , safety_checker=__UpperCamelCase ) snake_case__ : Tuple = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.to(__UpperCamelCase ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) pipe.enable_attention_slicing() snake_case__ : Dict = self.get_inputs() snake_case__ : Dict = pipe(**__UpperCamelCase ).images snake_case__ : Union[str, Any] = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) snake_case__ : List[Any] = 
np.array([0.6_5_7_8, 0.6_8_1_7, 0.6_9_7_2, 0.6_7_6_1, 0.6_8_5_6, 0.6_9_1_6, 0.6_4_2_8, 0.6_5_1_6, 0.6_3_0_1] ) assert np.abs(expected_slice - image_slice ).max() < 1E-3 def __a ( self ) -> Optional[Any]: '''simple docstring''' snake_case__ : Tuple = StableDiffusionInstructPixaPixPipeline.from_pretrained( 'timbrooks/instruct-pix2pix' , safety_checker=__UpperCamelCase ) snake_case__ : Tuple = DDIMScheduler.from_config(pipe.scheduler.config ) pipe.to(__UpperCamelCase ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) pipe.enable_attention_slicing() snake_case__ : Optional[int] = self.get_inputs() snake_case__ : Optional[int] = pipe(**__UpperCamelCase ).images snake_case__ : Tuple = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) snake_case__ : int = np.array([0.3_8_2_8, 0.3_8_3_4, 0.3_8_1_8, 0.3_7_9_2, 0.3_8_6_5, 0.3_7_5_2, 0.3_7_9_2, 0.3_8_4_7, 0.3_7_5_3] ) assert np.abs(expected_slice - image_slice ).max() < 1E-3 def __a ( self ) -> Union[str, Any]: '''simple docstring''' snake_case__ : int = 0 def callback_fn(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> None: snake_case__ : List[Any] = True nonlocal number_of_steps number_of_steps += 1 if step == 1: snake_case__ : Any = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 64) snake_case__ : int = latents[0, -3:, -3:, -1] snake_case__ : List[str] = np.array([-0.2_4_6_3, -0.4_6_4_4, -0.9_7_5_6, 1.5_1_7_6, 1.4_4_1_4, 0.7_8_6_6, 0.9_8_9_7, 0.8_5_2_1, 0.7_9_8_3] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2 elif step == 2: snake_case__ : Dict = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 64) snake_case__ : Dict = latents[0, -3:, -3:, -1] snake_case__ : Optional[Any] = np.array([-0.2_6_4_4, -0.4_6_2_6, -0.9_6_5_3, 1.5_1_7_6, 1.4_5_5_1, 0.7_6_8_6, 0.9_8_0_5, 0.8_4_5_2, 0.8_1_1_5] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2 snake_case__ : str = False snake_case__ : List[Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained( 'timbrooks/instruct-pix2pix' , safety_checker=__UpperCamelCase , torch_dtype=torch.floataa ) snake_case__ : int = pipe.to(__UpperCamelCase ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) pipe.enable_attention_slicing() snake_case__ : int = self.get_inputs() pipe(**__UpperCamelCase , callback=__UpperCamelCase , callback_steps=1 ) assert callback_fn.has_been_called assert number_of_steps == 3 def __a ( self ) -> Any: '''simple docstring''' torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() snake_case__ : str = StableDiffusionInstructPixaPixPipeline.from_pretrained( 'timbrooks/instruct-pix2pix' , safety_checker=__UpperCamelCase , torch_dtype=torch.floataa ) snake_case__ : Dict = pipe.to(__UpperCamelCase ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() snake_case__ : str = self.get_inputs() snake_case__ : Tuple = pipe(**__UpperCamelCase ) snake_case__ : List[Any] = torch.cuda.max_memory_allocated() # make sure that less than 2.2 GB is allocated assert mem_bytes < 2.2 * 10**9 def __a ( self ) -> int: '''simple docstring''' snake_case__ : int = self.get_inputs() # resize to resolution that is divisible by 8 but not 16 or 32 snake_case__ : Tuple = inputs['image'].resize((504, 504) ) snake_case__ : str = 'timbrooks/instruct-pix2pix' snake_case__ : Tuple = StableDiffusionInstructPixaPixPipeline.from_pretrained( __UpperCamelCase , 
safety_checker=__UpperCamelCase , ) pipe.to(__UpperCamelCase ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) pipe.enable_attention_slicing() snake_case__ : str = pipe(**__UpperCamelCase ) snake_case__ : List[Any] = output.images[0] snake_case__ : List[Any] = image[255:258, 383:386, -1] assert image.shape == (504, 504, 3) snake_case__ : List[str] = np.array([0.2_7_2_6, 0.2_5_2_9, 0.2_6_6_4, 0.2_6_5_5, 0.2_6_4_1, 0.2_6_4_2, 0.2_5_9_1, 0.2_6_4_9, 0.2_5_9_0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
699
1
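The slow tests above translate directly into a plain editing call. A sketch that keeps the checkpoint id, prompt, and image_guidance_scale from the tests; the local file and the CUDA device are assumptions (the tests download a hosted example image):

import torch
from PIL import Image
from diffusers import StableDiffusionInstructPix2PixPipeline  # upstream spelling of the class under test

pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
    "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16
).to("cuda")
image = Image.open("example.jpg").convert("RGB")  # hypothetical local image
edited = pipe(
    prompt="turn him into a cyborg",
    image=image,
    num_inference_steps=3,
    guidance_scale=7.5,
    image_guidance_scale=1.0,
).images[0]
edited.save("cyborg.jpg")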
from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow if is_tf_available(): import numpy as np import tensorflow as tf from transformers import TFCamembertModel @require_tf @require_sentencepiece @require_tokenizers class __snake_case ( unittest.TestCase ): @slow def __a ( self ) -> Optional[int]: '''simple docstring''' snake_case__ : List[Any] = TFCamembertModel.from_pretrained('jplu/tf-camembert-base' ) snake_case__ : Union[str, Any] = tf.convert_to_tensor( [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]] , dtype=tf.intaa , ) # J'aime le camembert ! snake_case__ : Optional[Any] = model(__UpperCamelCase )['last_hidden_state'] snake_case__ : Union[str, Any] = tf.TensorShape((1, 10, 768) ) self.assertEqual(output.shape , __UpperCamelCase ) # compare the actual values for a slice. snake_case__ : Optional[int] = tf.convert_to_tensor( [[[-0.0_2_5_4, 0.0_2_3_5, 0.1_0_2_7], [0.0_6_0_6, -0.1_8_1_1, -0.0_4_1_8], [-0.1_5_6_1, -0.1_1_2_7, 0.2_6_8_7]]] , dtype=tf.floataa , ) # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0') # camembert.eval() # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach() self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
699
from .data_collator import ( DataCollatorForLanguageModeling, DataCollatorForPermutationLanguageModeling, DataCollatorForSeqaSeq, DataCollatorForSOP, DataCollatorForTokenClassification, DataCollatorForWholeWordMask, DataCollatorWithPadding, DefaultDataCollator, default_data_collator, ) from .metrics import glue_compute_metrics, xnli_compute_metrics from .processors import ( DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor, SquadExample, SquadFeatures, SquadVaProcessor, glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels, squad_convert_examples_to_features, xnli_output_modes, xnli_processors, xnli_tasks_num_labels, )
699
1
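Most of the re-exports above are self-describing; one worth a concrete line is DataCollatorWithPadding, which pads a list of tokenized examples to a common length at batch time. A minimal sketch (the checkpoint id is illustrative):

from transformers import AutoTokenizer
from transformers.data import DataCollatorWithPadding

tok = AutoTokenizer.from_pretrained("bert-base-uncased")
collator = DataCollatorWithPadding(tokenizer=tok)
batch = collator([tok("short"), tok("a somewhat longer sentence")])
print(batch["input_ids"].shape)  # both rows padded to the longer length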
import unittest from transformers import is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision, slow, torch_device if is_torch_available(): import torch from transformers import AutoModelForImageClassification if is_vision_available(): from transformers import AutoImageProcessor @require_torch @require_vision class __snake_case ( unittest.TestCase ): @slow def __a ( self ) -> Any: '''simple docstring''' snake_case__ : Any = AutoImageProcessor.from_pretrained('microsoft/dit-base-finetuned-rvlcdip' ) snake_case__ : Optional[int] = AutoModelForImageClassification.from_pretrained('microsoft/dit-base-finetuned-rvlcdip' ) model.to(__UpperCamelCase ) from datasets import load_dataset snake_case__ : Optional[int] = load_dataset('nielsr/rvlcdip-demo' ) snake_case__ : List[Any] = dataset['train'][0]['image'].convert('RGB' ) snake_case__ : Any = image_processor(__UpperCamelCase , return_tensors='pt' ).to(__UpperCamelCase ) # forward pass with torch.no_grad(): snake_case__ : int = model(**__UpperCamelCase ) snake_case__ : Any = outputs.logits snake_case__ : List[Any] = torch.Size((1, 16) ) self.assertEqual(logits.shape , __UpperCamelCase ) snake_case__ : List[str] = torch.tensor( [-0.4_1_5_8, -0.4_0_9_2, -0.4_3_4_7] , device=__UpperCamelCase , dtype=torch.float , ) self.assertTrue(torch.allclose(logits[0, :3] , __UpperCamelCase , atol=1E-4 ) )
699
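The integration test above reduces to a plain classification snippet; the model id, the dataset id, and the 16 document classes of RVL-CDIP all come straight from the test:

import torch
from datasets import load_dataset
from transformers import AutoImageProcessor, AutoModelForImageClassification

processor = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
model = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")

image = load_dataset("nielsr/rvlcdip-demo")["train"][0]["image"].convert("RGB")
with torch.no_grad():
    logits = model(**processor(image, return_tensors="pt")).logits  # shape (1, 16)
print(model.config.id2label[logits.argmax(-1).item()])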
from dataclasses import dataclass, field from typing import Optional from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser @dataclass class __snake_case : __lowerCamelCase = field( metadata={"""help""": """The output directory where the model will be written."""} ,) __lowerCamelCase = field( metadata={ """help""": ( """The encoder model checkpoint for weights initialization.""" """Don't set if you want to train an encoder model from scratch.""" ) } ,) __lowerCamelCase = field( metadata={ """help""": ( """The decoder model checkpoint for weights initialization.""" """Don't set if you want to train a decoder model from scratch.""" ) } ,) __lowerCamelCase = field( default=_lowerCamelCase ,metadata={"""help""": """Pretrained encoder config name or path if not the same as encoder_model_name"""} ) __lowerCamelCase = field( default=_lowerCamelCase ,metadata={"""help""": """Pretrained decoder config name or path if not the same as decoder_model_name"""} ) def UpperCamelCase__ ( ) -> Union[str, Any]: snake_case__ : str = HfArgumentParser((ModelArguments,) ) ((snake_case__) , ) : Dict = parser.parse_args_into_dataclasses() # Load pretrained model and tokenizer # Use explicit specified encoder config if model_args.encoder_config_name: snake_case__ : List[str] = AutoConfig.from_pretrained(model_args.encoder_config_name ) # Use pretrained encoder model's config else: snake_case__ : Optional[int] = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path ) # Use explicit specified decoder config if model_args.decoder_config_name: snake_case__ : Optional[Any] = AutoConfig.from_pretrained(model_args.decoder_config_name ) # Use pretrained decoder model's config else: snake_case__ : List[str] = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path ) # necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed snake_case__ : Any = True snake_case__ : Dict = True snake_case__ : Tuple = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained( encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path , decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path , encoder_config=A__ , decoder_config=A__ , ) # GPT2 only has bos/eos tokens but not decoder_start/pad tokens snake_case__ : Optional[Any] = decoder_config.decoder_start_token_id snake_case__ : Tuple = decoder_config.pad_token_id if decoder_start_token_id is None: snake_case__ : Optional[Any] = decoder_config.bos_token_id if pad_token_id is None: snake_case__ : int = decoder_config.eos_token_id # This is necessary to make Flax's generate() work snake_case__ : Union[str, Any] = decoder_config.eos_token_id snake_case__ : Optional[int] = decoder_start_token_id snake_case__ : int = pad_token_id snake_case__ : Tuple = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path ) snake_case__ : int = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path ) snake_case__ : Tuple = tokenizer.convert_ids_to_tokens(model.config.pad_token_id ) model.save_pretrained(model_args.output_dir ) image_processor.save_pretrained(model_args.output_dir ) tokenizer.save_pretrained(model_args.output_dir ) if __name__ == "__main__": main()
699
1
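The script above is a thin wrapper around from_encoder_decoder_pretrained plus the token-id bookkeeping GPT-2 needs. A hedged minimal call with illustrative model ids (the script itself takes them from HfArgumentParser flags, and its filename is not given here):

from transformers import FlaxVisionEncoderDecoderModel

model = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
    encoder_pretrained_model_name_or_path="google/vit-base-patch16-224-in21k",  # illustrative
    decoder_pretrained_model_name_or_path="gpt2",                               # illustrative
)
# The script then copies bos/eos ids into decoder_start_token_id / pad_token_id
# before saving the model, image processor, and tokenizer to --output_dir.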
from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class __snake_case ( _lowerCamelCase ): __lowerCamelCase = ["""image_processor""", """tokenizer"""] __lowerCamelCase = """BridgeTowerImageProcessor""" __lowerCamelCase = ("""RobertaTokenizer""", """RobertaTokenizerFast""") def __init__( self , __UpperCamelCase , __UpperCamelCase ) -> Dict: '''simple docstring''' super().__init__(__UpperCamelCase , __UpperCamelCase ) def __call__( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = True , __UpperCamelCase = False , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = 0 , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = False , __UpperCamelCase = False , __UpperCamelCase = False , __UpperCamelCase = False , __UpperCamelCase = True , __UpperCamelCase = None , **__UpperCamelCase , ) -> BatchEncoding: '''simple docstring''' snake_case__ : str = self.tokenizer( text=__UpperCamelCase , add_special_tokens=__UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase , max_length=__UpperCamelCase , stride=__UpperCamelCase , pad_to_multiple_of=__UpperCamelCase , return_token_type_ids=__UpperCamelCase , return_attention_mask=__UpperCamelCase , return_overflowing_tokens=__UpperCamelCase , return_special_tokens_mask=__UpperCamelCase , return_offsets_mapping=__UpperCamelCase , return_length=__UpperCamelCase , verbose=__UpperCamelCase , return_tensors=__UpperCamelCase , **__UpperCamelCase , ) # add pixel_values + pixel_mask snake_case__ : Any = self.image_processor( __UpperCamelCase , return_tensors=__UpperCamelCase , do_normalize=__UpperCamelCase , do_center_crop=__UpperCamelCase , **__UpperCamelCase ) encoding.update(__UpperCamelCase ) return encoding def __a ( self , *__UpperCamelCase , **__UpperCamelCase ) -> Tuple: '''simple docstring''' return self.tokenizer.batch_decode(*__UpperCamelCase , **__UpperCamelCase ) def __a ( self , *__UpperCamelCase , **__UpperCamelCase ) -> Tuple: '''simple docstring''' return self.tokenizer.decode(*__UpperCamelCase , **__UpperCamelCase ) @property def __a ( self ) -> Any: '''simple docstring''' snake_case__ : Optional[int] = self.tokenizer.model_input_names snake_case__ : List[Any] = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
699
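The processor above merges tokenizer and image-processor outputs into one BatchEncoding via encoding.update(...). A usage sketch, assuming the upstream name BridgeTowerProcessor; the checkpoint id and the image path are assumptions:

from PIL import Image
from transformers import BridgeTowerProcessor

processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")  # assumed checkpoint
image = Image.open("cat.png").convert("RGB")  # hypothetical local image
inputs = processor(images=image, text="a photo of a cat", return_tensors="pt")
print(sorted(inputs.keys()))  # text ids/mask plus pixel_values and pixel_mask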
import logging import os from dataclasses import dataclass, field from typing import Dict, Optional import datasets import numpy as np import tensorflow as tf from transformers import ( AutoConfig, AutoTokenizer, EvalPrediction, HfArgumentParser, PreTrainedTokenizer, TFAutoModelForSequenceClassification, TFTrainer, TFTrainingArguments, ) from transformers.utils import logging as hf_logging hf_logging.set_verbosity_info() hf_logging.enable_default_handler() hf_logging.enable_explicit_format() def UpperCamelCase__ ( A__ , A__ , A__ , A__ , A__ , A__ = None , ) -> Optional[int]: snake_case__ : List[str] = {} if train_file is not None: snake_case__ : Tuple = [train_file] if eval_file is not None: snake_case__ : Dict = [eval_file] if test_file is not None: snake_case__ : str = [test_file] snake_case__ : Optional[Any] = datasets.load_dataset('csv' , data_files=A__ ) snake_case__ : Any = list(ds[list(files.keys() )[0]].features.keys() ) snake_case__ : Optional[Any] = features_name.pop(A__ ) snake_case__ : Optional[Any] = list(set(ds[list(files.keys() )[0]][label_name] ) ) snake_case__ : str = {label: i for i, label in enumerate(A__ )} snake_case__ : int = tokenizer.model_input_names snake_case__ : int = {} if len(A__ ) == 1: for k in files.keys(): snake_case__ : str = ds[k].map( lambda A__ : tokenizer.batch_encode_plus( example[features_name[0]] , truncation=A__ , max_length=A__ , padding='max_length' ) , batched=A__ , ) elif len(A__ ) == 2: for k in files.keys(): snake_case__ : Optional[int] = ds[k].map( lambda A__ : tokenizer.batch_encode_plus( (example[features_name[0]], example[features_name[1]]) , truncation=A__ , max_length=A__ , padding='max_length' , ) , batched=A__ , ) def gen_train(): for ex in transformed_ds[datasets.Split.TRAIN]: snake_case__ : int = {k: v for k, v in ex.items() if k in input_names} snake_case__ : Any = labelaid[ex[label_name]] yield (d, label) def gen_val(): for ex in transformed_ds[datasets.Split.VALIDATION]: snake_case__ : int = {k: v for k, v in ex.items() if k in input_names} snake_case__ : Union[str, Any] = labelaid[ex[label_name]] yield (d, label) def gen_test(): for ex in transformed_ds[datasets.Split.TEST]: snake_case__ : Dict = {k: v for k, v in ex.items() if k in input_names} snake_case__ : List[str] = labelaid[ex[label_name]] yield (d, label) snake_case__ : Any = ( tf.data.Dataset.from_generator( A__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.TRAIN in transformed_ds else None ) if train_ds is not None: snake_case__ : str = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) ) snake_case__ : Optional[int] = ( tf.data.Dataset.from_generator( A__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.VALIDATION in transformed_ds else None ) if val_ds is not None: snake_case__ : Optional[int] = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) ) snake_case__ : List[str] = ( tf.data.Dataset.from_generator( A__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.TEST in transformed_ds else None ) if test_ds is not None: snake_case__ : str = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) ) return train_ds, val_ds, test_ds, labelaid lowerCAmelCase__ : List[str] = 
logging.getLogger(__name__) @dataclass class __snake_case : __lowerCamelCase = field(metadata={"""help""": """Which column contains the label"""} ) __lowerCamelCase = field(default=_lowerCamelCase ,metadata={"""help""": """The path of the training file"""} ) __lowerCamelCase = field(default=_lowerCamelCase ,metadata={"""help""": """The path of the development file"""} ) __lowerCamelCase = field(default=_lowerCamelCase ,metadata={"""help""": """The path of the test file"""} ) __lowerCamelCase = field( default=128 ,metadata={ """help""": ( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } ,) __lowerCamelCase = field( default=_lowerCamelCase ,metadata={"""help""": """Overwrite the cached training and evaluation sets"""} ) @dataclass class __snake_case : __lowerCamelCase = field( metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} ) __lowerCamelCase = field( default=_lowerCamelCase ,metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} ) __lowerCamelCase = field( default=_lowerCamelCase ,metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} ) __lowerCamelCase = field(default=_lowerCamelCase ,metadata={"""help""": """Set this flag to use fast tokenization."""} ) # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script, # or just modify its tokenizer_config.json. __lowerCamelCase = field( default=_lowerCamelCase ,metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} ,) def UpperCamelCase__ ( ) -> Union[str, Any]: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. snake_case__ : int = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) ) snake_case__ , snake_case__ , snake_case__ : Dict = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use""" ' --overwrite_output_dir to overcome.' ) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO , ) logger.info( F"""n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, """ F"""16-bits training: {training_args.fpaa}""" ) logger.info(F"""Training/evaluation parameters {training_args}""" ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
snake_case__ : Dict = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) snake_case__ , snake_case__ , snake_case__ , snake_case__ : Dict = get_tfds( train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=A__ , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , ) snake_case__ : Dict = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(A__ ) , labelaid=A__ , idalabel={id: label for label, id in labelaid.items()} , finetuning_task='text-classification' , cache_dir=model_args.cache_dir , ) with training_args.strategy.scope(): snake_case__ : Optional[Any] = TFAutoModelForSequenceClassification.from_pretrained( model_args.model_name_or_path , from_pt=bool('.bin' in model_args.model_name_or_path ) , config=A__ , cache_dir=model_args.cache_dir , ) def compute_metrics(A__ ) -> Dict: snake_case__ : Optional[Any] = np.argmax(p.predictions , axis=1 ) return {"acc": (preds == p.label_ids).mean()} # Initialize our Trainer snake_case__ : Any = TFTrainer( model=A__ , args=A__ , train_dataset=A__ , eval_dataset=A__ , compute_metrics=A__ , ) # Training if training_args.do_train: trainer.train() trainer.save_model() tokenizer.save_pretrained(training_args.output_dir ) # Evaluation snake_case__ : Dict = {} if training_args.do_eval: logger.info('*** Evaluate ***' ) snake_case__ : Tuple = trainer.evaluate() snake_case__ : Any = os.path.join(training_args.output_dir , 'eval_results.txt' ) with open(A__ , 'w' ) as writer: logger.info('***** Eval results *****' ) for key, value in result.items(): logger.info(F""" {key} = {value}""" ) writer.write(F"""{key} = {value}\n""" ) results.update(A__ ) return results if __name__ == "__main__": main()
699
1
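The compute_metrics closure in the script above reduces logits to class ids and averages exact matches against the gold labels. A minimal standalone sketch of that reduction (the toy logits and labels below are hypothetical):

import numpy as np

# Toy 3-example, 2-class logits and gold labels (hypothetical values).
predictions = np.array([[0.1, 0.9], [0.8, 0.2], [0.4, 0.6]])
label_ids = np.array([1, 0, 0])

# Same reduction as the script: argmax over classes, then mean exact-match.
preds = np.argmax(predictions, axis=1)
print({"acc": (preds == label_ids).mean()})  # -> {'acc': 0.666...}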
import argparse import torch from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert from transformers.utils import logging logging.set_verbosity_info() def UpperCamelCase__ ( A__ , A__ , A__ ) -> List[str]: # Initialise PyTorch model snake_case__ : Optional[Any] = LxmertConfig.from_json_file(A__ ) print(F"""Building PyTorch model from configuration: {config}""" ) snake_case__ : Tuple = LxmertForPreTraining(A__ ) # Load weights from tf checkpoint load_tf_weights_in_lxmert(A__ , A__ , A__ ) # Save pytorch-model print(F"""Save PyTorch model to {pytorch_dump_path}""" ) torch.save(model.state_dict() , A__ ) if __name__ == "__main__": lowerCAmelCase__ : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.''' ) parser.add_argument( '''--config_file''', default=None, type=str, required=True, help='''The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.''', ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) lowerCAmelCase__ : Any = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
699
from typing import List import datasets from datasets.tasks import AudioClassification from ..folder_based_builder import folder_based_builder lowerCAmelCase__ : List[Any] = datasets.utils.logging.get_logger(__name__) class __snake_case ( folder_based_builder.FolderBasedBuilderConfig ): __lowerCamelCase = None __lowerCamelCase = None class __snake_case ( folder_based_builder.FolderBasedBuilder ): __lowerCamelCase = datasets.Audio() __lowerCamelCase = """audio""" __lowerCamelCase = AudioFolderConfig __lowerCamelCase = 42 # definition at the bottom of the script __lowerCamelCase = AudioClassification(audio_column="""audio""" ,label_column="""label""" ) lowerCAmelCase__ : Tuple = [ '''.aiff''', '''.au''', '''.avr''', '''.caf''', '''.flac''', '''.htk''', '''.svx''', '''.mat4''', '''.mat5''', '''.mpc2k''', '''.ogg''', '''.paf''', '''.pvf''', '''.raw''', '''.rf64''', '''.sd2''', '''.sds''', '''.ircam''', '''.voc''', '''.w64''', '''.wav''', '''.nist''', '''.wavex''', '''.wve''', '''.xi''', '''.mp3''', '''.opus''', ] lowerCAmelCase__ : List[Any] = AUDIO_EXTENSIONS
699
1
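A sketch of how the folder-based audio builder above is typically consumed; the data_dir path is a placeholder, and label inference from subdirectory names plus filtering by the extension whitelist is the documented `audiofolder` behaviour (requires the `datasets` library with audio extras):

from datasets import load_dataset

# Expects a layout like data_dir/train/<label_name>/*.wav; labels come from
# the subdirectory names, files are filtered by the extension whitelist above.
dataset = load_dataset("audiofolder", data_dir="path/to/audio/folders")
print(dataset["train"][0]["audio"])
print(dataset["train"][0]["label"])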
import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class __snake_case ( _lowerCamelCase ): __lowerCamelCase = ["""image_processor""", """tokenizer"""] __lowerCamelCase = """ViTImageProcessor""" __lowerCamelCase = ("""CLIPTokenizer""", """CLIPTokenizerFast""") def __init__( self , __UpperCamelCase=None , __UpperCamelCase=None , **__UpperCamelCase ) -> Any: '''simple docstring''' snake_case__ : Optional[int] = None if "feature_extractor" in kwargs: warnings.warn( 'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`' ' instead.' , __UpperCamelCase , ) snake_case__ : Dict = kwargs.pop('feature_extractor' ) snake_case__ : List[str] = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('You need to specify an `image_processor`.' ) if tokenizer is None: raise ValueError('You need to specify a `tokenizer`.' ) super().__init__(__UpperCamelCase , __UpperCamelCase ) def __call__( self , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None , **__UpperCamelCase ) -> Dict: '''simple docstring''' if text is None and visual_prompt is None and images is None: raise ValueError('You have to specify either text, visual prompt or images.' ) if text is not None and visual_prompt is not None: raise ValueError('You have to specify exactly one type of prompt. Either text or visual prompt.' ) if text is not None: snake_case__ : Union[str, Any] = self.tokenizer(__UpperCamelCase , return_tensors=__UpperCamelCase , **__UpperCamelCase ) if visual_prompt is not None: snake_case__ : Optional[int] = self.image_processor(__UpperCamelCase , return_tensors=__UpperCamelCase , **__UpperCamelCase ) if images is not None: snake_case__ : str = self.image_processor(__UpperCamelCase , return_tensors=__UpperCamelCase , **__UpperCamelCase ) if visual_prompt is not None and images is not None: snake_case__ : int = { 'pixel_values': image_features.pixel_values, 'conditional_pixel_values': prompt_features.pixel_values, } return encoding elif text is not None and images is not None: snake_case__ : List[str] = image_features.pixel_values return encoding elif text is not None: return encoding elif visual_prompt is not None: snake_case__ : str = { 'conditional_pixel_values': prompt_features.pixel_values, } return encoding else: return BatchEncoding(data=dict(**__UpperCamelCase ) , tensor_type=__UpperCamelCase ) def __a ( self , *__UpperCamelCase , **__UpperCamelCase ) -> List[Any]: '''simple docstring''' return self.tokenizer.batch_decode(*__UpperCamelCase , **__UpperCamelCase ) def __a ( self , *__UpperCamelCase , **__UpperCamelCase ) -> List[str]: '''simple docstring''' return self.tokenizer.decode(*__UpperCamelCase , **__UpperCamelCase ) @property def __a ( self ) -> str: '''simple docstring''' warnings.warn( '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , __UpperCamelCase , ) return self.image_processor_class @property def __a ( self ) -> str: '''simple docstring''' warnings.warn( '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , __UpperCamelCase , ) return self.image_processor
699
import random import unittest import torch from diffusers import IFInpaintingPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class __snake_case ( _lowerCamelCase ,_lowerCamelCase ,unittest.TestCase ): __lowerCamelCase = IFInpaintingPipeline __lowerCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"""width""", """height"""} __lowerCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS __lowerCamelCase = PipelineTesterMixin.required_optional_params - {"""latents"""} def __a ( self ) -> Optional[Any]: '''simple docstring''' return self._get_dummy_components() def __a ( self , __UpperCamelCase , __UpperCamelCase=0 ) -> str: '''simple docstring''' if str(__UpperCamelCase ).startswith('mps' ): snake_case__ : int = torch.manual_seed(__UpperCamelCase ) else: snake_case__ : Union[str, Any] = torch.Generator(device=__UpperCamelCase ).manual_seed(__UpperCamelCase ) snake_case__ : Optional[int] = floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCamelCase ) ).to(__UpperCamelCase ) snake_case__ : Dict = floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCamelCase ) ).to(__UpperCamelCase ) snake_case__ : Optional[Any] = { 'prompt': 'A painting of a squirrel eating a burger', 'image': image, 'mask_image': mask_image, 'generator': generator, 'num_inference_steps': 2, 'output_type': 'numpy', } return inputs @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , ) def __a ( self ) -> List[Any]: '''simple docstring''' self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) def __a ( self ) -> Optional[int]: '''simple docstring''' self._test_save_load_optional_components() @unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' ) def __a ( self ) -> List[str]: '''simple docstring''' super().test_save_load_floataa(expected_max_diff=1E-1 ) def __a ( self ) -> List[str]: '''simple docstring''' self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 ) def __a ( self ) -> int: '''simple docstring''' self._test_save_load_local() def __a ( self ) -> List[str]: '''simple docstring''' self._test_inference_batch_single_identical( expected_max_diff=1E-2 , )
699
1
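The pipeline test above derives every input tensor from seeded generators so repeated runs compare equal. A reduced sketch of that pattern (the 1x3x32x32 shapes echo the snippet; the mask construction is illustrative):

import torch

def get_dummy_inputs(device: str = "cpu", seed: int = 0) -> dict:
    # Seed both the generator handed to the pipeline and the RNG that builds
    # the input tensors, so every run sees identical inputs.
    generator = torch.Generator(device=device).manual_seed(seed)
    rng = torch.Generator().manual_seed(seed)
    image = torch.rand((1, 3, 32, 32), generator=rng)
    mask_image = (torch.rand((1, 1, 32, 32), generator=rng) > 0.5).float()
    return {"image": image, "mask_image": mask_image, "generator": generator}

inputs = get_dummy_inputs()
print({k: tuple(v.shape) for k, v in inputs.items() if torch.is_tensor(v)})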
import pprint import requests lowerCAmelCase__ : List[Any] = '''https://zenquotes.io/api''' def UpperCamelCase__ ( ) -> list: return requests.get(API_ENDPOINT_URL + '/today' ).json() def UpperCamelCase__ ( ) -> list: return requests.get(API_ENDPOINT_URL + '/random' ).json() if __name__ == "__main__": lowerCAmelCase__ : List[Any] = random_quotes() pprint.pprint(response)
699
import unittest from transformers import BertGenerationTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin lowerCAmelCase__ : List[Any] = '''▁''' lowerCAmelCase__ : int = get_tests_dir('''fixtures/test_sentencepiece.model''') @require_sentencepiece class __snake_case ( _lowerCamelCase ,unittest.TestCase ): __lowerCamelCase = BertGenerationTokenizer __lowerCamelCase = False __lowerCamelCase = True def __a ( self ) -> Optional[int]: '''simple docstring''' super().setUp() snake_case__ : str = BertGenerationTokenizer(__UpperCamelCase , keep_accents=__UpperCamelCase ) tokenizer.save_pretrained(self.tmpdirname ) def __a ( self ) -> Optional[int]: '''simple docstring''' snake_case__ : List[str] = '<s>' snake_case__ : Dict = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCamelCase ) , __UpperCamelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCamelCase ) , __UpperCamelCase ) def __a ( self ) -> List[str]: '''simple docstring''' snake_case__ : Tuple = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '<unk>' ) self.assertEqual(vocab_keys[1] , '<s>' ) self.assertEqual(vocab_keys[-1] , '<pad>' ) self.assertEqual(len(__UpperCamelCase ) , 1002 ) def __a ( self ) -> int: '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 1000 ) def __a ( self ) -> Tuple: '''simple docstring''' snake_case__ : Optional[Any] = BertGenerationTokenizer(__UpperCamelCase , keep_accents=__UpperCamelCase ) snake_case__ : int = tokenizer.tokenize('This is a test' ) self.assertListEqual(__UpperCamelCase , ['▁This', '▁is', '▁a', '▁t', 'est'] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(__UpperCamelCase ) , [285, 46, 10, 170, 382] , ) snake_case__ : Any = tokenizer.tokenize('I was born in 92000, and this is falsé.' ) self.assertListEqual( __UpperCamelCase , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.', ] , ) snake_case__ : Optional[Any] = tokenizer.convert_tokens_to_ids(__UpperCamelCase ) self.assertListEqual( __UpperCamelCase , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , ) snake_case__ : int = tokenizer.convert_ids_to_tokens(__UpperCamelCase ) self.assertListEqual( __UpperCamelCase , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.', ] , ) @cached_property def __a ( self ) -> Dict: '''simple docstring''' return BertGenerationTokenizer.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' ) @slow def __a ( self ) -> Any: '''simple docstring''' snake_case__ : int = 'Hello World!' snake_case__ : Union[str, Any] = [18536, 2260, 101] self.assertListEqual(__UpperCamelCase , self.big_tokenizer.encode(__UpperCamelCase ) ) @slow def __a ( self ) -> Optional[int]: '''simple docstring''' snake_case__ : str = ( 'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . 
Also we will' ' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth' ) snake_case__ : List[Any] = [ 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172, ] self.assertListEqual(__UpperCamelCase , self.big_tokenizer.encode(__UpperCamelCase ) ) @require_torch @slow def __a ( self ) -> List[str]: '''simple docstring''' import torch from transformers import BertGenerationConfig, BertGenerationEncoder # Build sequence snake_case__ : Optional[int] = list(self.big_tokenizer.get_vocab().keys() )[:10] snake_case__ : Optional[int] = ' '.join(__UpperCamelCase ) snake_case__ : int = self.big_tokenizer.encode_plus(__UpperCamelCase , return_tensors='pt' , return_token_type_ids=__UpperCamelCase ) snake_case__ : Tuple = self.big_tokenizer.batch_encode_plus( [sequence + ' ' + sequence] , return_tensors='pt' , return_token_type_ids=__UpperCamelCase ) snake_case__ : Dict = BertGenerationConfig() snake_case__ : List[str] = BertGenerationEncoder(__UpperCamelCase ) assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size with torch.no_grad(): model(**__UpperCamelCase ) model(**__UpperCamelCase ) @slow def __a ( self ) -> Dict: '''simple docstring''' snake_case__ : Optional[int] = {'input_ids': [[39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114], [448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__UpperCamelCase , model_name='google/bert_for_seq_generation_L-24_bbc_encoder' , revision='c817d1fd1be2ffa69431227a1fe320544943d4db' , )
699
1
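A round-trip sketch in the spirit of the integration checks above; the checkpoint id is the one the test references, and loading it requires network access plus the sentencepiece package:

from transformers import BertGenerationTokenizer

tokenizer = BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
ids = tokenizer.encode("Hello World!")
print(ids)                    # the test above pins this to [18536, 2260, 101]
print(tokenizer.decode(ids))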
import argparse import json import os import tensorstore as ts import torch from flax import serialization from flax.traverse_util import flatten_dict, unflatten_dict from tensorflow.io import gfile from transformers.modeling_utils import dtype_byte_size from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import ( rename_keys, ) from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME from transformers.utils.hub import convert_file_size_to_int def UpperCamelCase__ ( A__ , A__ ) -> Any: if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3: # expert layer snake_case__ : Optional[int] = flax_key_tuple[:-1] + ('weight',) snake_case__ : int = torch.permute(A__ , (0, 2, 1) ) elif flax_key_tuple[-1] == "kernel" and ".".join(A__ ): # linear layer snake_case__ : str = flax_key_tuple[:-1] + ('weight',) snake_case__ : Optional[int] = flax_tensor.T elif flax_key_tuple[-1] in ["scale", "embedding"]: snake_case__ : Any = flax_key_tuple[:-1] + ('weight',) return flax_key_tuple, flax_tensor def UpperCamelCase__ ( A__ , A__ , A__ ) -> Optional[int]: if "metadata" in layer: snake_case__ : int = layer.split('metadata' ) snake_case__ : Tuple = ''.join(split_layer[0] )[:-1] snake_case__ : int = [tuple(('metadata' + split_layer[1]).split('/' ) )] elif "kvstore" in layer: snake_case__ : Any = layer.split('kvstore' ) snake_case__ : List[str] = ''.join(split_layer[0] )[:-1] snake_case__ : List[Any] = [tuple(('kvstore' + split_layer[1]).split('/' ) )] else: snake_case__ : Tuple = layer.split('/' ) snake_case__ : Dict = '/'.join(split_layer[:-1] ) snake_case__ : List[Any] = (split_layer[-1],) if "kvstore/path" in layer: snake_case__ : List[Any] = F"""{switch_checkpoint_path}/{checkpoint_info[layer]}""" elif "kvstore/driver" in layer: snake_case__ : Union[str, Any] = 'file' else: snake_case__ : int = checkpoint_info[layer] return curr_real_layer_name, split_layer, content def UpperCamelCase__ ( A__ , A__ ) -> List[str]: snake_case__ : List[Any] = rename_keys(A__ ) snake_case__ : List[str] = {} for k, v in current_block.items(): snake_case__ : Union[str, Any] = v snake_case__ : List[Any] = new_current_block torch.save(A__ , A__ ) def UpperCamelCase__ ( A__ , A__ , A__ , A__ , A__ = WEIGHTS_NAME ) -> Dict: snake_case__ : Any = convert_file_size_to_int(A__ ) snake_case__ : Tuple = [] snake_case__ : int = {} snake_case__ : Dict = 0 snake_case__ : int = 0 os.makedirs(A__ , exist_ok=A__ ) with gfile.GFile(switch_checkpoint_path + '/checkpoint' , 'rb' ) as fp: snake_case__ : Optional[Any] = serialization.msgpack_restore(fp.read() )['optimizer']['target'] snake_case__ : List[Any] = flatten_dict(A__ , sep='/' ) snake_case__ : Union[str, Any] = {} for layer in checkpoint_info.keys(): snake_case__ , snake_case__ , snake_case__ : List[Any] = get_key_and_tensorstore_dict( A__ , A__ , A__ ) if curr_real_layer_name in all_layers: snake_case__ : Union[str, Any] = content else: snake_case__ : Optional[Any] = {split_layer[-1]: content} for key in all_layers.keys(): # open tensorstore file snake_case__ : Optional[Any] = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result() snake_case__ : Optional[int] = torch.tensor(A__ ) snake_case__ : Optional[Any] = raw_weights.numel() * dtype_byte_size(raw_weights.dtype ) # use the renaming pattern from the small conversion scripts snake_case__ , snake_case__ : Dict = rename_base_flax_keys(tuple(key.split('/' ) ) , A__ ) snake_case__ : Dict = '/'.join(A__ ) # If this weight is going to tip up over the maximal size, we split. if current_block_size + weight_size > max_shard_size: snake_case__ : str = os.path.join( A__ , weights_name.replace('.bin' , F"""-{len(A__ )+1:05d}-of-???.bin""" ) ) rename_and_save_block(A__ , A__ ) sharded_state_dicts.append(current_block.keys() ) del current_block snake_case__ : Dict = {} snake_case__ : Optional[Any] = 0 snake_case__ : Optional[Any] = raw_weights.to(getattr(A__ , A__ ) ) current_block_size += weight_size total_size += weight_size # Add the last block snake_case__ : Tuple = os.path.join(A__ , weights_name.replace('.bin' , F"""-{len(A__ )+1:05d}-of-???.bin""" ) ) rename_and_save_block(A__ , A__ ) sharded_state_dicts.append(current_block.keys() ) # If we only have one shard, we return it if len(A__ ) == 1: return {weights_name: sharded_state_dicts[0]}, None # Otherwise, let's build the index snake_case__ : List[str] = {} snake_case__ : int = {} for idx, shard in enumerate(A__ ): snake_case__ : int = weights_name.replace( '.bin' , F"""-{idx+1:05d}-of-{len(A__ ):05d}.bin""" ) # len(sharded_state_dicts):05d} snake_case__ : str = os.path.join(A__ , weights_name.replace('.bin' , F"""-{idx+1:05d}-of-???.bin""" ) ) os.rename(A__ , os.path.join(A__ , A__ ) ) snake_case__ : Optional[int] = shard for key in shard: snake_case__ : List[str] = shard_file # Add the metadata snake_case__ : Dict = {'total_size': total_size} snake_case__ : Any = {'metadata': metadata, 'weight_map': weight_map} with open(os.path.join(A__ , A__ ) , 'w' , encoding='utf-8' ) as f: snake_case__ : Union[str, Any] = json.dumps(A__ , indent=2 , sort_keys=A__ ) + '\n' f.write(A__ ) return metadata, index if __name__ == "__main__": lowerCAmelCase__ : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--switch_t5x_checkpoint_path''', default='''/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600''', type=str, required=False, help='''Path to a directory containing a folder per layer. Follows the original Google format.''', ) parser.add_argument('''--max_shard_size''', default='''10GB''', required=False, help='''Max shard size''') parser.add_argument('''--dtype''', default='''bfloat16''', type=str, required=False, help='''dtype of the saved model''') parser.add_argument( '''--pytorch_dump_folder_path''', default='''/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted''', type=str, required=False, help='''Path to the output pytorch model.''', ) lowerCAmelCase__ : List[str] = parser.parse_args() shard_on_the_fly( args.switch_tax_checkpoint_path, args.pytorch_dump_folder_path, args.max_shard_size, args.dtype, ) def UpperCamelCase__ ( ) -> str: from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer snake_case__ : Tuple = SwitchTransformersConfig.from_pretrained('google/switch-base-8' ) config.save_pretrained('/home/arthur_huggingface_co/transformers/switch_converted' ) snake_case__ : Tuple = SwitchTransformersForConditionalGeneration.from_pretrained( '/home/arthur_huggingface_co/transformers/switch_converted' , device_map='auto' ) snake_case__ : int = TaTokenizer.from_pretrained('t5-small' ) snake_case__ : Optional[Any] = 'A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.' snake_case__ : Optional[int] = tokenizer(A__ , return_tensors='pt' ).input_ids snake_case__ : List[str] = model.generate(A__ , decoder_start_token_id=0 ) print(tokenizer.decode(out[0] ) )
699
import random import torch from huggingface_hub import HfApi from diffusers import UNetaDModel lowerCAmelCase__ : List[str] = HfApi() lowerCAmelCase__ : str = {} # fmt: off lowerCAmelCase__ : int = torch.tensor([ -0.75_15, -1.68_83, 0.24_20, 0.03_00, 0.63_47, 1.34_33, -1.17_43, -3.74_67, 1.23_42, -2.24_85, 0.46_36, 0.80_76, -0.79_91, 0.39_69, 0.84_98, 0.91_89, -1.88_87, -3.35_22, 0.76_39, 0.20_40, 0.62_71, -2.71_48, -1.63_16, 3.08_39, 0.31_86, 0.27_21, -0.97_59, -1.24_61, 2.62_57, 1.35_57 ]) lowerCAmelCase__ : Dict = torch.tensor([ -2.36_39, -2.53_44, 0.00_54, -0.66_74, 1.59_90, 1.01_58, 0.31_24, -2.14_36, 1.87_95, -2.54_29, -0.15_66, -0.39_73, 1.24_90, 2.64_47, 1.22_83, -0.52_08, -2.81_54, -3.51_19, 2.38_38, 1.20_33, 1.72_01, -2.12_56, -1.45_76, 2.79_48, 2.42_04, -0.97_52, -1.25_46, 0.80_27, 3.27_58, 3.13_65 ]) lowerCAmelCase__ : Dict = torch.tensor([ -0.65_31, -0.68_91, -0.31_72, -0.53_75, -0.91_40, -0.53_67, -0.11_75, -0.78_69, -0.38_08, -0.45_13, -0.20_98, -0.00_83, 0.31_83, 0.51_40, 0.22_47, -0.13_04, -0.13_02, -0.28_02, -0.20_84, -0.20_25, -0.49_67, -0.48_73, -0.08_61, 0.69_25, 0.02_50, 0.12_90, -0.15_43, 0.63_16, 1.04_60, 1.49_43 ]) lowerCAmelCase__ : List[str] = torch.tensor([ 0.09_11, 0.11_07, 0.01_82, 0.04_35, -0.08_05, -0.06_08, 0.03_81, 0.21_72, -0.02_80, 0.13_27, -0.02_99, -0.02_55, -0.00_50, -0.11_70, -0.10_46, 0.03_09, 0.13_67, 0.17_28, -0.05_33, -0.07_48, -0.05_34, 0.16_24, 0.03_84, -0.18_05, -0.07_07, 0.06_42, 0.02_20, -0.01_34, -0.13_33, -0.15_05 ]) lowerCAmelCase__ : Union[str, Any] = torch.tensor([ 0.13_21, 0.13_37, 0.04_40, 0.06_22, -0.05_91, -0.03_70, 0.05_03, 0.21_33, -0.01_77, 0.14_15, -0.01_16, -0.01_12, 0.00_44, -0.09_80, -0.07_89, 0.03_95, 0.15_02, 0.17_85, -0.04_88, -0.05_14, -0.04_04, 0.15_39, 0.04_54, -0.15_59, -0.06_65, 0.06_59, 0.03_83, -0.00_05, -0.12_66, -0.13_86 ]) lowerCAmelCase__ : List[Any] = torch.tensor([ 0.11_54, 0.12_18, 0.03_07, 0.05_26, -0.07_11, -0.05_41, 0.03_66, 0.20_78, -0.02_67, 0.13_17, -0.02_26, -0.01_93, -0.00_14, -0.10_55, -0.09_02, 0.03_30, 0.13_91, 0.17_09, -0.05_62, -0.06_93, -0.05_60, 0.14_82, 0.03_81, -0.16_83, -0.06_81, 0.06_61, 0.03_31, -0.00_46, -0.12_68, -0.14_31 ]) lowerCAmelCase__ : Optional[Any] = torch.tensor([ 0.11_92, 0.12_40, 0.04_14, 0.06_06, -0.05_57, -0.04_12, 0.04_30, 0.20_42, -0.02_00, 0.13_85, -0.01_15, -0.01_32, 0.00_17, -0.09_65, -0.08_02, 0.03_98, 0.14_33, 0.17_47, -0.04_58, -0.05_33, -0.04_07, 0.15_45, 0.04_19, -0.15_74, -0.06_45, 0.06_26, 0.03_41, -0.00_10, -0.11_99, -0.13_90 ]) lowerCAmelCase__ : List[str] = torch.tensor([ 0.10_75, 0.10_74, 0.02_05, 0.04_31, -0.07_74, -0.06_07, 0.02_98, 0.20_42, -0.03_20, 0.12_67, -0.02_81, -0.02_50, -0.00_64, -0.10_91, -0.09_46, 0.02_90, 0.13_28, 0.16_50, -0.05_80, -0.07_38, -0.05_86, 0.14_40, 0.03_37, -0.17_46, -0.07_12, 0.06_05, 0.02_50, -0.00_99, -0.13_16, -0.14_73 ]) lowerCAmelCase__ : List[str] = torch.tensor([ -1.45_72, -2.04_81, -0.04_14, -0.60_05, 1.41_36, 0.58_48, 0.40_28, -2.73_30, 1.22_12, -2.12_28, 0.21_55, 0.40_39, 0.76_62, 2.05_35, 0.74_77, -0.32_43, -2.17_58, -2.76_48, 1.69_47, 0.70_26, 1.23_38, -1.60_78, -0.86_82, 2.28_10, 1.85_74, -0.57_18, -0.55_86, -0.01_86, 2.34_15, 2.12_51]) lowerCAmelCase__ : List[Any] = torch.tensor([ -1.36_90, -1.97_20, -0.40_90, -0.69_66, 1.46_60, 0.99_38, -0.13_85, -2.73_24, 0.77_36, -1.89_17, 0.29_23, 0.42_93, 0.16_93, 1.41_12, 1.18_87, -0.31_81, -2.21_60, -2.63_81, 1.31_70, 0.81_63, 0.92_40, -1.65_44, -0.60_99, 2.52_59, 1.64_30, -0.90_90, -0.93_92, -0.01_26, 2.42_68, 2.32_66 ]) lowerCAmelCase__ : Tuple = torch.tensor([ -1.35_25, 
-1.96_28, -0.39_56, -0.68_60, 1.46_64, 1.00_14, -0.12_59, -2.72_12, 0.77_72, -1.88_11, 0.29_96, 0.43_88, 0.17_04, 1.40_29, 1.17_01, -0.30_27, -2.20_53, -2.62_87, 1.33_50, 0.81_31, 0.92_74, -1.62_92, -0.60_98, 2.51_31, 1.65_05, -0.89_58, -0.92_98, -0.01_51, 2.42_57, 2.33_55 ]) lowerCAmelCase__ : List[str] = torch.tensor([ -2.05_85, -2.78_97, -0.28_50, -0.89_40, 1.90_52, 0.57_02, 0.63_45, -3.89_59, 1.59_32, -3.23_19, 0.19_74, 0.02_87, 1.75_66, 2.65_43, 0.83_87, -0.53_51, -3.27_36, -4.33_75, 2.90_29, 1.63_90, 1.46_40, -2.17_01, -1.90_13, 2.93_41, 3.49_81, -0.62_55, -1.16_44, -0.15_91, 3.70_97, 3.20_66 ]) lowerCAmelCase__ : Dict = torch.tensor([ -2.31_39, -2.55_94, -0.01_97, -0.67_85, 1.70_01, 1.16_06, 0.30_75, -2.17_40, 1.80_71, -2.56_30, -0.09_26, -0.38_11, 1.21_16, 2.62_46, 1.27_31, -0.53_98, -2.81_53, -3.61_40, 2.38_93, 1.32_62, 1.62_58, -2.18_56, -1.32_67, 2.83_95, 2.37_79, -1.06_23, -1.24_68, 0.89_59, 3.33_67, 3.22_43 ]) lowerCAmelCase__ : Dict = torch.tensor([ -2.06_28, -2.76_67, -0.20_89, -0.82_63, 2.05_39, 0.59_92, 0.64_95, -3.83_36, 1.60_25, -3.28_17, 0.17_21, -0.06_33, 1.75_16, 2.70_39, 0.81_00, -0.59_08, -3.21_13, -4.43_43, 2.92_57, 1.36_32, 1.55_62, -2.14_89, -1.98_94, 3.05_60, 3.33_96, -0.73_28, -1.04_17, 0.03_83, 3.70_93, 3.23_43 ]) lowerCAmelCase__ : Any = torch.tensor([ -1.45_74, -2.05_69, -0.04_73, -0.61_17, 1.40_18, 0.57_69, 0.41_29, -2.73_44, 1.22_41, -2.13_97, 0.20_00, 0.39_37, 0.76_16, 2.04_53, 0.73_24, -0.33_91, -2.17_46, -2.77_44, 1.69_63, 0.69_21, 1.21_87, -1.61_72, -0.88_77, 2.24_39, 1.84_71, -0.58_39, -0.56_05, -0.04_64, 2.32_50, 2.12_19 ]) # fmt: on lowerCAmelCase__ : Any = api.list_models(filter='''diffusers''') for mod in models: if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256": lowerCAmelCase__ : List[str] = '''/home/patrick/google_checkpoints/''' + mod.modelId.split('''/''')[-1] print(F'''Started running {mod.modelId}!!!''') if mod.modelId.startswith('''CompVis'''): lowerCAmelCase__ : int = UNetaDModel.from_pretrained(local_checkpoint, subfolder='''unet''') else: lowerCAmelCase__ : str = UNetaDModel.from_pretrained(local_checkpoint) torch.manual_seed(0) random.seed(0) lowerCAmelCase__ : Any = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size) lowerCAmelCase__ : List[str] = torch.tensor([10] * noise.shape[0]) with torch.no_grad(): lowerCAmelCase__ : int = model(noise, time_step).sample assert torch.allclose( logits[0, 0, 0, :30], results['''_'''.join('''_'''.join(mod.modelId.split('''/''')).split('''-'''))], atol=1E-3 ) print(F'''{mod.modelId} has passed successfully!!!''')
699
1
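The loop above is a golden-value regression harness: fixed seed, fixed timestep, then the first 30 output logits compared against hard-coded tensors. The same pattern in miniature (the expected tensor here merely stands in for a value captured from a known-good run):

import torch

torch.manual_seed(0)
observed = torch.randn(30)

# In a real test this constant comes from a trusted earlier run, exactly like
# the hard-coded tensors above; cloning keeps this sketch self-checking.
expected = observed.clone()

assert torch.allclose(observed, expected, atol=1e-3)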
# Usage: # ./gen-card-allenai-wmt16.py import os from pathlib import Path def UpperCamelCase__ ( A__ , A__ , A__ , A__ ) -> Dict: snake_case__ : int = { 'en': 'Machine learning is great, isn\'t it?', 'ru': 'Машинное обучение - это здорово, не так ли?', 'de': 'Maschinelles Lernen ist großartig, nicht wahr?', } # BLEU scores as follows: # "pair": [fairseq, transformers] snake_case__ : List[str] = { 'wmt16-en-de-dist-12-1': [2_8.3, 2_7.5_2], 'wmt16-en-de-dist-6-1': [2_7.4, 2_7.1_1], 'wmt16-en-de-12-1': [2_6.9, 2_5.7_5], } snake_case__ : Any = F"""{src_lang}-{tgt_lang}""" snake_case__ : List[Any] = F""" --- language: - {src_lang} - {tgt_lang} thumbnail: tags: - translation - wmt16 - allenai license: apache-2.0 datasets: - wmt16 metrics: - bleu --- # FSMT ## Model description This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}. For more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369). All 3 models are available: * [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1) * [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1) * [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1) ## Intended uses & limitations #### How to use ```python from transformers import FSMTForConditionalGeneration, FSMTTokenizer mname = \"allenai/{model_name}\" tokenizer = FSMTTokenizer.from_pretrained(mname) model = FSMTForConditionalGeneration.from_pretrained(mname) input = \"{texts[src_lang]}\" input_ids = tokenizer.encode(input, return_tensors=\"pt\") outputs = model.generate(input_ids) decoded = tokenizer.decode(outputs[0], skip_special_tokens=True) print(decoded) # {texts[tgt_lang]} ``` #### Limitations and bias ## Training data Pretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369). ## Eval results Here are the BLEU scores: model | fairseq | transformers -------|---------|---------- {model_name} | {scores[model_name][0]} | {scores[model_name][1]} The score is slightly below the score reported in the paper, as the researchers don't use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs. The score was calculated using this code: ```bash git clone https://github.com/huggingface/transformers cd transformers export PAIR={pair} export DATA_DIR=data/$PAIR export SAVE_DIR=data/$PAIR export BS=8 export NUM_BEAMS=5 mkdir -p $DATA_DIR sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target echo $PAIR PYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS ``` ## Data Sources - [training, etc.](http://www.statmt.org/wmt16/) - [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372) ### BibTeX entry and citation info ``` @misc{{kasai2020deep, title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}}, author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}}, year={{2020}}, eprint={{2006.10369}}, archivePrefix={{arXiv}}, primaryClass={{cs.CL}} }} ``` """ model_card_dir.mkdir(parents=A__ , exist_ok=A__ ) snake_case__ : Tuple = os.path.join(A__ , 'README.md' ) print(F"""Generating {path}""" ) with open(A__ , 'w' , encoding='utf-8' ) as f: f.write(A__ ) # make sure we are under the root of the project lowerCAmelCase__ : List[Any] = Path(__file__).resolve().parent.parent.parent lowerCAmelCase__ : str = repo_dir / '''model_cards''' for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]: lowerCAmelCase__ : Optional[Any] = model_cards_dir / '''allenai''' / model_name write_model_card(model_card_dir, src_lang='''en''', tgt_lang='''de''', model_name=model_name)
699
import warnings from ...utils import logging from .image_processing_perceiver import PerceiverImageProcessor lowerCAmelCase__ : Dict = logging.get_logger(__name__) class __snake_case ( _lowerCamelCase ): def __init__( self , *__UpperCamelCase , **__UpperCamelCase ) -> None: '''simple docstring''' warnings.warn( 'The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers.' ' Please use PerceiverImageProcessor instead.' , __UpperCamelCase , ) super().__init__(*__UpperCamelCase , **__UpperCamelCase )
699
1
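The shim above keeps a removed class importable while steering callers to its replacement. A generic sketch of the same pattern (the class names here are illustrative, not from any library):

import warnings

class NewImageProcessor:
    def __init__(self, *args, **kwargs):
        self.init_args = (args, kwargs)

class OldFeatureExtractor(NewImageProcessor):
    # The old name stays importable; construction warns and delegates fully.
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldFeatureExtractor is deprecated and will be removed; "
            "use NewImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)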
import unittest from transformers import SPIECE_UNDERLINE from transformers.models.speechta import SpeechTaTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.tokenization_utils import AddedToken from ...test_tokenization_common import TokenizerTesterMixin lowerCAmelCase__ : Dict = get_tests_dir('''fixtures/test_sentencepiece_bpe_char.model''') @require_sentencepiece @require_tokenizers class __snake_case ( _lowerCamelCase ,unittest.TestCase ): __lowerCamelCase = SpeechTaTokenizer __lowerCamelCase = False __lowerCamelCase = True def __a ( self ) -> str: '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing snake_case__ : Dict = SpeechTaTokenizer(__UpperCamelCase ) snake_case__ : List[str] = AddedToken('<mask>' , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) snake_case__ : int = mask_token tokenizer.add_special_tokens({'mask_token': mask_token} ) tokenizer.add_tokens(['<ctc_blank>'] ) tokenizer.save_pretrained(self.tmpdirname ) def __a ( self , __UpperCamelCase ) -> Any: '''simple docstring''' snake_case__ : int = 'this is a test' snake_case__ : Optional[Any] = 'this is a test' return input_text, output_text def __a ( self , __UpperCamelCase , __UpperCamelCase=False , __UpperCamelCase=20 , __UpperCamelCase=5 ) -> int: '''simple docstring''' snake_case__ , snake_case__ : Any = self.get_input_output_texts(__UpperCamelCase ) snake_case__ : Optional[int] = tokenizer.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase ) snake_case__ : List[Any] = tokenizer.decode(__UpperCamelCase , clean_up_tokenization_spaces=__UpperCamelCase ) return text, ids def __a ( self ) -> List[Any]: '''simple docstring''' snake_case__ : Union[str, Any] = '<pad>' snake_case__ : Any = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCamelCase ) , __UpperCamelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCamelCase ) , __UpperCamelCase ) def __a ( self ) -> int: '''simple docstring''' snake_case__ : Optional[int] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '<s>' ) self.assertEqual(vocab_keys[1] , '<pad>' ) self.assertEqual(vocab_keys[-4] , 'œ' ) self.assertEqual(vocab_keys[-2] , '<mask>' ) self.assertEqual(vocab_keys[-1] , '<ctc_blank>' ) self.assertEqual(len(__UpperCamelCase ) , 81 ) def __a ( self ) -> Tuple: '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 79 ) def __a ( self ) -> Tuple: '''simple docstring''' snake_case__ : Union[str, Any] = self.get_tokenizers(do_lower_case=__UpperCamelCase ) for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): snake_case__ : List[Any] = tokenizer.vocab_size snake_case__ : Optional[Any] = len(__UpperCamelCase ) self.assertNotEqual(__UpperCamelCase , 0 ) # We usually have added tokens from the start in tests because our vocab fixtures are # smaller than the original vocabs - let's not assert this # self.assertEqual(vocab_size, all_size) snake_case__ : Tuple = ['aaaaa bbbbbb', 'cccccccccdddddddd'] snake_case__ : int = tokenizer.add_tokens(__UpperCamelCase ) snake_case__ : Optional[Any] = tokenizer.vocab_size snake_case__ : Optional[int] = len(__UpperCamelCase ) self.assertNotEqual(__UpperCamelCase , 0 ) self.assertEqual(__UpperCamelCase , __UpperCamelCase ) self.assertEqual(__UpperCamelCase , len(__UpperCamelCase ) ) self.assertEqual(__UpperCamelCase , all_size + len(__UpperCamelCase ) ) snake_case__ : Optional[Any] = 
tokenizer.encode('aaaaa bbbbbb low cccccccccdddddddd l' , add_special_tokens=__UpperCamelCase ) self.assertGreaterEqual(len(__UpperCamelCase ) , 4 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) snake_case__ : Union[str, Any] = {'eos_token': '>>>>|||<||<<|<<', 'pad_token': '<<<<<|||>|>>>>|>'} snake_case__ : Optional[int] = tokenizer.add_special_tokens(__UpperCamelCase ) snake_case__ : List[str] = tokenizer.vocab_size snake_case__ : Union[str, Any] = len(__UpperCamelCase ) self.assertNotEqual(__UpperCamelCase , 0 ) self.assertEqual(__UpperCamelCase , __UpperCamelCase ) self.assertEqual(__UpperCamelCase , len(__UpperCamelCase ) ) self.assertEqual(__UpperCamelCase , all_size_a + len(__UpperCamelCase ) ) snake_case__ : List[str] = tokenizer.encode( '>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l' , add_special_tokens=__UpperCamelCase ) self.assertGreaterEqual(len(__UpperCamelCase ) , 6 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[0] , tokens[1] ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokens[-4] ) self.assertEqual(tokens[0] , tokenizer.eos_token_id ) self.assertEqual(tokens[-3] , tokenizer.pad_token_id ) def __a ( self ) -> Optional[int]: '''simple docstring''' pass def __a ( self ) -> List[Any]: '''simple docstring''' pass def __a ( self ) -> Tuple: '''simple docstring''' snake_case__ : Any = self.get_tokenizer() snake_case__ : List[str] = tokenizer.tokenize('This is a test' ) # fmt: off self.assertListEqual(__UpperCamelCase , [SPIECE_UNDERLINE, 'T', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'a', SPIECE_UNDERLINE, 't', 'e', 's', 't'] ) # fmt: on self.assertListEqual( tokenizer.convert_tokens_to_ids(__UpperCamelCase ) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , ) snake_case__ : Dict = tokenizer.tokenize('I was born in 92000, and this is falsé.' ) self.assertListEqual( __UpperCamelCase , [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '92000', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'] ) snake_case__ : Dict = tokenizer.convert_tokens_to_ids(__UpperCamelCase ) # fmt: off self.assertListEqual(__UpperCamelCase , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] ) # fmt: on snake_case__ : Union[str, Any] = tokenizer.convert_ids_to_tokens(__UpperCamelCase ) self.assertListEqual( __UpperCamelCase , [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '<unk>', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'] ) @slow def __a ( self ) -> Dict: '''simple docstring''' snake_case__ : Optional[Any] = [ 'Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides ' 'general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) 
for Natural ' 'Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained ' 'models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.', 'BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly ' 'conditioning on both left and right context in all layers.', 'The quick brown fox jumps over the lazy dog.', ] # fmt: off snake_case__ : Tuple = { 'input_ids': [ [4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2], [4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], ], 'attention_mask': [ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], ] } # fmt: on self.tokenizer_integration_test_util( expected_encoding=__UpperCamelCase , model_name='microsoft/speecht5_asr' , revision='c5ef64c71905caeccde0e4462ef3f9077224c524' , sequences=__UpperCamelCase , )
699
import io import itertools import json from dataclasses import dataclass from typing import Optional import pyarrow as pa import pyarrow.json as paj import datasets from datasets.table import table_cast from datasets.utils.file_utils import readline lowerCAmelCase__ : List[Any] = datasets.utils.logging.get_logger(__name__) @dataclass class __snake_case ( datasets.BuilderConfig ): __lowerCamelCase = None __lowerCamelCase = "utf-8" __lowerCamelCase = None __lowerCamelCase = None __lowerCamelCase = True # deprecated __lowerCamelCase = None # deprecated __lowerCamelCase = 10 << 20 # 10MB __lowerCamelCase = None class __snake_case ( datasets.ArrowBasedBuilder ): __lowerCamelCase = JsonConfig def __a ( self ) -> Optional[Any]: '''simple docstring''' if self.config.block_size is not None: logger.warning('The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead' ) snake_case__ : str = self.config.block_size if self.config.use_threads is not True: logger.warning( 'The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.' ) if self.config.newlines_in_values is not None: raise ValueError('The JSON loader parameter `newlines_in_values` is no longer supported' ) return datasets.DatasetInfo(features=self.config.features ) def __a ( self , __UpperCamelCase ) -> Dict: '''simple docstring''' if not self.config.data_files: raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" ) snake_case__ : int = dl_manager.download_and_extract(self.config.data_files ) if isinstance(__UpperCamelCase , (str, list, tuple) ): snake_case__ : Any = data_files if isinstance(__UpperCamelCase , __UpperCamelCase ): snake_case__ : Optional[Any] = [files] snake_case__ : List[str] = [dl_manager.iter_files(__UpperCamelCase ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'files': files} )] snake_case__ : List[Any] = [] for split_name, files in data_files.items(): if isinstance(__UpperCamelCase , __UpperCamelCase ): snake_case__ : List[Any] = [files] snake_case__ : Any = [dl_manager.iter_files(__UpperCamelCase ) for file in files] splits.append(datasets.SplitGenerator(name=__UpperCamelCase , gen_kwargs={'files': files} ) ) return splits def __a ( self , __UpperCamelCase ) -> pa.Table: '''simple docstring''' if self.config.features is not None: # adding missing columns for column_name in set(self.config.features ) - set(pa_table.column_names ): snake_case__ : List[Any] = self.config.features.arrow_schema.field(__UpperCamelCase ).type snake_case__ : List[str] = pa_table.append_column(__UpperCamelCase , pa.array([None] * len(__UpperCamelCase ) , type=__UpperCamelCase ) ) # more expensive cast to support nested structures with keys in a different order # allows str <-> int/float or str to Audio for example snake_case__ : List[str] = table_cast(__UpperCamelCase , self.config.features.arrow_schema ) return pa_table def __a ( self , __UpperCamelCase ) -> int: '''simple docstring''' for file_idx, file in enumerate(itertools.chain.from_iterable(__UpperCamelCase ) ): # If the file is one json object and if we need to look at the list of items in one specific field if self.config.field is not None: with open(__UpperCamelCase , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f: snake_case__ : Union[str, Any] = json.load(__UpperCamelCase ) # We keep only the field we are interested in snake_case__ : Tuple = dataset[self.config.field] # We accept two formats: a list of dicts or a dict of lists if isinstance(__UpperCamelCase , (list, tuple) ): snake_case__ : List[Any] = set().union(*[row.keys() for row in dataset] ) snake_case__ : List[Any] = {col: [row.get(__UpperCamelCase ) for row in dataset] for col in keys} else: snake_case__ : List[Any] = dataset snake_case__ : Dict = pa.Table.from_pydict(__UpperCamelCase ) yield file_idx, self._cast_table(__UpperCamelCase ) # If the file has one json object per line else: with open(__UpperCamelCase , 'rb' ) as f: snake_case__ : Optional[int] = 0 # Use block_size equal to the chunk size divided by 32 to leverage multithreading # Set a default minimum value of 16kB if the chunk size is really small snake_case__ : Tuple = max(self.config.chunksize // 32 , 16 << 10 ) snake_case__ : Optional[Any] = ( self.config.encoding_errors if self.config.encoding_errors is not None else 'strict' ) while True: snake_case__ : Optional[int] = f.read(self.config.chunksize ) if not batch: break # Finish current line try: batch += f.readline() except (AttributeError, io.UnsupportedOperation): batch += readline(__UpperCamelCase ) # PyArrow only accepts utf-8 encoded bytes if self.config.encoding != "utf-8": snake_case__ : int = batch.decode(self.config.encoding , errors=__UpperCamelCase ).encode('utf-8' ) try: while True: try: snake_case__ : List[str] = paj.read_json( io.BytesIO(__UpperCamelCase ) , read_options=paj.ReadOptions(block_size=__UpperCamelCase ) ) break except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e: if ( isinstance(__UpperCamelCase , pa.ArrowInvalid ) and "straddling" not in str(__UpperCamelCase ) or block_size > len(__UpperCamelCase ) ): raise else: # Increase the block size in case it was too small. # The block size will be reset for the next file. logger.debug( F"""Batch of {len(__UpperCamelCase )} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.""" ) block_size *= 2 except pa.ArrowInvalid as e: try: with open( __UpperCamelCase , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f: snake_case__ : Tuple = json.load(__UpperCamelCase ) except json.JSONDecodeError: logger.error(F"""Failed to read file '{file}' with error {type(__UpperCamelCase )}: {e}""" ) raise e # If possible, parse the file as a list of json objects and exit the loop if isinstance(__UpperCamelCase , __UpperCamelCase ): # list is the only sequence type supported in JSON try: snake_case__ : str = set().union(*[row.keys() for row in dataset] ) snake_case__ : Union[str, Any] = {col: [row.get(__UpperCamelCase ) for row in dataset] for col in keys} snake_case__ : List[str] = pa.Table.from_pydict(__UpperCamelCase ) except (pa.ArrowInvalid, AttributeError) as e: logger.error(F"""Failed to read file '{file}' with error {type(__UpperCamelCase )}: {e}""" ) raise ValueError(F"""Not able to read records in the JSON file at {file}.""" ) from None yield file_idx, self._cast_table(__UpperCamelCase ) break else: logger.error(F"""Failed to read file '{file}' with error {type(__UpperCamelCase )}: {e}""" ) raise ValueError( F"""Not able to read records in the JSON file at {file}. """ F"""You should probably indicate the field of the JSON file containing your records. """ F"""This JSON file contains the following fields: {str(list(dataset.keys() ) )}. """ F"""Select the correct one and provide it as `field='XXX'` to the dataset loading method. """ ) from None # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(__UpperCamelCase ) batch_idx += 1
699
1
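The builder above hands byte chunks to pyarrow and doubles block_size until a chunk parses. A minimal standalone parse of newline-delimited JSON with the same API (the payload is a toy example):

import io

import pyarrow.json as paj

payload = b'{"a": 1, "b": "x"}\n{"a": 2, "b": "y"}\n'
table = paj.read_json(
    io.BytesIO(payload),
    read_options=paj.ReadOptions(block_size=16 << 10),  # the builder's 16kB floor
)
print(table.to_pydict())  # {'a': [1, 2], 'b': ['x', 'y']}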
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase__ : Any = {'''configuration_xglm''': ['''XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XGLMConfig''']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ : str = ['''XGLMTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ : Dict = ['''XGLMTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ : Optional[int] = [ '''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''', '''XGLMForCausalLM''', '''XGLMModel''', '''XGLMPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ : Dict = [ '''FlaxXGLMForCausalLM''', '''FlaxXGLMModel''', '''FlaxXGLMPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ : Dict = [ '''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFXGLMForCausalLM''', '''TFXGLMModel''', '''TFXGLMPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xglm import XGLMTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xglm_fast import XGLMTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, TFXGLMPreTrainedModel, ) else: import sys lowerCAmelCase__ : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
699
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase__ : Any = {'''configuration_xglm''': ['''XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XGLMConfig''']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ : str = ['''XGLMTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ : Dict = ['''XGLMTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ : Optional[int] = [ '''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''', '''XGLMForCausalLM''', '''XGLMModel''', '''XGLMPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ : Dict = [ '''FlaxXGLMForCausalLM''', '''FlaxXGLMModel''', '''FlaxXGLMPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ : Dict = [ '''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFXGLMForCausalLM''', '''TFXGLMModel''', '''TFXGLMPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xglm import XGLMTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xglm_fast import XGLMTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, TFXGLMPreTrainedModel, ) else: import sys lowerCAmelCase__ : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
699
1
import argparse from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta from transformers.utils import logging logging.set_verbosity_info() def UpperCamelCase__ ( A__ , A__ , A__ ) -> Any: # Initialise PyTorch model snake_case__ : Dict = TaConfig.from_json_file(A__ ) print(F"""Building PyTorch model from configuration: {config}""" ) snake_case__ : Union[str, Any] = TaForConditionalGeneration(A__ ) # Load weights from tf checkpoint load_tf_weights_in_ta(A__ , A__ , A__ ) # Save pytorch-model print(F"""Save PyTorch model to {pytorch_dump_path}""" ) model.save_pretrained(A__ ) if __name__ == "__main__": lowerCAmelCase__ : Dict = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.''' ) parser.add_argument( '''--config_file''', default=None, type=str, required=True, help=( '''The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.''' ), ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) lowerCAmelCase__ : Dict = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
699
from __future__ import annotations import random # Maximum size of the population. Bigger could be faster but is more memory expensive. lowerCAmelCase__ : Dict = 2_00 # Number of elements selected in every generation of evolution. The selection takes # place from best to worst of that generation and must be smaller than N_POPULATION. lowerCAmelCase__ : List[str] = 50 # Probability that an element of a generation can mutate, changing one of its genes. # This will guarantee that all genes will be used during evolution. lowerCAmelCase__ : List[str] = 0.4 # Just a seed to improve randomness required by the algorithm. random.seed(random.randint(0, 10_00)) def UpperCamelCase__ ( A__ , A__ ) -> tuple[str, float]: snake_case__ : Tuple = len([g for position, g in enumerate(A__ ) if g == main_target[position]] ) return (item, float(A__ )) def UpperCamelCase__ ( A__ , A__ ) -> tuple[str, str]: snake_case__ : str = random.randint(0 , len(A__ ) - 1 ) snake_case__ : int = parent_a[:random_slice] + parent_a[random_slice:] snake_case__ : Any = parent_a[:random_slice] + parent_a[random_slice:] return (child_a, child_a) def UpperCamelCase__ ( A__ , A__ ) -> str: snake_case__ : List[Any] = list(A__ ) if random.uniform(0 , 1 ) < MUTATION_PROBABILITY: snake_case__ : Optional[Any] = random.choice(A__ ) return "".join(A__ ) def UpperCamelCase__ ( A__ , A__ , A__ , ) -> list[str]: snake_case__ : Tuple = [] # Generate more children proportionally to the fitness score. snake_case__ : Optional[Any] = int(parent_a[1] * 100 ) + 1 snake_case__ : str = 10 if child_n >= 10 else child_n for _ in range(A__ ): snake_case__ : Any = population_score[random.randint(0 , A__ )][0] snake_case__ , snake_case__ : int = crossover(parent_a[0] , A__ ) # Append new string to the population list. pop.append(mutate(A__ , A__ ) ) pop.append(mutate(A__ , A__ ) ) return pop def UpperCamelCase__ ( A__ , A__ , A__ = True ) -> tuple[int, int, str]: # Verify if N_POPULATION is bigger than N_SELECTED if N_POPULATION < N_SELECTED: snake_case__ : Union[str, Any] = F"""{N_POPULATION} must be bigger than {N_SELECTED}""" raise ValueError(A__ ) # Verify that the target contains no genes besides the ones inside genes variable. snake_case__ : Tuple = sorted({c for c in target if c not in genes} ) if not_in_genes_list: snake_case__ : int = F"""{not_in_genes_list} is not in genes list, evolution cannot converge""" raise ValueError(A__ ) # Generate random starting population. snake_case__ : Union[str, Any] = [] for _ in range(A__ ): population.append(''.join([random.choice(A__ ) for i in range(len(A__ ) )] ) ) # Just some logs to know what the algorithms is doing. snake_case__ , snake_case__ : str = 0, 0 # This loop will end when we find a perfect match for our target. while True: generation += 1 total_population += len(A__ ) # Random population created. Now it's time to evaluate. # Adding a bit of concurrency can make everything faster, # # import concurrent.futures # population_score: list[tuple[str, float]] = [] # with concurrent.futures.ThreadPoolExecutor( # max_workers=NUM_WORKERS) as executor: # futures = {executor.submit(evaluate, item) for item in population} # concurrent.futures.wait(futures) # population_score = [item.result() for item in futures] # # but with a simple algorithm like this, it will probably be slower. # We just need to call evaluate for every item inside the population. snake_case__ : List[Any] = [evaluate(A__ , A__ ) for item in population] # Check if there is a matching evolution. 
snake_case__ : int = sorted(A__ , key=lambda x : x[1] , reverse=A__ ) if population_score[0][0] == target: return (generation, total_population, population_score[0][0]) # Print the best result every 10 generations. # Just to know that the algorithm is working. if debug and generation % 10 == 0: print( F"""\nGeneration: {generation}""" F"""\nTotal Population: {total_population}""" F"""\nBest score: {population_score[0][1]}""" F"""\nBest string: {population_score[0][0]}""" ) # Flush the old population, keeping some of the best evolutions. # Keeping this avoids regression of evolution. snake_case__ : Optional[int] = population[: int(N_POPULATION / 3 )] population.clear() population.extend(A__ ) # Normalize population score to be between 0 and 1. snake_case__ : str = [ (item, score / len(A__ )) for item, score in population_score ] # This is selection for i in range(A__ ): population.extend(select(population_score[int(A__ )] , A__ , A__ ) ) # Check if the population has already reached the maximum value and if so, # break the cycle. If this check is disabled, the algorithm will take # forever to compute large strings, but will also calculate small strings in # far fewer generations. if len(A__ ) > N_POPULATION: break if __name__ == "__main__": lowerCAmelCase__ : str = ( '''This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!''' ) lowerCAmelCase__ : Optional[Any] = list( ''' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm''' '''nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\''' ) lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__ : List[str] = basic(target_str, genes_list) print( F'''\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}''' )
699
1
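The record above is the tail of the genetic-algorithm script from the head of this dump; a tiny self-contained sketch of its fitness scoring and best-first sort (the deobfuscated names are my guesses at what the obfuscated variables stand for):

def evaluate(item: str, main_target: str) -> tuple[str, float]:
    # Fitness = number of characters that match the target at the same position.
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))

population = ["abc", "abd", "xbd"]
population_score = [evaluate(item, "abd") for item in population]
# Best-first sort, as in the (bug-fixed) sorted(..., key=lambda x: x[1], reverse=True) above.
population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
assert population_score[0] == ("abd", 3.0)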
def UpperCamelCase__ ( A__ ) -> list: if len(A__ ) <= 1: return [tuple(A__ )] snake_case__ : Dict = [] def generate(A__ , A__ ): if k == 1: res.append(tuple(arr[:] ) ) return generate(k - 1 , A__ ) for i in range(k - 1 ): if k % 2 == 0: # k is even snake_case__ , snake_case__ : Optional[Any] = arr[k - 1], arr[i] else: # k is odd snake_case__ , snake_case__ : Optional[Any] = arr[k - 1], arr[0] generate(k - 1 , A__ ) generate(len(A__ ) , A__ ) return res if __name__ == "__main__": lowerCAmelCase__ : Tuple = input('''Enter numbers separated by a comma:\n''').strip() lowerCAmelCase__ : Dict = [int(item) for item in user_input.split(''',''')] print(heaps(arr))
699
from __future__ import annotations from collections.abc import Iterator from typing import Generic, TypeVar lowerCAmelCase__ : Optional[int] = TypeVar('''T''') class __snake_case ( Generic[T] ): def __init__( self , __UpperCamelCase ) -> Any: '''simple docstring''' snake_case__ : Optional[int] = data snake_case__ : Node[T] | None = None def __str__( self ) -> str: '''simple docstring''' return F"""{self.data}""" class __snake_case ( Generic[T] ): def __init__( self ) -> None: '''simple docstring''' snake_case__ : Node[T] | None = None def __iter__( self ) -> Iterator[T]: '''simple docstring''' snake_case__ : str = self.top while node: yield node.data snake_case__ : Dict = node.next def __str__( self ) -> str: '''simple docstring''' return "->".join([str(__UpperCamelCase ) for item in self] ) def __len__( self ) -> int: '''simple docstring''' return len(tuple(iter(self ) ) ) def __a ( self ) -> bool: '''simple docstring''' return self.top is None def __a ( self , __UpperCamelCase ) -> None: '''simple docstring''' snake_case__ : str = Node(__UpperCamelCase ) if not self.is_empty(): snake_case__ : List[str] = self.top snake_case__ : Tuple = node def __a ( self ) -> T: '''simple docstring''' if self.is_empty(): raise IndexError('pop from empty stack' ) assert isinstance(self.top , __UpperCamelCase ) snake_case__ : List[str] = self.top snake_case__ : Union[str, Any] = self.top.next return pop_node.data def __a ( self ) -> T: '''simple docstring''' if self.is_empty(): raise IndexError('peek from empty stack' ) assert self.top is not None return self.top.data def __a ( self ) -> None: '''simple docstring''' snake_case__ : Any = None if __name__ == "__main__": from doctest import testmod testmod()
699
1
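A self-contained sketch of Heap's algorithm as the permutation record above implements it (names are mine), property-checked against itertools.permutations:

from itertools import permutations

def heaps(arr: list) -> list:
    # Generate all permutations of arr, producing each new one with a single swap.
    if len(arr) <= 1:
        return [tuple(arr)]
    res = []

    def generate(k: int, arr: list) -> None:
        if k == 1:
            res.append(tuple(arr))
            return
        generate(k - 1, arr)
        for i in range(k - 1):
            if k % 2 == 0:  # k even: swap the i-th and last elements
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else:  # k odd: swap the first and last elements
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res

assert sorted(heaps([1, 2, 3])) == sorted(permutations([1, 2, 3]))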
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse from .config import config_command_parser from .config_args import default_config_file, load_config_from_file # noqa: F401 from .default import default_command_parser from .update import update_command_parser def UpperCamelCase__ ( A__=None ) -> int: snake_case__ : Any = argparse.ArgumentParser(add_help=A__ , allow_abbrev=A__ ) # The main config parser snake_case__ : List[Any] = config_command_parser(A__ ) # The subparser to add commands to snake_case__ : Tuple = config_parser.add_subparsers(title='subcommands' , dest='subcommand' ) # Then add other parsers with the parent parser default_command_parser(A__ , parents=[parent_parser] ) update_command_parser(A__ , parents=[parent_parser] ) return config_parser def UpperCamelCase__ ( ) -> List[str]: snake_case__ : Union[str, Any] = get_config_parser() snake_case__ : List[str] = config_parser.parse_args() if not hasattr(A__ , 'func' ): config_parser.print_help() exit(1 ) # Run args.func(A__ ) if __name__ == "__main__": main()
699
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCAmelCase__ : Dict = logging.get_logger(__name__) lowerCAmelCase__ : int = { '''sail/poolformer_s12''': '''https://huggingface.co/sail/poolformer_s12/resolve/main/config.json''', # See all PoolFormer models at https://huggingface.co/models?filter=poolformer } class __snake_case ( _lowerCamelCase ): __lowerCamelCase = """poolformer""" def __init__( self , __UpperCamelCase=3 , __UpperCamelCase=16 , __UpperCamelCase=16 , __UpperCamelCase=3 , __UpperCamelCase=4.0 , __UpperCamelCase=[2, 2, 6, 2] , __UpperCamelCase=[64, 128, 320, 512] , __UpperCamelCase=[7, 3, 3, 3] , __UpperCamelCase=[4, 2, 2, 2] , __UpperCamelCase=[2, 1, 1, 1] , __UpperCamelCase=4 , __UpperCamelCase=0.0 , __UpperCamelCase="gelu" , __UpperCamelCase=True , __UpperCamelCase=1E-5 , __UpperCamelCase=0.0_2 , **__UpperCamelCase , ) -> Any: '''simple docstring''' snake_case__ : List[str] = num_channels snake_case__ : Dict = patch_size snake_case__ : Optional[int] = stride snake_case__ : str = padding snake_case__ : List[str] = pool_size snake_case__ : List[Any] = hidden_sizes snake_case__ : List[Any] = mlp_ratio snake_case__ : Union[str, Any] = depths snake_case__ : Dict = patch_sizes snake_case__ : Dict = strides snake_case__ : Dict = num_encoder_blocks snake_case__ : Union[str, Any] = drop_path_rate snake_case__ : List[str] = hidden_act snake_case__ : Optional[Any] = use_layer_scale snake_case__ : int = layer_scale_init_value snake_case__ : Dict = initializer_range super().__init__(**__UpperCamelCase ) class __snake_case ( _lowerCamelCase ): __lowerCamelCase = version.parse("""1.11""" ) @property def __a ( self ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ] ) @property def __a ( self ) -> float: '''simple docstring''' return 2E-3
699
1
import random import torch from huggingface_hub import HfApi from diffusers import UNetaDModel lowerCAmelCase__ : List[str] = HfApi() lowerCAmelCase__ : str = {} # fmt: off lowerCAmelCase__ : int = torch.tensor([ -0.75_15, -1.68_83, 0.24_20, 0.03_00, 0.63_47, 1.34_33, -1.17_43, -3.74_67, 1.23_42, -2.24_85, 0.46_36, 0.80_76, -0.79_91, 0.39_69, 0.84_98, 0.91_89, -1.88_87, -3.35_22, 0.76_39, 0.20_40, 0.62_71, -2.71_48, -1.63_16, 3.08_39, 0.31_86, 0.27_21, -0.97_59, -1.24_61, 2.62_57, 1.35_57 ]) lowerCAmelCase__ : Dict = torch.tensor([ -2.36_39, -2.53_44, 0.00_54, -0.66_74, 1.59_90, 1.01_58, 0.31_24, -2.14_36, 1.87_95, -2.54_29, -0.15_66, -0.39_73, 1.24_90, 2.64_47, 1.22_83, -0.52_08, -2.81_54, -3.51_19, 2.38_38, 1.20_33, 1.72_01, -2.12_56, -1.45_76, 2.79_48, 2.42_04, -0.97_52, -1.25_46, 0.80_27, 3.27_58, 3.13_65 ]) lowerCAmelCase__ : Dict = torch.tensor([ -0.65_31, -0.68_91, -0.31_72, -0.53_75, -0.91_40, -0.53_67, -0.11_75, -0.78_69, -0.38_08, -0.45_13, -0.20_98, -0.00_83, 0.31_83, 0.51_40, 0.22_47, -0.13_04, -0.13_02, -0.28_02, -0.20_84, -0.20_25, -0.49_67, -0.48_73, -0.08_61, 0.69_25, 0.02_50, 0.12_90, -0.15_43, 0.63_16, 1.04_60, 1.49_43 ]) lowerCAmelCase__ : List[str] = torch.tensor([ 0.09_11, 0.11_07, 0.01_82, 0.04_35, -0.08_05, -0.06_08, 0.03_81, 0.21_72, -0.02_80, 0.13_27, -0.02_99, -0.02_55, -0.00_50, -0.11_70, -0.10_46, 0.03_09, 0.13_67, 0.17_28, -0.05_33, -0.07_48, -0.05_34, 0.16_24, 0.03_84, -0.18_05, -0.07_07, 0.06_42, 0.02_20, -0.01_34, -0.13_33, -0.15_05 ]) lowerCAmelCase__ : Union[str, Any] = torch.tensor([ 0.13_21, 0.13_37, 0.04_40, 0.06_22, -0.05_91, -0.03_70, 0.05_03, 0.21_33, -0.01_77, 0.14_15, -0.01_16, -0.01_12, 0.00_44, -0.09_80, -0.07_89, 0.03_95, 0.15_02, 0.17_85, -0.04_88, -0.05_14, -0.04_04, 0.15_39, 0.04_54, -0.15_59, -0.06_65, 0.06_59, 0.03_83, -0.00_05, -0.12_66, -0.13_86 ]) lowerCAmelCase__ : List[Any] = torch.tensor([ 0.11_54, 0.12_18, 0.03_07, 0.05_26, -0.07_11, -0.05_41, 0.03_66, 0.20_78, -0.02_67, 0.13_17, -0.02_26, -0.01_93, -0.00_14, -0.10_55, -0.09_02, 0.03_30, 0.13_91, 0.17_09, -0.05_62, -0.06_93, -0.05_60, 0.14_82, 0.03_81, -0.16_83, -0.06_81, 0.06_61, 0.03_31, -0.00_46, -0.12_68, -0.14_31 ]) lowerCAmelCase__ : Optional[Any] = torch.tensor([ 0.11_92, 0.12_40, 0.04_14, 0.06_06, -0.05_57, -0.04_12, 0.04_30, 0.20_42, -0.02_00, 0.13_85, -0.01_15, -0.01_32, 0.00_17, -0.09_65, -0.08_02, 0.03_98, 0.14_33, 0.17_47, -0.04_58, -0.05_33, -0.04_07, 0.15_45, 0.04_19, -0.15_74, -0.06_45, 0.06_26, 0.03_41, -0.00_10, -0.11_99, -0.13_90 ]) lowerCAmelCase__ : List[str] = torch.tensor([ 0.10_75, 0.10_74, 0.02_05, 0.04_31, -0.07_74, -0.06_07, 0.02_98, 0.20_42, -0.03_20, 0.12_67, -0.02_81, -0.02_50, -0.00_64, -0.10_91, -0.09_46, 0.02_90, 0.13_28, 0.16_50, -0.05_80, -0.07_38, -0.05_86, 0.14_40, 0.03_37, -0.17_46, -0.07_12, 0.06_05, 0.02_50, -0.00_99, -0.13_16, -0.14_73 ]) lowerCAmelCase__ : List[str] = torch.tensor([ -1.45_72, -2.04_81, -0.04_14, -0.60_05, 1.41_36, 0.58_48, 0.40_28, -2.73_30, 1.22_12, -2.12_28, 0.21_55, 0.40_39, 0.76_62, 2.05_35, 0.74_77, -0.32_43, -2.17_58, -2.76_48, 1.69_47, 0.70_26, 1.23_38, -1.60_78, -0.86_82, 2.28_10, 1.85_74, -0.57_18, -0.55_86, -0.01_86, 2.34_15, 2.12_51]) lowerCAmelCase__ : List[Any] = torch.tensor([ -1.36_90, -1.97_20, -0.40_90, -0.69_66, 1.46_60, 0.99_38, -0.13_85, -2.73_24, 0.77_36, -1.89_17, 0.29_23, 0.42_93, 0.16_93, 1.41_12, 1.18_87, -0.31_81, -2.21_60, -2.63_81, 1.31_70, 0.81_63, 0.92_40, -1.65_44, -0.60_99, 2.52_59, 1.64_30, -0.90_90, -0.93_92, -0.01_26, 2.42_68, 2.32_66 ]) lowerCAmelCase__ : Tuple = torch.tensor([ -1.35_25, 
-1.96_28, -0.39_56, -0.68_60, 1.46_64, 1.00_14, -0.12_59, -2.72_12, 0.77_72, -1.88_11, 0.29_96, 0.43_88, 0.17_04, 1.40_29, 1.17_01, -0.30_27, -2.20_53, -2.62_87, 1.33_50, 0.81_31, 0.92_74, -1.62_92, -0.60_98, 2.51_31, 1.65_05, -0.89_58, -0.92_98, -0.01_51, 2.42_57, 2.33_55 ]) lowerCAmelCase__ : List[str] = torch.tensor([ -2.05_85, -2.78_97, -0.28_50, -0.89_40, 1.90_52, 0.57_02, 0.63_45, -3.89_59, 1.59_32, -3.23_19, 0.19_74, 0.02_87, 1.75_66, 2.65_43, 0.83_87, -0.53_51, -3.27_36, -4.33_75, 2.90_29, 1.63_90, 1.46_40, -2.17_01, -1.90_13, 2.93_41, 3.49_81, -0.62_55, -1.16_44, -0.15_91, 3.70_97, 3.20_66 ]) lowerCAmelCase__ : Dict = torch.tensor([ -2.31_39, -2.55_94, -0.01_97, -0.67_85, 1.70_01, 1.16_06, 0.30_75, -2.17_40, 1.80_71, -2.56_30, -0.09_26, -0.38_11, 1.21_16, 2.62_46, 1.27_31, -0.53_98, -2.81_53, -3.61_40, 2.38_93, 1.32_62, 1.62_58, -2.18_56, -1.32_67, 2.83_95, 2.37_79, -1.06_23, -1.24_68, 0.89_59, 3.33_67, 3.22_43 ]) lowerCAmelCase__ : Dict = torch.tensor([ -2.06_28, -2.76_67, -0.20_89, -0.82_63, 2.05_39, 0.59_92, 0.64_95, -3.83_36, 1.60_25, -3.28_17, 0.17_21, -0.06_33, 1.75_16, 2.70_39, 0.81_00, -0.59_08, -3.21_13, -4.43_43, 2.92_57, 1.36_32, 1.55_62, -2.14_89, -1.98_94, 3.05_60, 3.33_96, -0.73_28, -1.04_17, 0.03_83, 3.70_93, 3.23_43 ]) lowerCAmelCase__ : Any = torch.tensor([ -1.45_74, -2.05_69, -0.04_73, -0.61_17, 1.40_18, 0.57_69, 0.41_29, -2.73_44, 1.22_41, -2.13_97, 0.20_00, 0.39_37, 0.76_16, 2.04_53, 0.73_24, -0.33_91, -2.17_46, -2.77_44, 1.69_63, 0.69_21, 1.21_87, -1.61_72, -0.88_77, 2.24_39, 1.84_71, -0.58_39, -0.56_05, -0.04_64, 2.32_50, 2.12_19 ]) # fmt: on lowerCAmelCase__ : Any = api.list_models(filter='''diffusers''') for mod in models: if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256": lowerCAmelCase__ : List[str] = '''/home/patrick/google_checkpoints/''' + mod.modelId.split('''/''')[-1] print(F'''Started running {mod.modelId}!!!''') if mod.modelId.startswith('''CompVis'''): lowerCAmelCase__ : int = UNetaDModel.from_pretrained(local_checkpoint, subfolder='''unet''') else: lowerCAmelCase__ : str = UNetaDModel.from_pretrained(local_checkpoint) torch.manual_seed(0) random.seed(0) lowerCAmelCase__ : Any = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size) lowerCAmelCase__ : List[str] = torch.tensor([10] * noise.shape[0]) with torch.no_grad(): lowerCAmelCase__ : int = model(noise, time_step).sample assert torch.allclose( logits[0, 0, 0, :30], results['''_'''.join('''_'''.join(mod.modelId.split('''/''')).split('''-'''))], atol=1E-3 ) print(F'''{mod.modelId} has passed successfully!!!''')
699
import numpy as np import qiskit def UpperCamelCase__ ( A__ = 8 , A__ = None ) -> str: snake_case__ : Optional[int] = np.random.default_rng(seed=A__ ) # Roughly 25% of the qubits will contribute to the key. # So we take more than we need. snake_case__ : Tuple = 6 * key_len # Measurement basis for Alice's qubits. snake_case__ : Tuple = rng.integers(2 , size=A__ ) # The set of states Alice will prepare. snake_case__ : List[str] = rng.integers(2 , size=A__ ) # Measurement basis for Bob's qubits. snake_case__ : List[Any] = rng.integers(2 , size=A__ ) # Quantum Circuit to simulate BB84 snake_case__ : Any = qiskit.QuantumCircuit(A__ , name='BB84' ) # Alice prepares her qubits according to rules above. for index, _ in enumerate(A__ ): if alice_state[index] == 1: bbaa_circ.x(A__ ) if alice_basis[index] == 1: bbaa_circ.h(A__ ) bbaa_circ.barrier() # Bob measures the received qubits according to rules above. for index, _ in enumerate(A__ ): if bob_basis[index] == 1: bbaa_circ.h(A__ ) bbaa_circ.barrier() bbaa_circ.measure_all() # Simulate the quantum circuit. snake_case__ : List[str] = qiskit.Aer.get_backend('aer_simulator' ) # We only need to run one shot because the key is unique. # Multiple shots will produce the same key. snake_case__ : Optional[Any] = qiskit.execute(A__ , A__ , shots=1 , seed_simulator=A__ ) # Returns the result of measurement. snake_case__ : Union[str, Any] = job.result().get_counts(A__ ).most_frequent() # Extracting the generated key from the simulation results. # Only keep measurement results where Alice and Bob chose the same basis. snake_case__ : Optional[Any] = ''.join( [ result_bit for alice_basis_bit, bob_basis_bit, result_bit in zip( A__ , A__ , A__ ) if alice_basis_bit == bob_basis_bit ] ) # Get final key. Pad with 0 if too short, otherwise truncate. snake_case__ : Tuple = gen_key[:key_len] if len(A__ ) >= key_len else gen_key.ljust(A__ , '0' ) return key if __name__ == "__main__": print(F'''The generated key is : {bbaa(8, seed=0)}''') from doctest import testmod testmod()
699
1
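The BB84 record above needs qiskit to run, but its final sifting step is plain Python. A hedged sketch (names mine): keep only the measured bits where Alice's and Bob's randomly chosen bases agree.

alice_basis = [0, 1, 1, 0, 1]
bob_basis = [0, 1, 0, 0, 0]
measured = "10110"  # Bob's measurement outcomes, one bit per qubit

key = "".join(bit for a, b, bit in zip(alice_basis, bob_basis, measured) if a == b)
assert key == "101"  # the bases agree at positions 0, 1 and 3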
import argparse import re import requests import torch # git clone https://github.com/salesforce/BLIP.git from models.blip import blip_decoder from models.blip_itm import blip_itm from models.blip_vqa import blip_vqa from PIL import Image from torchvision import transforms from torchvision.transforms.functional import InterpolationMode from transformers import ( BertTokenizer, BlipConfig, BlipForConditionalGeneration, BlipForImageTextRetrieval, BlipForQuestionAnswering, ) def UpperCamelCase__ ( A__ , A__ ) -> Any: snake_case__ : List[Any] = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg' snake_case__ : str = Image.open(requests.get(A__ , stream=A__ ).raw ).convert('RGB' ) snake_case__ : Union[str, Any] = transforms.Compose( [ transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC ), transforms.ToTensor(), transforms.Normalize((0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3) , (0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1) ), ] ) snake_case__ : Union[str, Any] = transform(A__ ).unsqueeze(0 ).to(A__ ) return image def UpperCamelCase__ ( A__ ) -> Tuple: if "visual_encoder" in key: snake_case__ : str = re.sub('visual_encoder*' , 'vision_model.encoder' , A__ ) if "blocks" in key: snake_case__ : List[str] = re.sub(r'blocks' , 'layers' , A__ ) if "attn" in key: snake_case__ : Tuple = re.sub(r'attn' , 'self_attn' , A__ ) if "norm1" in key: snake_case__ : Tuple = re.sub(r'norm1' , 'layer_norm1' , A__ ) if "norm2" in key: snake_case__ : Dict = re.sub(r'norm2' , 'layer_norm2' , A__ ) if "encoder.norm" in key: snake_case__ : Tuple = re.sub(r'encoder.norm' , 'post_layernorm' , A__ ) if "encoder.patch_embed.proj" in key: snake_case__ : Dict = re.sub(r'encoder.patch_embed.proj' , 'embeddings.patch_embedding' , A__ ) if "encoder.pos_embed" in key: snake_case__ : List[str] = re.sub(r'encoder.pos_embed' , 'embeddings.position_embedding' , A__ ) if "encoder.cls_token" in key: snake_case__ : Union[str, Any] = re.sub(r'encoder.cls_token' , 'embeddings.class_embedding' , A__ ) if "self_attn" in key: snake_case__ : int = re.sub(r'self_attn.proj' , 'self_attn.projection' , A__ ) return key @torch.no_grad() def UpperCamelCase__ ( A__ , A__=None ) -> int: if config_path is not None: snake_case__ : Optional[Any] = BlipConfig.from_pretrained(A__ ) else: snake_case__ : str = BlipConfig(projection_dim=512 , text_config={} , vision_config={} ) snake_case__ : Dict = BlipForConditionalGeneration(A__ ).eval() snake_case__ : Tuple = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth' snake_case__ : Tuple = blip_decoder(pretrained=A__ , image_size=384 , vit='base' ) snake_case__ : Any = pt_model.eval() snake_case__ : Optional[int] = pt_model.state_dict() for key in modified_state_dict.copy(): snake_case__ : int = modified_state_dict.pop(A__ ) snake_case__ : Union[str, Any] = rename_key(A__ ) snake_case__ : Any = value hf_model.load_state_dict(A__ ) snake_case__ : Optional[Any] = 384 snake_case__ : List[str] = load_demo_image(image_size=A__ , device='cpu' ) snake_case__ : Any = BertTokenizer.from_pretrained('bert-base-uncased' ) snake_case__ : Optional[Any] = tokenizer(['a picture of'] ).input_ids snake_case__ : Tuple = hf_model.generate(A__ , A__ ) assert out[0].tolist() == [3_0522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102] snake_case__ : int = hf_model.generate(A__ ) assert out[0].tolist() == [3_0522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102] if 
pytorch_dump_folder_path is not None: hf_model.save_pretrained(A__ ) # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth' snake_case__ : Optional[int] = ( 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth' ) snake_case__ : str = blip_vqa(pretrained=A__ , image_size=A__ , vit='base' ) vqa_model.eval() snake_case__ : Union[str, Any] = vqa_model.state_dict() for key in modified_state_dict.copy(): snake_case__ : Union[str, Any] = modified_state_dict.pop(A__ ) snake_case__ : Union[str, Any] = rename_key(A__ ) snake_case__ : int = value snake_case__ : str = BlipForQuestionAnswering(A__ ) hf_vqa_model.load_state_dict(A__ ) snake_case__ : Dict = ['How many dogs are in this image?'] snake_case__ : List[str] = tokenizer(A__ , return_tensors='pt' ).input_ids snake_case__ : Any = hf_vqa_model.generate(A__ , A__ ) print(tokenizer.decode(answer[0] ) ) assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]" if pytorch_dump_folder_path is not None: hf_vqa_model.save_pretrained(pytorch_dump_folder_path + '_vqa' ) snake_case__ : int = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth' snake_case__ : Optional[Any] = blip_itm(pretrained=A__ , image_size=A__ , vit='base' ) itm_model.eval() snake_case__ : Optional[Any] = itm_model.state_dict() for key in modified_state_dict.copy(): snake_case__ : str = modified_state_dict.pop(A__ ) snake_case__ : List[Any] = rename_key(A__ ) snake_case__ : str = value snake_case__ : int = BlipForImageTextRetrieval(A__ ) snake_case__ : Optional[Any] = ['A picture of a woman with a dog sitting in a beach'] snake_case__ : List[Any] = tokenizer( A__ , return_tensors='pt' , padding='max_length' , truncation=A__ , max_length=35 , ).input_ids hf_itm_model.load_state_dict(A__ ) hf_itm_model.eval() snake_case__ : Optional[Any] = hf_itm_model(A__ , A__ , use_itm_head=A__ ) snake_case__ : str = hf_itm_model(A__ , A__ , use_itm_head=A__ ) assert out[0].item() == 0.2_1_1_0_6_8_7_4_9_4_2_7_7_9_5_4 assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.4_5_6_9_8_8_4_5_3_8_6_5_0_5_1_2_7 if pytorch_dump_folder_path is not None: hf_itm_model.save_pretrained(pytorch_dump_folder_path + '_itm' ) if __name__ == "__main__": lowerCAmelCase__ : Optional[int] = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') lowerCAmelCase__ : str = parser.parse_args() convert_blip_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
699
def UpperCamelCase__ ( A__ , A__ , A__ ) -> int: if exponent == 1: return base if exponent % 2 == 0: snake_case__ : Dict = _modexpt(A__ , exponent // 2 , A__ ) % modulo_value return (x * x) % modulo_value else: return (base * _modexpt(A__ , exponent - 1 , A__ )) % modulo_value def UpperCamelCase__ ( A__ = 1777 , A__ = 1855 , A__ = 8 ) -> int: snake_case__ : Tuple = base for _ in range(1 , A__ ): snake_case__ : Any = _modexpt(A__ , A__ , 10**digits ) return result if __name__ == "__main__": print(F'''{solution() = }''')
699
1
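A deobfuscated sketch of the recursive square-and-multiply in the record above (the name modexpt is my assumption), cross-checked against Python's built-in three-argument pow:

def modexpt(base: int, exponent: int, modulo_value: int) -> int:
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    return (base * modexpt(base, exponent - 1, modulo_value)) % modulo_value

# The record uses this to keep only the last `digits` digits of huge powers.
assert modexpt(1777, 1855, 10**8) == pow(1777, 1855, 10**8)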
from typing import List import jiwer import jiwer.transforms as tr from packaging import version import datasets from datasets.config import PY_VERSION if PY_VERSION < version.parse('''3.8'''): import importlib_metadata else: import importlib.metadata as importlib_metadata lowerCAmelCase__ : List[Any] = '''''' if version.parse(importlib_metadata.version('''jiwer''')) < version.parse('''2.3.0'''): class __snake_case ( tr.AbstractTransform ): def __init__( self , __UpperCamelCase = " " ) -> Optional[Any]: '''simple docstring''' snake_case__ : Union[str, Any] = sentence_delimiter def __a ( self , __UpperCamelCase ) -> Dict: '''simple docstring''' return list(__UpperCamelCase ) def __a ( self , __UpperCamelCase ) -> List[Any]: '''simple docstring''' snake_case__ : List[str] = [] for sent_idx, sentence in enumerate(__UpperCamelCase ): chars.extend(self.process_string(__UpperCamelCase ) ) if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(__UpperCamelCase ) - 1: chars.append(self.sentence_delimiter ) return chars lowerCAmelCase__ : List[Any] = tr.Compose( [tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)] ) else: lowerCAmelCase__ : List[Any] = tr.Compose( [ tr.RemoveMultipleSpaces(), tr.Strip(), tr.ReduceToSingleSentence(SENTENCE_DELIMITER), tr.ReduceToListOfListOfChars(), ] ) lowerCAmelCase__ : List[str] = '''\ @inproceedings{inproceedings, author = {Morris, Andrew and Maier, Viktoria and Green, Phil}, year = {2004}, month = {01}, pages = {}, title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.} } ''' lowerCAmelCase__ : List[Any] = '''\ Character error rate (CER) is a common metric of the performance of an automatic speech recognition system. CER is similar to Word Error Rate (WER), but operates on characters instead of words. Please refer to the docs of WER for further information. Character error rate can be computed as: CER = (S + D + I) / N = (S + D + I) / (S + D + C) where S is the number of substitutions, D is the number of deletions, I is the number of insertions, C is the number of correct characters, N is the number of characters in the reference (N=S+D+C). CER\'s output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated with the percentage of characters that were incorrectly predicted. The lower the value, the better the performance of the ASR system with a CER of 0 being a perfect score. ''' lowerCAmelCase__ : int = ''' Computes CER score of transcribed segments against references. Args: references: list of references for each speech input. predictions: list of transcriptions to score. concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for a more accurate result. 
Returns: (float): the character error rate Examples: >>> predictions = ["this is the prediction", "there is an other sample"] >>> references = ["this is the reference", "there is another one"] >>> cer = datasets.load_metric("cer") >>> cer_score = cer.compute(predictions=predictions, references=references) >>> print(cer_score) 0.34146341463414637 ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class __snake_case ( datasets.Metric ): def __a ( self ) -> Any: '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Value('string' , id='sequence' ), 'references': datasets.Value('string' , id='sequence' ), } ) , codebase_urls=['https://github.com/jitsi/jiwer/'] , reference_urls=[ 'https://en.wikipedia.org/wiki/Word_error_rate', 'https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates', ] , ) def __a ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=False ) -> Optional[Any]: '''simple docstring''' if concatenate_texts: return jiwer.compute_measures( __UpperCamelCase , __UpperCamelCase , truth_transform=__UpperCamelCase , hypothesis_transform=__UpperCamelCase , )["wer"] snake_case__ : Optional[Any] = 0 snake_case__ : List[Any] = 0 for prediction, reference in zip(__UpperCamelCase , __UpperCamelCase ): snake_case__ : List[str] = jiwer.compute_measures( __UpperCamelCase , __UpperCamelCase , truth_transform=__UpperCamelCase , hypothesis_transform=__UpperCamelCase , ) incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"] total += measures["substitutions"] + measures["deletions"] + measures["hits"] return incorrect / total
699
# tests directory-specific settings - this file is run automatically # by pytest before any tests are run import sys import warnings from os.path import abspath, dirname, join # allow having multiple repository checkouts and not needing to remember to rerun # 'pip install -e .[dev]' when switching between checkouts and running tests. lowerCAmelCase__ : Tuple = abspath(join(dirname(dirname(__file__)), '''src''')) sys.path.insert(1, git_repo_path) # silence FutureWarning warnings in tests since often we can't act on them until # they become normal warnings - i.e. the tests still need to test the current functionality warnings.simplefilter(action='''ignore''', category=FutureWarning) def UpperCamelCase__ ( A__ ) -> Optional[Any]: from diffusers.utils.testing_utils import pytest_addoption_shared pytest_addoption_shared(A__ ) def UpperCamelCase__ ( A__ ) -> Optional[Any]: from diffusers.utils.testing_utils import pytest_terminal_summary_main snake_case__ : Union[str, Any] = terminalreporter.config.getoption('--make-reports' ) if make_reports: pytest_terminal_summary_main(A__ , id=A__ )
699
1
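The jiwer-based metric above boils down to character-level edit distance; a dependency-free sketch of CER = (S + D + I) / N via Levenshtein distance (a simplification, not the record's exact implementation):

def cer(reference: str, prediction: str) -> float:
    m, n = len(reference), len(prediction)
    d = [[0] * (n + 1) for _ in range(m + 1)]
    for i in range(m + 1):
        d[i][0] = i  # delete all of reference[:i]
    for j in range(n + 1):
        d[0][j] = j  # insert all of prediction[:j]
    for i in range(1, m + 1):
        for j in range(1, n + 1):
            cost = 0 if reference[i - 1] == prediction[j - 1] else 1
            d[i][j] = min(
                d[i - 1][j] + 1,  # deletion
                d[i][j - 1] + 1,  # insertion
                d[i - 1][j - 1] + cost,  # substitution (or match)
            )
    return d[m][n] / m  # N = reference length; assumes a non-empty reference

assert cer("abc", "abc") == 0.0
assert cer("abcd", "abce") == 0.25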
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) lowerCAmelCase__ : Any = { '''configuration_blip''': [ '''BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BlipConfig''', '''BlipTextConfig''', '''BlipVisionConfig''', ], '''processing_blip''': ['''BlipProcessor'''], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ : Optional[int] = ['''BlipImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ : Tuple = [ '''BLIP_PRETRAINED_MODEL_ARCHIVE_LIST''', '''BlipModel''', '''BlipPreTrainedModel''', '''BlipForConditionalGeneration''', '''BlipForQuestionAnswering''', '''BlipVisionModel''', '''BlipTextModel''', '''BlipForImageTextRetrieval''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ : Union[str, Any] = [ '''TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFBlipModel''', '''TFBlipPreTrainedModel''', '''TFBlipForConditionalGeneration''', '''TFBlipForQuestionAnswering''', '''TFBlipVisionModel''', '''TFBlipTextModel''', '''TFBlipForImageTextRetrieval''', ] if TYPE_CHECKING: from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig from .processing_blip import BlipProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_blip import BlipImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blip import ( BLIP_PRETRAINED_MODEL_ARCHIVE_LIST, BlipForConditionalGeneration, BlipForImageTextRetrieval, BlipForQuestionAnswering, BlipModel, BlipPreTrainedModel, BlipTextModel, BlipVisionModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_blip import ( TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST, TFBlipForConditionalGeneration, TFBlipForImageTextRetrieval, TFBlipForQuestionAnswering, TFBlipModel, TFBlipPreTrainedModel, TFBlipTextModel, TFBlipVisionModel, ) else: import sys lowerCAmelCase__ : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
699
def UpperCamelCase__ ( A__ ) -> list[int]: if length <= 0 or not isinstance(A__ , A__ ): raise ValueError('Length must be a positive integer.' ) return [n * (2 * n - 1) for n in range(A__ )] if __name__ == "__main__": print(hexagonal_numbers(length=5)) print(hexagonal_numbers(length=10))
699
1
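A quick check of the closed form used by the hexagonal-numbers record above, h(n) = n(2n - 1); note the record starts the sequence at n = 0:

def hexagonal_numbers(length: int) -> list[int]:
    if not isinstance(length, int) or length <= 0:
        raise ValueError("Length must be a positive integer.")
    return [n * (2 * n - 1) for n in range(length)]

assert hexagonal_numbers(5) == [0, 1, 6, 15, 28]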
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase__ : Dict = { '''configuration_funnel''': ['''FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FunnelConfig'''], '''convert_funnel_original_tf_checkpoint_to_pytorch''': [], '''tokenization_funnel''': ['''FunnelTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ : Any = ['''FunnelTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ : str = [ '''FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST''', '''FunnelBaseModel''', '''FunnelForMaskedLM''', '''FunnelForMultipleChoice''', '''FunnelForPreTraining''', '''FunnelForQuestionAnswering''', '''FunnelForSequenceClassification''', '''FunnelForTokenClassification''', '''FunnelModel''', '''FunnelPreTrainedModel''', '''load_tf_weights_in_funnel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ : Optional[Any] = [ '''TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFFunnelBaseModel''', '''TFFunnelForMaskedLM''', '''TFFunnelForMultipleChoice''', '''TFFunnelForPreTraining''', '''TFFunnelForQuestionAnswering''', '''TFFunnelForSequenceClassification''', '''TFFunnelForTokenClassification''', '''TFFunnelModel''', '''TFFunnelPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig from .tokenization_funnel import FunnelTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_funnel_fast import FunnelTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_funnel import ( FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST, FunnelBaseModel, FunnelForMaskedLM, FunnelForMultipleChoice, FunnelForPreTraining, FunnelForQuestionAnswering, FunnelForSequenceClassification, FunnelForTokenClassification, FunnelModel, FunnelPreTrainedModel, load_tf_weights_in_funnel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_funnel import ( TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST, TFFunnelBaseModel, TFFunnelForMaskedLM, TFFunnelForMultipleChoice, TFFunnelForPreTraining, TFFunnelForQuestionAnswering, TFFunnelForSequenceClassification, TFFunnelForTokenClassification, TFFunnelModel, TFFunnelPreTrainedModel, ) else: import sys lowerCAmelCase__ : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
699
import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( ConditionalDetrConfig, ConditionalDetrForObjectDetection, ConditionalDetrForSegmentation, ConditionalDetrImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase__ : Dict = logging.get_logger(__name__) # here we list all keys to be renamed (original name on the left, our name on the right) lowerCAmelCase__ : Optional[Any] = [] for i in range(6): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', F'''encoder.layers.{i}.self_attn.out_proj.weight''') ) rename_keys.append( (F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight''')) rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias''')) rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight''')) rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias''')) rename_keys.append( (F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias''')) rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight''')) rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias''')) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( (F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''decoder.layers.{i}.self_attn.out_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''') ) rename_keys.append( ( F'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''', F'''decoder.layers.{i}.encoder_attn.out_proj.weight''', ) ) rename_keys.append( ( F'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''', F'''decoder.layers.{i}.encoder_attn.out_proj.bias''', ) ) rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight''')) rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias''')) rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight''')) rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''') ) 
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight''')) rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias''')) # q, k, v projections in self/cross-attention in decoder for conditional DETR rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', F'''decoder.layers.{i}.sa_qcontent_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', F'''decoder.layers.{i}.sa_kcontent_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', F'''decoder.layers.{i}.sa_qpos_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', F'''decoder.layers.{i}.sa_kpos_proj.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.weight''', F'''decoder.layers.{i}.sa_v_proj.weight''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', F'''decoder.layers.{i}.ca_qcontent_proj.weight''') ) # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight")) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', F'''decoder.layers.{i}.ca_kcontent_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', F'''decoder.layers.{i}.ca_kpos_proj.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.weight''', F'''decoder.layers.{i}.ca_v_proj.weight''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', F'''decoder.layers.{i}.ca_qpos_sine_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', F'''decoder.layers.{i}.sa_qcontent_proj.bias''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', F'''decoder.layers.{i}.sa_kcontent_proj.bias''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', F'''decoder.layers.{i}.sa_qpos_proj.bias''')) rename_keys.append((F'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', F'''decoder.layers.{i}.sa_kpos_proj.bias''')) rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.bias''', F'''decoder.layers.{i}.sa_v_proj.bias''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', F'''decoder.layers.{i}.ca_qcontent_proj.bias''') ) # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias")) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', F'''decoder.layers.{i}.ca_kcontent_proj.bias''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', F'''decoder.layers.{i}.ca_kpos_proj.bias''')) rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.bias''', F'''decoder.layers.{i}.ca_v_proj.bias''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', F'''decoder.layers.{i}.ca_qpos_sine_proj.bias''') ) # convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads # for conditional DETR, also convert reference point head and query scale MLP rename_keys.extend( [ ('''input_proj.weight''', '''input_projection.weight'''), ('''input_proj.bias''', '''input_projection.bias'''), ('''query_embed.weight''', '''query_position_embeddings.weight'''), 
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''), ('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''), ('''class_embed.weight''', '''class_labels_classifier.weight'''), ('''class_embed.bias''', '''class_labels_classifier.bias'''), ('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''), ('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''), ('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''), ('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''), ('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''), ('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''), ('''transformer.decoder.ref_point_head.layers.0.weight''', '''decoder.ref_point_head.layers.0.weight'''), ('''transformer.decoder.ref_point_head.layers.0.bias''', '''decoder.ref_point_head.layers.0.bias'''), ('''transformer.decoder.ref_point_head.layers.1.weight''', '''decoder.ref_point_head.layers.1.weight'''), ('''transformer.decoder.ref_point_head.layers.1.bias''', '''decoder.ref_point_head.layers.1.bias'''), ('''transformer.decoder.query_scale.layers.0.weight''', '''decoder.query_scale.layers.0.weight'''), ('''transformer.decoder.query_scale.layers.0.bias''', '''decoder.query_scale.layers.0.bias'''), ('''transformer.decoder.query_scale.layers.1.weight''', '''decoder.query_scale.layers.1.weight'''), ('''transformer.decoder.query_scale.layers.1.bias''', '''decoder.query_scale.layers.1.bias'''), ('''transformer.decoder.layers.0.ca_qpos_proj.weight''', '''decoder.layers.0.ca_qpos_proj.weight'''), ('''transformer.decoder.layers.0.ca_qpos_proj.bias''', '''decoder.layers.0.ca_qpos_proj.bias'''), ] ) def UpperCamelCase__ ( A__ , A__ , A__ ) -> List[str]: snake_case__ : int = state_dict.pop(A__ ) snake_case__ : Union[str, Any] = val def UpperCamelCase__ ( A__ ) -> int: snake_case__ : List[Any] = OrderedDict() for key, value in state_dict.items(): if "backbone.0.body" in key: snake_case__ : Any = key.replace('backbone.0.body' , 'backbone.conv_encoder.model' ) snake_case__ : Optional[int] = value else: snake_case__ : Optional[int] = value return new_state_dict def UpperCamelCase__ ( A__ , A__=False ) -> Optional[int]: snake_case__ : Optional[int] = '' if is_panoptic: snake_case__ : Tuple = 'conditional_detr.' 
# first: transformer encoder for i in range(6 ): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) snake_case__ : int = state_dict.pop(F"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" ) snake_case__ : str = state_dict.pop(F"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" ) # next, add query, keys and values (in that order) to the state dict snake_case__ : Union[str, Any] = in_proj_weight[:256, :] snake_case__ : Union[str, Any] = in_proj_bias[:256] snake_case__ : Union[str, Any] = in_proj_weight[256:512, :] snake_case__ : Optional[Any] = in_proj_bias[256:512] snake_case__ : List[str] = in_proj_weight[-256:, :] snake_case__ : Tuple = in_proj_bias[-256:] def UpperCamelCase__ ( ) -> Tuple: snake_case__ : int = 'http://images.cocodataset.org/val2017/000000039769.jpg' snake_case__ : str = Image.open(requests.get(A__ , stream=A__ ).raw ) return im @torch.no_grad() def UpperCamelCase__ ( A__ , A__ ) -> str: snake_case__ : List[Any] = ConditionalDetrConfig() # set backbone and dilation attributes if "resnet101" in model_name: snake_case__ : Any = 'resnet101' if "dc5" in model_name: snake_case__ : Any = True snake_case__ : int = 'panoptic' in model_name if is_panoptic: snake_case__ : str = 250 else: snake_case__ : Union[str, Any] = 91 snake_case__ : Optional[int] = 'huggingface/label-files' snake_case__ : Optional[Any] = 'coco-detection-id2label.json' snake_case__ : str = json.load(open(hf_hub_download(A__ , A__ , repo_type='dataset' ) , 'r' ) ) snake_case__ : List[Any] = {int(A__ ): v for k, v in idalabel.items()} snake_case__ : Any = idalabel snake_case__ : int = {v: k for k, v in idalabel.items()} # load image processor snake_case__ : List[Any] = 'coco_panoptic' if is_panoptic else 'coco_detection' snake_case__ : List[Any] = ConditionalDetrImageProcessor(format=A__ ) # prepare image snake_case__ : List[str] = prepare_img() snake_case__ : Any = image_processor(images=A__ , return_tensors='pt' ) snake_case__ : Dict = encoding['pixel_values'] logger.info(F"""Converting model {model_name}...""" ) # load original model from torch hub snake_case__ : Any = torch.hub.load('DeppMeng/ConditionalDETR' , A__ , pretrained=A__ ).eval() snake_case__ : Tuple = conditional_detr.state_dict() # rename keys for src, dest in rename_keys: if is_panoptic: snake_case__ : List[Any] = 'conditional_detr.' + src rename_key(A__ , A__ , A__ ) snake_case__ : Dict = rename_backbone_keys(A__ ) # query, key and value matrices need special treatment read_in_q_k_v(A__ , is_panoptic=A__ ) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them snake_case__ : Optional[int] = 'conditional_detr.model.' if is_panoptic else 'model.' 
for key in state_dict.copy().keys(): if is_panoptic: if ( key.startswith('conditional_detr' ) and not key.startswith('class_labels_classifier' ) and not key.startswith('bbox_predictor' ) ): snake_case__ : List[Any] = state_dict.pop(A__ ) snake_case__ : Optional[int] = val elif "class_labels_classifier" in key or "bbox_predictor" in key: snake_case__ : str = state_dict.pop(A__ ) snake_case__ : List[Any] = val elif key.startswith('bbox_attention' ) or key.startswith('mask_head' ): continue else: snake_case__ : Union[str, Any] = state_dict.pop(A__ ) snake_case__ : Dict = val else: if not key.startswith('class_labels_classifier' ) and not key.startswith('bbox_predictor' ): snake_case__ : List[Any] = state_dict.pop(A__ ) snake_case__ : Optional[int] = val # finally, create HuggingFace model and load state dict snake_case__ : Union[str, Any] = ConditionalDetrForSegmentation(A__ ) if is_panoptic else ConditionalDetrForObjectDetection(A__ ) model.load_state_dict(A__ ) model.eval() model.push_to_hub(repo_id=A__ , organization='DepuMeng' , commit_message='Add model' ) # verify our conversion snake_case__ : Tuple = conditional_detr(A__ ) snake_case__ : str = model(A__ ) assert torch.allclose(outputs.logits , original_outputs['pred_logits'] , atol=1e-4 ) assert torch.allclose(outputs.pred_boxes , original_outputs['pred_boxes'] , atol=1e-4 ) if is_panoptic: assert torch.allclose(outputs.pred_masks , original_outputs['pred_masks'] , atol=1e-4 ) # Save model and image processor logger.info(F"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" ) Path(A__ ).mkdir(exist_ok=A__ ) model.save_pretrained(A__ ) image_processor.save_pretrained(A__ ) if __name__ == "__main__": lowerCAmelCase__ : Any = argparse.ArgumentParser() parser.add_argument( '''--model_name''', default='''conditional_detr_resnet50''', type=str, help='''Name of the CONDITIONAL_DETR model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.''' ) lowerCAmelCase__ : int = parser.parse_args() convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
699
1
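The read_in_q_k_v step of the DETR-conversion record above slices PyTorch's fused (3·d, d) attention projection into separate q/k/v matrices; a minimal shape-level sketch assuming d = 256 as in the record:

import torch

d = 256
in_proj_weight = torch.randn(3 * d, d)  # stand-in for the checkpoint tensor
q_w = in_proj_weight[:d, :]
k_w = in_proj_weight[d : 2 * d, :]
v_w = in_proj_weight[-d:, :]
assert q_w.shape == k_w.shape == v_w.shape == (d, d)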
import collections from typing import List, Optional, Union from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging from ..bert.tokenization_bert_fast import BertTokenizerFast from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer lowerCAmelCase__ : Any = logging.get_logger(__name__) lowerCAmelCase__ : List[Any] = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''} lowerCAmelCase__ : Any = { '''vocab_file''': { '''facebook/dpr-ctx_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt''' ), '''facebook/dpr-ctx_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''facebook/dpr-ctx_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json''' ), '''facebook/dpr-ctx_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json''' ), }, } lowerCAmelCase__ : Any = { '''vocab_file''': { '''facebook/dpr-question_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt''' ), '''facebook/dpr-question_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''facebook/dpr-question_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json''' ), '''facebook/dpr-question_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json''' ), }, } lowerCAmelCase__ : Tuple = { '''vocab_file''': { '''facebook/dpr-reader-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt''' ), '''facebook/dpr-reader-multiset-base''': ( '''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''facebook/dpr-reader-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json''' ), '''facebook/dpr-reader-multiset-base''': ( '''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json''' ), }, } lowerCAmelCase__ : Dict = { '''facebook/dpr-ctx_encoder-single-nq-base''': 5_12, '''facebook/dpr-ctx_encoder-multiset-base''': 5_12, } lowerCAmelCase__ : Union[str, Any] = { '''facebook/dpr-question_encoder-single-nq-base''': 5_12, '''facebook/dpr-question_encoder-multiset-base''': 5_12, } lowerCAmelCase__ : Optional[Any] = { '''facebook/dpr-reader-single-nq-base''': 5_12, '''facebook/dpr-reader-multiset-base''': 5_12, } lowerCAmelCase__ : Tuple = { '''facebook/dpr-ctx_encoder-single-nq-base''': {'''do_lower_case''': True}, '''facebook/dpr-ctx_encoder-multiset-base''': {'''do_lower_case''': True}, } lowerCAmelCase__ : Any = { '''facebook/dpr-question_encoder-single-nq-base''': {'''do_lower_case''': True}, '''facebook/dpr-question_encoder-multiset-base''': {'''do_lower_case''': True}, } lowerCAmelCase__ : List[str] = { '''facebook/dpr-reader-single-nq-base''': {'''do_lower_case''': True}, '''facebook/dpr-reader-multiset-base''': {'''do_lower_case''': True}, } class __snake_case ( _lowerCamelCase ): __lowerCamelCase = 
VOCAB_FILES_NAMES __lowerCamelCase = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP __lowerCamelCase = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowerCamelCase = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION __lowerCamelCase = DPRContextEncoderTokenizer class __snake_case ( _lowerCamelCase ): __lowerCamelCase = VOCAB_FILES_NAMES __lowerCamelCase = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP __lowerCamelCase = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowerCamelCase = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION __lowerCamelCase = DPRQuestionEncoderTokenizer lowerCAmelCase__ : Tuple = collections.namedtuple( '''DPRSpanPrediction''', ['''span_score''', '''relevance_score''', '''doc_id''', '''start_index''', '''end_index''', '''text'''] ) lowerCAmelCase__ : List[Any] = collections.namedtuple('''DPRReaderOutput''', ['''start_logits''', '''end_logits''', '''relevance_logits''']) lowerCAmelCase__ : int = r''' Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`. It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers), using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)` with the format: [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids> Args: questions (`str` or `List[str]`): The questions to be encoded. You can specify one question for many passages. In this case, the question will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in `titles` or `texts`. titles (`str` or `List[str]`): The passages titles to be encoded. This can be a string or a list of strings if there are several passages. texts (`str` or `List[str]`): The passages texts to be encoded. This can be a string or a list of strings if there are several passages. padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`): Activates and controls padding. Accepts the following values: - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different lengths). truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`): Activates and controls truncation. Accepts the following values: - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided. - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. 
This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size). max_length (`int`, *optional*): Controls the maximum length to use by one of the truncation/padding parameters. If left unset or set to `None`, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated. return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors instead of list of python integers. Acceptable values are: - `\'tf\'`: Return TensorFlow `tf.constant` objects. - `\'pt\'`: Return PyTorch `torch.Tensor` objects. - `\'np\'`: Return Numpy `np.ndarray` objects. return_attention_mask (`bool`, *optional*): Whether or not to return the attention mask. If not set, will return the attention mask according to the specific tokenizer\'s default, defined by the `return_outputs` attribute. [What are attention masks?](../glossary#attention-mask) Return: `Dict[str, List[List[int]]]`: A dictionary with the following keys: - `input_ids`: List of token ids to be fed to a model. - `attention_mask`: List of indices specifying which tokens should be attended to by the model. ''' @add_start_docstrings(_lowerCamelCase ) class __snake_case : def __call__( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = False , __UpperCamelCase = False , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , **__UpperCamelCase , ) -> BatchEncoding: '''simple docstring''' if titles is None and texts is None: return super().__call__( __UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase , max_length=__UpperCamelCase , return_tensors=__UpperCamelCase , return_attention_mask=__UpperCamelCase , **__UpperCamelCase , ) elif titles is None or texts is None: snake_case__ : Optional[Any] = titles if texts is None else texts return super().__call__( __UpperCamelCase , __UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase , max_length=__UpperCamelCase , return_tensors=__UpperCamelCase , return_attention_mask=__UpperCamelCase , **__UpperCamelCase , ) snake_case__ : int = titles if not isinstance(__UpperCamelCase , __UpperCamelCase ) else [titles] snake_case__ : Optional[int] = texts if not isinstance(__UpperCamelCase , __UpperCamelCase ) else [texts] snake_case__ : List[Any] = len(__UpperCamelCase ) snake_case__ : str = questions if not isinstance(__UpperCamelCase , __UpperCamelCase ) else [questions] * n_passages assert len(__UpperCamelCase ) == len( __UpperCamelCase ), F"""There should be as many titles than texts but got {len(__UpperCamelCase )} titles and {len(__UpperCamelCase )} texts.""" snake_case__ : Optional[int] = super().__call__(__UpperCamelCase , __UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase )['input_ids'] snake_case__ : Optional[Any] = super().__call__(__UpperCamelCase , add_special_tokens=__UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase )['input_ids'] snake_case__ : Union[str, Any] = { 'input_ids': [ (encoded_question_and_title + encoded_text)[:max_length] if max_length is not None and truncation else encoded_question_and_title + encoded_text for encoded_question_and_title, 
encoded_text in zip(__UpperCamelCase , __UpperCamelCase ) ] } if return_attention_mask is not False: snake_case__ : List[Any] = [] for input_ids in encoded_inputs["input_ids"]: attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] ) snake_case__ : Union[str, Any] = attention_mask return self.pad(__UpperCamelCase , padding=__UpperCamelCase , max_length=__UpperCamelCase , return_tensors=__UpperCamelCase ) def __a ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = 16 , __UpperCamelCase = 64 , __UpperCamelCase = 4 , ) -> List[DPRSpanPrediction]: '''simple docstring''' snake_case__ : Optional[Any] = reader_input['input_ids'] snake_case__ , snake_case__ , snake_case__ : Any = reader_output[:3] snake_case__ : List[str] = len(__UpperCamelCase ) snake_case__ : Tuple = sorted(range(__UpperCamelCase ) , reverse=__UpperCamelCase , key=relevance_logits.__getitem__ ) snake_case__ : List[DPRReaderOutput] = [] for doc_id in sorted_docs: snake_case__ : Tuple = list(input_ids[doc_id] ) # assuming question & title information is at the beginning of the sequence snake_case__ : int = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id if sequence_ids[-1] == self.pad_token_id: snake_case__ : Union[str, Any] = sequence_ids.index(self.pad_token_id ) else: snake_case__ : str = len(__UpperCamelCase ) snake_case__ : Dict = self._get_best_spans( start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=__UpperCamelCase , top_spans=__UpperCamelCase , ) for start_index, end_index in best_spans: start_index += passage_offset end_index += passage_offset nbest_spans_predictions.append( DPRSpanPrediction( span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=__UpperCamelCase , start_index=__UpperCamelCase , end_index=__UpperCamelCase , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) ) if len(__UpperCamelCase ) >= num_spans: break return nbest_spans_predictions[:num_spans] def __a ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , ) -> List[DPRSpanPrediction]: '''simple docstring''' snake_case__ : Any = [] for start_index, start_score in enumerate(__UpperCamelCase ): for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ): scores.append(((start_index, start_index + answer_length), start_score + end_score) ) snake_case__ : str = sorted(__UpperCamelCase , key=lambda __UpperCamelCase : x[1] , reverse=__UpperCamelCase ) snake_case__ : Any = [] for (start_index, end_index), score in scores: assert start_index <= end_index, F"""Wrong span indices: [{start_index}:{end_index}]""" snake_case__ : str = end_index - start_index + 1 assert length <= max_answer_length, F"""Span is too long: {length} > {max_answer_length}""" if any( start_index <= prev_start_index <= prev_end_index <= end_index or prev_start_index <= start_index <= end_index <= prev_end_index for (prev_start_index, prev_end_index) in chosen_span_intervals ): continue chosen_span_intervals.append((start_index, end_index) ) if len(__UpperCamelCase ) == top_spans: break return chosen_span_intervals @add_end_docstrings(_lowerCamelCase ) class __snake_case ( _lowerCamelCase ,_lowerCamelCase ): __lowerCamelCase = VOCAB_FILES_NAMES __lowerCamelCase = READER_PRETRAINED_VOCAB_FILES_MAP __lowerCamelCase = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowerCamelCase = 
READER_PRETRAINED_INIT_CONFIGURATION __lowerCamelCase = ["""input_ids""", """attention_mask"""] __lowerCamelCase = DPRReaderTokenizer
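# Appended usage sketch (not part of the module): encoding question/title/text
# triples with the fast reader tokenizer and decoding the best answer spans.
# Assumes the public "facebook/dpr-reader-single-nq-base" checkpoint; the sample
# passage is illustrative.
import torch

from transformers import DPRReader, DPRReaderTokenizerFast

tokenizer = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
model = DPRReader.from_pretrained("facebook/dpr-reader-single-nq-base")

encoded_inputs = tokenizer(
    questions=["What is love?"],
    titles=["Haddaway"],
    texts=["'What Is Love' is a song recorded by the artist Haddaway"],
    return_tensors="pt",
)
with torch.no_grad():
    outputs = model(**encoded_inputs)

# decode_best_spans ranks passages by relevance_logits, then extracts the
# highest-scoring non-overlapping start/end spans per passage.
predicted_spans = tokenizer.decode_best_spans(encoded_inputs, outputs)
print(predicted_spans[0].text)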
699
from collections import namedtuple

from_to = namedtuple('''from_to''', '''from_ to''')

METRIC_CONVERSION = {
    '''cubicmeter''': from_to(1, 1),
    '''litre''': from_to(0.0_01, 10_00),
    '''kilolitre''': from_to(1, 1),
    '''gallon''': from_to(0.0_04_54, 2_64.1_72),
    '''cubicyard''': from_to(0.7_64_55, 1.3_07_95),
    '''cubicfoot''': from_to(0.0_28, 35.31_47),
    '''cup''': from_to(0.0_00_23_65_88, 42_26.75),
}


def UpperCamelCase__ ( value: float , from_type: str , to_type: str ) -> float:
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            F"""Invalid 'from_type' value: {from_type!r}. Supported values are:\n""" + ', '.join(METRIC_CONVERSION )
        )
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            F"""Invalid 'to_type' value: {to_type!r}. Supported values are:\n""" + ', '.join(METRIC_CONVERSION )
        )
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to


if __name__ == "__main__":
    import doctest

    doctest.testmod()
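# Appended usage sketch: a conversion scales the value by the from_/to factors in
# METRIC_CONVERSION, e.g. cubicmeter -> litre multiplies by 1 * 1000.
print(UpperCamelCase__(4, '''cubicmeter''', '''litre'''))  # 4000.0
print(UpperCamelCase__(1, '''litre''', '''gallon'''))      # 0.264172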
699
1
import logging
from dataclasses import dataclass, field
from typing import Optional

from seqaseq_trainer import arg_to_scheduler

from transformers import TrainingArguments


lowerCAmelCase__ : Any = logging.getLogger(__name__)


@dataclass
class __snake_case ( _lowerCamelCase ):
    __lowerCamelCase = field(
        default=0.0 ,metadata={"""help""": """The label smoothing epsilon to apply (if not zero)."""} )
    __lowerCamelCase = field(default=_lowerCamelCase ,metadata={"""help""": """Whether to use SortishSampler or not."""} )
    __lowerCamelCase = field(
        default=_lowerCamelCase ,metadata={"""help""": """Whether to use generate to calculate generative metrics (ROUGE, BLEU)."""} )
    __lowerCamelCase = field(default=_lowerCamelCase ,metadata={"""help""": """Whether to use Adafactor."""} )
    __lowerCamelCase = field(
        default=_lowerCamelCase ,metadata={"""help""": """Encoder layer dropout probability. Goes into model.config."""} )
    __lowerCamelCase = field(
        default=_lowerCamelCase ,metadata={"""help""": """Decoder layer dropout probability. Goes into model.config."""} )
    __lowerCamelCase = field(default=_lowerCamelCase ,metadata={"""help""": """Dropout probability. Goes into model.config."""} )
    __lowerCamelCase = field(
        default=_lowerCamelCase ,metadata={"""help""": """Attention dropout probability. Goes into model.config."""} )
    __lowerCamelCase = field(
        default="""linear""" ,metadata={"""help""": F"""Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}"""} ,)
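# Appended usage sketch: a TrainingArguments subclass like this is normally
# populated from the command line with HfArgumentParser. The attribute names
# below (label_smoothing, sortish_sampler) are inferred from the help strings,
# since the anonymised `__lowerCamelCase` fields above hide the real identifiers;
# adjust them to match your local definition.
from transformers import HfArgumentParser

parser = HfArgumentParser(__snake_case)
(training_args,) = parser.parse_args_into_dataclasses(
    args=["--output_dir", "out", "--label_smoothing", "0.1", "--sortish_sampler"]
)
print(training_args.label_smoothing, training_args.sortish_sampler)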
699
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging lowerCAmelCase__ : Tuple = logging.get_logger(__name__) lowerCAmelCase__ : Union[str, Any] = '''▁''' lowerCAmelCase__ : List[Any] = {'''vocab_file''': '''sentencepiece.bpe.model'''} lowerCAmelCase__ : Optional[Any] = { '''vocab_file''': { '''facebook/xglm-564M''': '''https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model''', } } lowerCAmelCase__ : str = { '''facebook/xglm-564M''': 20_48, } class __snake_case ( _lowerCamelCase ): __lowerCamelCase = VOCAB_FILES_NAMES __lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP __lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowerCamelCase = ["""input_ids""", """attention_mask"""] def __init__( self , __UpperCamelCase , __UpperCamelCase="<s>" , __UpperCamelCase="</s>" , __UpperCamelCase="</s>" , __UpperCamelCase="<s>" , __UpperCamelCase="<unk>" , __UpperCamelCase="<pad>" , __UpperCamelCase = None , **__UpperCamelCase , ) -> None: '''simple docstring''' snake_case__ : Any = {} if sp_model_kwargs is None else sp_model_kwargs # Compatibility with the original tokenizer snake_case__ : Tuple = 7 snake_case__ : Dict = [F"""<madeupword{i}>""" for i in range(self.num_madeup_words )] snake_case__ : Union[str, Any] = kwargs.get('additional_special_tokens' , [] ) kwargs["additional_special_tokens"] += [ word for word in madeup_words if word not in kwargs["additional_special_tokens"] ] super().__init__( bos_token=__UpperCamelCase , eos_token=__UpperCamelCase , unk_token=__UpperCamelCase , sep_token=__UpperCamelCase , cls_token=__UpperCamelCase , pad_token=__UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCamelCase , ) snake_case__ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(__UpperCamelCase ) ) snake_case__ : Optional[Any] = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab snake_case__ : Tuple = 1 # Mimic fairseq token-to-id alignment for the first 4 token snake_case__ : Tuple = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3} snake_case__ : List[Any] = len(self.sp_model ) snake_case__ : Optional[Any] = {F"""<madeupword{i}>""": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )} self.fairseq_tokens_to_ids.update(__UpperCamelCase ) snake_case__ : Union[str, Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self ) -> List[Any]: '''simple docstring''' snake_case__ : Union[str, Any] = self.__dict__.copy() snake_case__ : Optional[Any] = None snake_case__ : Tuple = self.sp_model.serialized_model_proto() return state def __setstate__( self , __UpperCamelCase ) -> Union[str, Any]: '''simple docstring''' snake_case__ : Union[str, Any] = d # for backward compatibility if not hasattr(self , 'sp_model_kwargs' ): snake_case__ : Any = {} snake_case__ : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def __a ( self , __UpperCamelCase , __UpperCamelCase = None ) -> List[int]: '''simple docstring''' if token_ids_a is None: return [self.sep_token_id] + token_ids_a snake_case__ : str = [self.sep_token_id] return sep + token_ids_a + sep + sep + token_ids_a def __a ( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = False ) -> List[int]: '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__UpperCamelCase , token_ids_a=__UpperCamelCase , already_has_special_tokens=__UpperCamelCase ) if token_ids_a is None: return [1] + ([0] * len(__UpperCamelCase )) return [1] + ([0] * len(__UpperCamelCase )) + [1, 1] + ([0] * len(__UpperCamelCase )) def __a ( self , __UpperCamelCase , __UpperCamelCase = None ) -> List[int]: '''simple docstring''' snake_case__ : int = [self.sep_token_id] if token_ids_a is None: return len(sep + token_ids_a ) * [0] return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0] @property def __a ( self ) -> Tuple: '''simple docstring''' return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words def __a ( self ) -> Union[str, Any]: '''simple docstring''' snake_case__ : int = {self.convert_ids_to_tokens(__UpperCamelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __a ( self , __UpperCamelCase ) -> List[str]: '''simple docstring''' return self.sp_model.encode(__UpperCamelCase , out_type=__UpperCamelCase ) def __a ( self , __UpperCamelCase ) -> Union[str, Any]: '''simple docstring''' if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] snake_case__ : Optional[Any] = self.sp_model.PieceToId(__UpperCamelCase ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def __a ( self , __UpperCamelCase ) -> Dict: '''simple docstring''' if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def __a ( self , __UpperCamelCase ) -> int: '''simple docstring''' snake_case__ : int = ''.join(__UpperCamelCase ).replace(__UpperCamelCase , ' ' ).strip() return out_string def __a ( self , __UpperCamelCase , __UpperCamelCase = None ) -> Tuple[str]: '''simple docstring''' if not os.path.isdir(__UpperCamelCase ): 
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return snake_case__ : List[str] = os.path.join( __UpperCamelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCamelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __UpperCamelCase ) elif not os.path.isfile(self.vocab_file ): with open(__UpperCamelCase , 'wb' ) as fi: snake_case__ : Any = self.sp_model.serialized_model_proto() fi.write(__UpperCamelCase ) return (out_vocab_file,)
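# Appended usage sketch: the fairseq alignment above pins <s>/<pad>/</s>/<unk>
# to ids 0-3 and shifts every sentencepiece id by fairseq_offset (1), and
# build_inputs_with_special_tokens prepends </s> (id 2) to a single sequence.
# Assumes this class corresponds to transformers' XGLMTokenizer and the public
# "facebook/xglm-564M" checkpoint.
from transformers import XGLMTokenizer

tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
print(tokenizer.convert_tokens_to_ids(["<s>", "<pad>", "</s>", "<unk>"]))  # [0, 1, 2, 3]

ids = tokenizer("Hello world")["input_ids"]
print(ids[0])                 # 2, i.e. the prepended </s>
print(tokenizer.decode(ids))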
699
1
import inspect import unittest from transformers import MobileViTConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class __snake_case ( _lowerCamelCase ): def __a ( self ) -> int: '''simple docstring''' snake_case__ : Optional[int] = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(__UpperCamelCase , 'hidden_sizes' ) ) self.parent.assertTrue(hasattr(__UpperCamelCase , 'neck_hidden_sizes' ) ) self.parent.assertTrue(hasattr(__UpperCamelCase , 'num_attention_heads' ) ) class __snake_case : def __init__( self , __UpperCamelCase , __UpperCamelCase=13 , __UpperCamelCase=32 , __UpperCamelCase=2 , __UpperCamelCase=3 , __UpperCamelCase=640 , __UpperCamelCase=4 , __UpperCamelCase="silu" , __UpperCamelCase=3 , __UpperCamelCase=32 , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=0.0_2 , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=10 , __UpperCamelCase=None , ) -> Union[str, Any]: '''simple docstring''' snake_case__ : List[str] = parent snake_case__ : Tuple = batch_size snake_case__ : Any = image_size snake_case__ : Any = patch_size snake_case__ : Optional[int] = num_channels snake_case__ : int = last_hidden_size snake_case__ : Optional[int] = num_attention_heads snake_case__ : List[str] = hidden_act snake_case__ : List[Any] = conv_kernel_size snake_case__ : List[Any] = output_stride snake_case__ : Union[str, Any] = hidden_dropout_prob snake_case__ : Dict = attention_probs_dropout_prob snake_case__ : str = classifier_dropout_prob snake_case__ : str = use_labels snake_case__ : Tuple = is_training snake_case__ : str = num_labels snake_case__ : Optional[Any] = initializer_range snake_case__ : Optional[int] = scope def __a ( self ) -> List[Any]: '''simple docstring''' snake_case__ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) snake_case__ : Optional[int] = None snake_case__ : Any = None if self.use_labels: snake_case__ : Optional[Any] = ids_tensor([self.batch_size] , self.num_labels ) snake_case__ : Optional[Any] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) snake_case__ : Any = self.get_config() return config, pixel_values, labels, pixel_labels def __a ( self ) -> Optional[int]: '''simple docstring''' return MobileViTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , ) def __a ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> 
Optional[Any]: '''simple docstring''' snake_case__ : List[str] = MobileViTModel(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() snake_case__ : Optional[Any] = model(__UpperCamelCase ) self.parent.assertEqual( result.last_hidden_state.shape , ( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def __a ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> int: '''simple docstring''' snake_case__ : Optional[Any] = self.num_labels snake_case__ : Tuple = MobileViTForImageClassification(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() snake_case__ : Optional[Any] = model(__UpperCamelCase , labels=__UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __a ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> List[Any]: '''simple docstring''' snake_case__ : str = self.num_labels snake_case__ : int = MobileViTForSemanticSegmentation(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() snake_case__ : int = model(__UpperCamelCase ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) snake_case__ : Any = model(__UpperCamelCase , labels=__UpperCamelCase ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def __a ( self ) -> str: '''simple docstring''' snake_case__ : str = self.prepare_config_and_inputs() snake_case__ , snake_case__ , snake_case__ , snake_case__ : Tuple = config_and_inputs snake_case__ : List[Any] = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class __snake_case ( _lowerCamelCase ,_lowerCamelCase ,unittest.TestCase ): __lowerCamelCase = ( (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation) if is_torch_available() else () ) __lowerCamelCase = ( { """feature-extraction""": MobileViTModel, """image-classification""": MobileViTForImageClassification, """image-segmentation""": MobileViTForSemanticSegmentation, } if is_torch_available() else {} ) __lowerCamelCase = False __lowerCamelCase = False __lowerCamelCase = False __lowerCamelCase = False def __a ( self ) -> Optional[Any]: '''simple docstring''' snake_case__ : Any = MobileViTModelTester(self ) snake_case__ : Optional[int] = MobileViTConfigTester(self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase ) def __a ( self ) -> Tuple: '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason='MobileViT does not use inputs_embeds' ) def __a ( self ) -> str: '''simple docstring''' pass @unittest.skip(reason='MobileViT does not support input and output embeddings' ) def __a ( self ) -> List[str]: '''simple docstring''' pass @unittest.skip(reason='MobileViT does not output attentions' ) def __a ( self ) -> Optional[int]: '''simple docstring''' pass def __a ( self ) -> Tuple: '''simple docstring''' snake_case__ , snake_case__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case__ : int = model_class(__UpperCamelCase ) snake_case__ : Tuple = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic snake_case__ : Optional[int] = [*signature.parameters.keys()] 
snake_case__ : Tuple = ['pixel_values'] self.assertListEqual(arg_names[:1] , __UpperCamelCase ) @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' ) def __a ( self ) -> List[Any]: '''simple docstring''' pass def __a ( self ) -> List[str]: '''simple docstring''' snake_case__ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCamelCase ) def __a ( self ) -> Optional[Any]: '''simple docstring''' def check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): snake_case__ : List[str] = model_class(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() with torch.no_grad(): snake_case__ : str = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) ) snake_case__ : Optional[Any] = outputs.hidden_states snake_case__ : Tuple = 5 self.assertEqual(len(__UpperCamelCase ) , __UpperCamelCase ) # MobileViT's feature maps are of shape (batch_size, num_channels, height, width) # with the width and height being successively divided by 2. snake_case__ : Dict = 2 for i in range(len(__UpperCamelCase ) ): self.assertListEqual( list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , ) divisor *= 2 self.assertEqual(self.model_tester.output_stride , divisor // 2 ) snake_case__ , snake_case__ : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case__ : Union[str, Any] = True check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] snake_case__ : Optional[Any] = True check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) def __a ( self ) -> List[Any]: '''simple docstring''' snake_case__ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__UpperCamelCase ) def __a ( self ) -> Optional[int]: '''simple docstring''' snake_case__ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*__UpperCamelCase ) @slow def __a ( self ) -> List[str]: '''simple docstring''' for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case__ : List[str] = MobileViTModel.from_pretrained(__UpperCamelCase ) self.assertIsNotNone(__UpperCamelCase ) def UpperCamelCase__ ( ) -> Optional[Any]: snake_case__ : Any = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class __snake_case ( unittest.TestCase ): @cached_property def __a ( self ) -> int: '''simple docstring''' return MobileViTImageProcessor.from_pretrained('apple/mobilevit-xx-small' ) if is_vision_available() else None @slow def __a ( self ) -> Tuple: '''simple docstring''' snake_case__ : List[str] = MobileViTForImageClassification.from_pretrained('apple/mobilevit-xx-small' ).to(__UpperCamelCase ) snake_case__ : Union[str, Any] = self.default_image_processor snake_case__ : List[str] = prepare_img() snake_case__ : List[str] = image_processor(images=__UpperCamelCase , return_tensors='pt' ).to(__UpperCamelCase ) # forward pass with torch.no_grad(): snake_case__ : Tuple = model(**__UpperCamelCase ) # verify the logits snake_case__ : Optional[Any] = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , __UpperCamelCase ) snake_case__ : Optional[Any] = 
torch.tensor([-1.9_3_6_4, -1.2_3_2_7, -0.4_6_5_3] ).to(__UpperCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCamelCase , atol=1E-4 ) ) @slow def __a ( self ) -> Union[str, Any]: '''simple docstring''' snake_case__ : Any = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' ) snake_case__ : str = model.to(__UpperCamelCase ) snake_case__ : Union[str, Any] = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' ) snake_case__ : int = prepare_img() snake_case__ : Optional[Any] = image_processor(images=__UpperCamelCase , return_tensors='pt' ).to(__UpperCamelCase ) # forward pass with torch.no_grad(): snake_case__ : Any = model(**__UpperCamelCase ) snake_case__ : Tuple = outputs.logits # verify the logits snake_case__ : Optional[int] = torch.Size((1, 21, 32, 32) ) self.assertEqual(logits.shape , __UpperCamelCase ) snake_case__ : Dict = torch.tensor( [ [[6.9_7_1_3, 6.9_7_8_6, 7.2_4_2_2], [7.2_8_9_3, 7.2_8_2_5, 7.4_4_4_6], [7.6_5_8_0, 7.8_7_9_7, 7.9_4_2_0]], [[-1_0.6_8_6_9, -1_0.3_2_5_0, -1_0.3_4_7_1], [-1_0.4_2_2_8, -9.9_8_6_8, -9.7_1_3_2], [-1_1.0_4_0_5, -1_1.0_2_2_1, -1_0.7_3_1_8]], [[-3.3_0_8_9, -2.8_5_3_9, -2.6_7_4_0], [-3.2_7_0_6, -2.5_6_2_1, -2.5_1_0_8], [-3.2_5_3_4, -2.6_6_1_5, -2.6_6_5_1]], ] , device=__UpperCamelCase , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __UpperCamelCase , atol=1E-4 ) ) @slow def __a ( self ) -> List[str]: '''simple docstring''' snake_case__ : Union[str, Any] = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' ) snake_case__ : List[str] = model.to(__UpperCamelCase ) snake_case__ : Any = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' ) snake_case__ : str = prepare_img() snake_case__ : Optional[int] = image_processor(images=__UpperCamelCase , return_tensors='pt' ).to(__UpperCamelCase ) # forward pass with torch.no_grad(): snake_case__ : Tuple = model(**__UpperCamelCase ) snake_case__ : str = outputs.logits.detach().cpu() snake_case__ : Optional[int] = image_processor.post_process_semantic_segmentation(outputs=__UpperCamelCase , target_sizes=[(50, 60)] ) snake_case__ : List[str] = torch.Size((50, 60) ) self.assertEqual(segmentation[0].shape , __UpperCamelCase ) snake_case__ : Tuple = image_processor.post_process_semantic_segmentation(outputs=__UpperCamelCase ) snake_case__ : Tuple = torch.Size((32, 32) ) self.assertEqual(segmentation[0].shape , __UpperCamelCase )
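# Standalone sketch of what the integration tests above exercise: ImageNet
# classification with the small "apple/mobilevit-xx-small" checkpoint, using the
# same COCO fixture image as prepare_img().
import torch
from PIL import Image

from transformers import MobileViTForImageClassification, MobileViTImageProcessor

processor = MobileViTImageProcessor.from_pretrained("apple/mobilevit-xx-small")
model = MobileViTForImageClassification.from_pretrained("apple/mobilevit-xx-small")
model.eval()

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, 1000)
print(model.config.id2label[int(logits.argmax(-1))])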
699
import inspect import unittest from transformers import DecisionTransformerConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import DecisionTransformerModel from transformers.models.decision_transformer.modeling_decision_transformer import ( DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) class __snake_case : def __init__( self , __UpperCamelCase , __UpperCamelCase=13 , __UpperCamelCase=7 , __UpperCamelCase=6 , __UpperCamelCase=17 , __UpperCamelCase=23 , __UpperCamelCase=11 , __UpperCamelCase=True , ) -> List[str]: '''simple docstring''' snake_case__ : str = parent snake_case__ : Dict = batch_size snake_case__ : Optional[int] = seq_length snake_case__ : Optional[Any] = act_dim snake_case__ : List[Any] = state_dim snake_case__ : Optional[int] = hidden_size snake_case__ : Dict = max_length snake_case__ : List[str] = is_training def __a ( self ) -> List[Any]: '''simple docstring''' snake_case__ : str = floats_tensor((self.batch_size, self.seq_length, self.state_dim) ) snake_case__ : Optional[Any] = floats_tensor((self.batch_size, self.seq_length, self.act_dim) ) snake_case__ : Dict = floats_tensor((self.batch_size, self.seq_length, 1) ) snake_case__ : str = floats_tensor((self.batch_size, self.seq_length, 1) ) snake_case__ : str = ids_tensor((self.batch_size, self.seq_length) , vocab_size=1000 ) snake_case__ : Optional[int] = random_attention_mask((self.batch_size, self.seq_length) ) snake_case__ : Tuple = self.get_config() return ( config, states, actions, rewards, returns_to_go, timesteps, attention_mask, ) def __a ( self ) -> List[str]: '''simple docstring''' return DecisionTransformerConfig( batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , ) def __a ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , ) -> Any: '''simple docstring''' snake_case__ : Any = DecisionTransformerModel(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() snake_case__ : Union[str, Any] = model(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) self.parent.assertEqual(result.state_preds.shape , states.shape ) self.parent.assertEqual(result.action_preds.shape , actions.shape ) self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size) ) # seq length *3 as there are 3 modelities: states, returns and actions def __a ( self ) -> List[str]: '''simple docstring''' snake_case__ : Optional[Any] = self.prepare_config_and_inputs() ( ( snake_case__ ) , ( snake_case__ ) , ( snake_case__ ) , ( snake_case__ ) , ( snake_case__ ) , ( snake_case__ ) , ( snake_case__ ) , ) : List[str] = config_and_inputs snake_case__ : List[Any] = { 'states': states, 'actions': actions, 'rewards': rewards, 'returns_to_go': returns_to_go, 'timesteps': timesteps, 'attention_mask': attention_mask, } return config, inputs_dict @require_torch class __snake_case ( 
_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,unittest.TestCase ): __lowerCamelCase = (DecisionTransformerModel,) if is_torch_available() else () __lowerCamelCase = () __lowerCamelCase = {"""feature-extraction""": DecisionTransformerModel} if is_torch_available() else {} # Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids __lowerCamelCase = False # Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features __lowerCamelCase = False __lowerCamelCase = False __lowerCamelCase = False __lowerCamelCase = False __lowerCamelCase = False __lowerCamelCase = False __lowerCamelCase = False __lowerCamelCase = False __lowerCamelCase = False def __a ( self ) -> List[str]: '''simple docstring''' snake_case__ : Optional[Any] = DecisionTransformerModelTester(self ) snake_case__ : List[Any] = ConfigTester(self , config_class=__UpperCamelCase , hidden_size=37 ) def __a ( self ) -> Optional[int]: '''simple docstring''' self.config_tester.run_common_tests() def __a ( self ) -> Optional[Any]: '''simple docstring''' snake_case__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCamelCase ) @slow def __a ( self ) -> str: '''simple docstring''' for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case__ : Optional[int] = DecisionTransformerModel.from_pretrained(__UpperCamelCase ) self.assertIsNotNone(__UpperCamelCase ) def __a ( self ) -> List[Any]: '''simple docstring''' snake_case__ , snake_case__ : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case__ : List[Any] = model_class(__UpperCamelCase ) snake_case__ : Union[str, Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic snake_case__ : Dict = [*signature.parameters.keys()] snake_case__ : Optional[int] = [ 'states', 'actions', 'rewards', 'returns_to_go', 'timesteps', 'attention_mask', ] self.assertListEqual(arg_names[: len(__UpperCamelCase )] , __UpperCamelCase ) @require_torch class __snake_case ( unittest.TestCase ): @slow def __a ( self ) -> Tuple: '''simple docstring''' snake_case__ : List[str] = 2 # number of steps of autoregressive prediction we will perform snake_case__ : int = 10 # defined by the RL environment, may be normalized snake_case__ : List[Any] = DecisionTransformerModel.from_pretrained('edbeeching/decision-transformer-gym-hopper-expert' ) snake_case__ : Optional[Any] = model.to(__UpperCamelCase ) snake_case__ : Any = model.config torch.manual_seed(0 ) snake_case__ : str = torch.randn(1 , 1 , config.state_dim ).to(device=__UpperCamelCase , dtype=torch.floataa ) # env.reset() snake_case__ : str = torch.tensor( [[0.2_4_2_7_9_3, -0.2_8_6_9_3_0_7_4, 0.8_7_4_2_6_1_3], [0.6_7_8_1_5_2_7_4, -0.0_8_1_0_1_0_8_5, -0.1_2_9_5_2_1_4_7]] , device=__UpperCamelCase ) snake_case__ : Optional[Any] = torch.tensor(__UpperCamelCase , device=__UpperCamelCase , dtype=torch.floataa ).reshape(1 , 1 , 1 ) snake_case__ : int = state snake_case__ : Optional[int] = torch.zeros(1 , 0 , config.act_dim , device=__UpperCamelCase , dtype=torch.floataa ) snake_case__ : Dict = torch.zeros(1 , 0 , device=__UpperCamelCase , dtype=torch.floataa ) snake_case__ : Optional[int] = torch.tensor(0 , device=__UpperCamelCase , dtype=torch.long ).reshape(1 , 1 ) for step in range(__UpperCamelCase ): snake_case__ : Any = torch.cat([actions, torch.zeros(1 , 1 , config.act_dim , 
device=__UpperCamelCase )] , dim=1 ) snake_case__ : Any = torch.cat([rewards, torch.zeros(1 , 1 , device=__UpperCamelCase )] , dim=1 ) snake_case__ : Any = torch.ones(1 , states.shape[1] ).to(dtype=torch.long , device=states.device ) with torch.no_grad(): snake_case__ , snake_case__ , snake_case__ : List[str] = model( states=__UpperCamelCase , actions=__UpperCamelCase , rewards=__UpperCamelCase , returns_to_go=__UpperCamelCase , timesteps=__UpperCamelCase , attention_mask=__UpperCamelCase , return_dict=__UpperCamelCase , ) self.assertEqual(action_pred.shape , actions.shape ) self.assertTrue(torch.allclose(action_pred[0, -1] , expected_outputs[step] , atol=1E-4 ) ) snake_case__ , snake_case__ , snake_case__ , snake_case__ : List[str] = ( # env.step(action) torch.randn(1 , 1 , config.state_dim ).to(device=__UpperCamelCase , dtype=torch.floataa ), 1.0, False, {}, ) snake_case__ : Tuple = action_pred[0, -1] snake_case__ : List[Any] = torch.cat([states, state] , dim=1 ) snake_case__ : int = returns_to_go[0, -1] - reward snake_case__ : Tuple = torch.cat([returns_to_go, pred_return.reshape(1 , 1 , 1 )] , dim=1 ) snake_case__ : List[str] = torch.cat( [timesteps, torch.ones((1, 1) , device=__UpperCamelCase , dtype=torch.long ) * (step + 1)] , dim=1 )
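# Condensed, runnable sketch of the rollout loop the integration test above
# performs: append a zero action, predict, write the prediction back, then feed
# in the next state and the decremented return-to-go. The environment step is
# stubbed with torch.randn and a constant reward, so outputs are not meaningful.
import torch

from transformers import DecisionTransformerModel

model = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert")
model.eval()
config = model.config

states = torch.randn(1, 1, config.state_dim)
actions = torch.zeros(1, 0, config.act_dim)
rewards = torch.zeros(1, 0)
returns_to_go = torch.tensor(10.0).reshape(1, 1, 1)
timesteps = torch.tensor(0, dtype=torch.long).reshape(1, 1)

for step in range(2):
    actions = torch.cat([actions, torch.zeros(1, 1, config.act_dim)], dim=1)
    rewards = torch.cat([rewards, torch.zeros(1, 1)], dim=1)
    attention_mask = torch.ones(1, states.shape[1], dtype=torch.long)
    with torch.no_grad():
        _, action_preds, _ = model(
            states=states,
            actions=actions,
            rewards=rewards,
            returns_to_go=returns_to_go,
            timesteps=timesteps,
            attention_mask=attention_mask,
            return_dict=False,
        )
    actions[0, -1] = action_preds[0, -1]

    # env.step() stub: a random next state and a constant reward.
    state, reward = torch.randn(1, 1, config.state_dim), 1.0
    states = torch.cat([states, state], dim=1)
    returns_to_go = torch.cat([returns_to_go, (returns_to_go[0, -1] - reward).reshape(1, 1, 1)], dim=1)
    timesteps = torch.cat([timesteps, torch.ones(1, 1, dtype=torch.long) * (step + 1)], dim=1)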
699
import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    EulerAncestralDiscreteScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    StableDiffusionInstructPixaPixPipeline,
    UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import (
    IMAGE_TO_IMAGE_IMAGE_PARAMS,
    TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin

enable_full_determinism()


class __snake_case ( _lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,unittest.TestCase ):
    __lowerCamelCase = StableDiffusionInstructPixaPixPipeline
    __lowerCamelCase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width""", """cross_attention_kwargs"""}
    __lowerCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    __lowerCamelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS
    __lowerCamelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def __a ( self ) -> List[Any]:
        '''simple docstring'''
        torch.manual_seed(0 )
        snake_case__ : Tuple = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 ,
            down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') ,
            cross_attention_dim=32 ,
        )
        snake_case__ : Any = PNDMScheduler(skip_prk_steps=__UpperCamelCase )
        torch.manual_seed(0 )
        snake_case__ : Dict = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 ,
            down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] ,
            latent_channels=4 ,
        )
        torch.manual_seed(0 )
        snake_case__ : int = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 ,
            num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 ,
        )
        snake_case__ : Tuple = CLIPTextModel(__UpperCamelCase )
        snake_case__ : Optional[int] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        snake_case__ : Optional[int] = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components

    def __a ( self , __UpperCamelCase , __UpperCamelCase=0 ) -> Dict:
        '''simple docstring'''
        snake_case__ : Optional[Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCamelCase ) ).to(__UpperCamelCase )
        snake_case__ : int = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        snake_case__ : Union[str, Any] = Image.fromarray(np.uinta(__UpperCamelCase ) ).convert('RGB' )
        if str(__UpperCamelCase ).startswith('mps' ):
            snake_case__ : str = torch.manual_seed(__UpperCamelCase )
        else:
            snake_case__ : Dict = torch.Generator(device=__UpperCamelCase ).manual_seed(__UpperCamelCase )
        snake_case__ : str = {
            'prompt': 'A painting of a squirrel eating a burger',
            'image': image,
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 6.0,
            'image_guidance_scale': 1,
            'output_type': 'numpy',
        }
        return inputs

    def __a ( self ) -> Tuple:
        '''simple docstring'''
        snake_case__ : Dict = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        snake_case__ : Optional[int] = self.get_dummy_components()
        snake_case__ : str = StableDiffusionInstructPixaPixPipeline(**__UpperCamelCase )
        snake_case__ : Optional[int] = sd_pipe.to(__UpperCamelCase )
        sd_pipe.set_progress_bar_config(disable=__UpperCamelCase )
        snake_case__ : Tuple = self.get_dummy_inputs(__UpperCamelCase )
        snake_case__ : List[str] = sd_pipe(**__UpperCamelCase ).images
        snake_case__ : List[str] = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        snake_case__ : str = np.array([0.7_5_2_6, 0.3_7_5_0, 0.4_5_4_7, 0.6_1_1_7, 0.5_8_6_6, 0.5_0_1_6, 0.4_3_2_7, 0.5_6_4_2, 0.4_8_1_5] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3

    def __a ( self ) -> Any:
        '''simple docstring'''
        snake_case__ : Dict = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        snake_case__ : Union[str, Any] = self.get_dummy_components()
        snake_case__ : Optional[Any] = StableDiffusionInstructPixaPixPipeline(**__UpperCamelCase )
        snake_case__ : List[Any] = sd_pipe.to(__UpperCamelCase )
        sd_pipe.set_progress_bar_config(disable=__UpperCamelCase )
        snake_case__ : Union[str, Any] = self.get_dummy_inputs(__UpperCamelCase )
        snake_case__ : List[str] = 'french fries'
        snake_case__ : Optional[Any] = sd_pipe(**__UpperCamelCase , negative_prompt=__UpperCamelCase )
        snake_case__ : Union[str, Any] = output.images
        snake_case__ : Tuple = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        snake_case__ : Any = np.array([0.7_5_1_1, 0.3_6_4_2, 0.4_5_5_3, 0.6_2_3_6, 0.5_7_9_7, 0.5_0_1_3, 0.4_3_4_3, 0.5_6_1_1, 0.4_8_3_1] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3

    def __a ( self ) -> int:
        '''simple docstring'''
        snake_case__ : Optional[int] = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        snake_case__ : List[str] = self.get_dummy_components()
        snake_case__ : str = StableDiffusionInstructPixaPixPipeline(**__UpperCamelCase )
        snake_case__ : str = sd_pipe.to(__UpperCamelCase )
        sd_pipe.set_progress_bar_config(disable=__UpperCamelCase )
        snake_case__ : Dict = self.get_dummy_inputs(__UpperCamelCase )
        snake_case__ : Any = [inputs['prompt']] * 2
        snake_case__ : Optional[int] = np.array(inputs['image'] ).astype(np.floataa ) / 2_5_5.0
        snake_case__ : Optional[int] = torch.from_numpy(__UpperCamelCase ).unsqueeze(0 ).to(__UpperCamelCase )
        snake_case__ : Any = image / 2 + 0.5
        snake_case__ : Optional[Any] = image.permute(0 , 3 , 1 , 2 )
        snake_case__ : List[Any] = image.repeat(2 , 1 , 1 , 1 )
        snake_case__ : Optional[int] = sd_pipe(**__UpperCamelCase ).images
        snake_case__ : Union[str, Any] = image[-1, -3:, -3:, -1]
        assert image.shape == (2, 32, 32, 3)
        snake_case__ : List[Any] = np.array([0.5_8_1_2, 0.5_7_4_8, 0.5_2_2_2, 0.5_9_0_8, 0.5_6_9_5, 0.7_1_7_4, 0.6_8_0_4, 0.5_5_2_3, 0.5_5_7_9] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3

    def __a ( self ) -> Union[str, Any]:
        '''simple docstring'''
        snake_case__ : Optional[int] = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        snake_case__ : Optional[int] = self.get_dummy_components()
        snake_case__ : Tuple = EulerAncestralDiscreteScheduler(beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' )
        snake_case__ : int = StableDiffusionInstructPixaPixPipeline(**__UpperCamelCase )
        snake_case__ : List[str] = sd_pipe.to(__UpperCamelCase )
        sd_pipe.set_progress_bar_config(disable=__UpperCamelCase )
        snake_case__ : str = self.get_dummy_inputs(__UpperCamelCase )
        snake_case__ : Any = sd_pipe(**__UpperCamelCase ).images
        snake_case__ : int = image[0, -3:, -3:, -1]
        snake_case__ : Tuple = [round(__UpperCamelCase , 4 ) for x in image_slice.flatten().tolist()]
        print(','.join([str(__UpperCamelCase ) for x in slice] ) )
        assert image.shape == (1, 32, 32, 3)
        snake_case__ : List[Any] = np.array([0.7_4_1_7, 0.3_8_4_2, 0.4_7_3_2, 0.5_7_7_6, 0.5_8_9_1, 0.5_1_3_9, 0.4_0_5_2, 0.5_6_7_3, 0.4_9_8_6] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3

    def __a ( self ) -> int:
        '''simple docstring'''
        super().test_inference_batch_single_identical(expected_max_diff=3E-3 )

    def __a ( self ) -> Any:
        '''simple docstring'''
        snake_case__ : Optional[int] = self.get_dummy_components()
        snake_case__ : int = StableDiffusionInstructPixaPixPipeline(**__UpperCamelCase )
        snake_case__ : Union[str, Any] = VaeImageProcessor(do_resize=__UpperCamelCase , do_normalize=__UpperCamelCase )
        snake_case__ : Optional[int] = pipe.to(__UpperCamelCase )
        pipe.set_progress_bar_config(disable=__UpperCamelCase )
        snake_case__ : Optional[Any] = pipe(**self.get_dummy_inputs_by_type(__UpperCamelCase , input_image_type='pt' ) )[0]
        snake_case__ : Union[str, Any] = components['vae']
        snake_case__ : str = self.get_dummy_inputs_by_type(__UpperCamelCase , input_image_type='pt' )
        for image_param in self.image_latents_params:
            if image_param in inputs.keys():
                snake_case__ : List[str] = vae.encode(inputs[image_param] ).latent_dist.mode()
        snake_case__ : Dict = pipe(**__UpperCamelCase )[0]
        snake_case__ : str = np.abs(out - out_latents_inputs ).max()
        self.assertLess(__UpperCamelCase , 1E-4 , 'passing latents as image input generate different result from passing image' )


@slow
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
    def __a ( self ) -> List[str]:
        '''simple docstring'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def __a ( self , __UpperCamelCase=0 ) -> Dict:
        '''simple docstring'''
        snake_case__ : Optional[Any] = torch.manual_seed(__UpperCamelCase )
        snake_case__ : List[str] = load_image(
            'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg' )
        snake_case__ : int = {
            'prompt': 'turn him into a cyborg',
            'image': image,
            'generator': generator,
            'num_inference_steps': 3,
            'guidance_scale': 7.5,
            'image_guidance_scale': 1.0,
            'output_type': 'numpy',
        }
        return inputs

    def __a ( self ) -> Dict:
        '''simple docstring'''
        snake_case__ : Any = StableDiffusionInstructPixaPixPipeline.from_pretrained('timbrooks/instruct-pix2pix' , safety_checker=__UpperCamelCase )
        pipe.to(__UpperCamelCase )
        pipe.set_progress_bar_config(disable=__UpperCamelCase )
        pipe.enable_attention_slicing()
        snake_case__ : Tuple = self.get_inputs()
        snake_case__ : List[Any] = pipe(**__UpperCamelCase ).images
        snake_case__ : Union[str, Any] = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 512, 3)
        snake_case__ : Dict = np.array([0.5_9_0_2, 0.6_0_1_5, 0.6_0_2_7, 0.5_9_8_3, 0.6_0_9_2, 0.6_0_6_1, 0.5_7_6_5, 0.5_7_8_5, 0.5_5_5_5] )
        assert np.abs(expected_slice - image_slice ).max() < 1E-3

    def __a ( self ) -> str:
        '''simple docstring'''
        snake_case__ : Any = StableDiffusionInstructPixaPixPipeline.from_pretrained('timbrooks/instruct-pix2pix' , safety_checker=__UpperCamelCase )
        snake_case__ : Tuple = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.to(__UpperCamelCase )
        pipe.set_progress_bar_config(disable=__UpperCamelCase )
        pipe.enable_attention_slicing()
        snake_case__ : Dict = self.get_inputs()
        snake_case__ : Dict = pipe(**__UpperCamelCase ).images
        snake_case__ : Union[str, Any] = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 512, 3)
        snake_case__ : List[Any] = np.array([0.6_5_7_8, 0.6_8_1_7, 0.6_9_7_2, 0.6_7_6_1, 0.6_8_5_6, 0.6_9_1_6, 0.6_4_2_8, 0.6_5_1_6, 0.6_3_0_1] )
        assert np.abs(expected_slice - image_slice ).max() < 1E-3

    def __a ( self ) -> Optional[Any]:
        '''simple docstring'''
        snake_case__ : Tuple = StableDiffusionInstructPixaPixPipeline.from_pretrained('timbrooks/instruct-pix2pix' , safety_checker=__UpperCamelCase )
        snake_case__ : Tuple = DDIMScheduler.from_config(pipe.scheduler.config )
        pipe.to(__UpperCamelCase )
        pipe.set_progress_bar_config(disable=__UpperCamelCase )
        pipe.enable_attention_slicing()
        snake_case__ : Optional[int] = self.get_inputs()
        snake_case__ : Optional[int] = pipe(**__UpperCamelCase ).images
        snake_case__ : Tuple = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 512, 3)
        snake_case__ : int = np.array([0.3_8_2_8, 0.3_8_3_4, 0.3_8_1_8, 0.3_7_9_2, 0.3_8_6_5, 0.3_7_5_2, 0.3_7_9_2, 0.3_8_4_7, 0.3_7_5_3] )
        assert np.abs(expected_slice - image_slice ).max() < 1E-3

    def __a ( self ) -> Union[str, Any]:
        '''simple docstring'''
        snake_case__ : int = 0

        def callback_fn(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> None:
            snake_case__ : List[Any] = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                snake_case__ : Any = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                snake_case__ : int = latents[0, -3:, -3:, -1]
                snake_case__ : List[str] = np.array([-0.2_4_6_3, -0.4_6_4_4, -0.9_7_5_6, 1.5_1_7_6, 1.4_4_1_4, 0.7_8_6_6, 0.9_8_9_7, 0.8_5_2_1, 0.7_9_8_3] )
                assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
            elif step == 2:
                snake_case__ : Dict = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                snake_case__ : Dict = latents[0, -3:, -3:, -1]
                snake_case__ : Optional[Any] = np.array([-0.2_6_4_4, -0.4_6_2_6, -0.9_6_5_3, 1.5_1_7_6, 1.4_5_5_1, 0.7_6_8_6, 0.9_8_0_5, 0.8_4_5_2, 0.8_1_1_5] )
                assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2

        snake_case__ : str = False
        snake_case__ : List[Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            'timbrooks/instruct-pix2pix' , safety_checker=__UpperCamelCase , torch_dtype=torch.floataa )
        snake_case__ : int = pipe.to(__UpperCamelCase )
        pipe.set_progress_bar_config(disable=__UpperCamelCase )
        pipe.enable_attention_slicing()
        snake_case__ : int = self.get_inputs()
        pipe(**__UpperCamelCase , callback=__UpperCamelCase , callback_steps=1 )
        assert callback_fn.has_been_called
        assert number_of_steps == 3

    def __a ( self ) -> Any:
        '''simple docstring'''
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        snake_case__ : str = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            'timbrooks/instruct-pix2pix' , safety_checker=__UpperCamelCase , torch_dtype=torch.floataa )
        snake_case__ : Dict = pipe.to(__UpperCamelCase )
        pipe.set_progress_bar_config(disable=__UpperCamelCase )
        pipe.enable_attention_slicing(1 )
        pipe.enable_sequential_cpu_offload()
        snake_case__ : str = self.get_inputs()
        snake_case__ : Tuple = pipe(**__UpperCamelCase )
        snake_case__ : List[Any] = torch.cuda.max_memory_allocated()
        # make sure that less than 2.2 GB is allocated
        assert mem_bytes < 2.2 * 10**9

    def __a ( self ) -> int:
        '''simple docstring'''
        snake_case__ : int = self.get_inputs()
        # resize to resolution that is divisible by 8 but not 16 or 32
        snake_case__ : Tuple = inputs['image'].resize((504, 504) )
        snake_case__ : str = 'timbrooks/instruct-pix2pix'
        snake_case__ : Tuple = StableDiffusionInstructPixaPixPipeline.from_pretrained(__UpperCamelCase , safety_checker=__UpperCamelCase , )
        pipe.to(__UpperCamelCase )
        pipe.set_progress_bar_config(disable=__UpperCamelCase )
        pipe.enable_attention_slicing()
        snake_case__ : str = pipe(**__UpperCamelCase )
        snake_case__ : List[Any] = output.images[0]
        snake_case__ : List[Any] = image[255:258, 383:386, -1]
        assert image.shape == (504, 504, 3)
        snake_case__ : List[str] = np.array([0.2_7_2_6, 0.2_5_2_9, 0.2_6_6_4, 0.2_6_5_5, 0.2_6_4_1, 0.2_6_4_2, 0.2_5_9_1, 0.2_6_4_9, 0.2_5_9_0] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
699
1
from __future__ import annotations

from collections.abc import MutableSequence


class __snake_case :
    def __init__( self , __UpperCamelCase , __UpperCamelCase ) -> None:
        '''simple docstring'''
        if len(__UpperCamelCase ) != degree + 1:
            raise ValueError('The number of coefficients should be equal to the degree + 1.' )
        snake_case__ : list[float] = list(__UpperCamelCase )
        snake_case__ : Any = degree

    def __add__( self , __UpperCamelCase ) -> Polynomial:
        '''simple docstring'''
        if self.degree > polynomial_a.degree:
            snake_case__ : Optional[Any] = self.coefficients[:]
            for i in range(polynomial_a.degree + 1 ):
                coefficients[i] += polynomial_a.coefficients[i]
            return Polynomial(self.degree , __UpperCamelCase )
        else:
            snake_case__ : Optional[Any] = polynomial_a.coefficients[:]
            for i in range(self.degree + 1 ):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_a.degree , __UpperCamelCase )

    def __sub__( self , __UpperCamelCase ) -> Polynomial:
        '''simple docstring'''
        return self + polynomial_a * Polynomial(0 , [-1] )

    def __neg__( self ) -> Polynomial:
        '''simple docstring'''
        return Polynomial(self.degree , [-c for c in self.coefficients] )

    def __mul__( self , __UpperCamelCase ) -> Polynomial:
        '''simple docstring'''
        snake_case__ : list[float] = [0] * (self.degree + polynomial_a.degree + 1)
        for i in range(self.degree + 1 ):
            for j in range(polynomial_a.degree + 1 ):
                coefficients[i + j] += self.coefficients[i] * polynomial_a.coefficients[j]
        return Polynomial(self.degree + polynomial_a.degree , __UpperCamelCase )

    def __a ( self , __UpperCamelCase ) -> int | float:
        '''simple docstring'''
        snake_case__ : int | float = 0
        for i in range(self.degree + 1 ):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__( self ) -> str:
        '''simple docstring'''
        snake_case__ : Any = ''
        for i in range(self.degree , -1 , -1 ):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "
            if i == 0:
                polynomial += str(abs(self.coefficients[i] ) )
            elif i == 1:
                polynomial += str(abs(self.coefficients[i] ) ) + "x"
            else:
                polynomial += str(abs(self.coefficients[i] ) ) + "x^" + str(__UpperCamelCase )
        return polynomial

    def __repr__( self ) -> str:
        '''simple docstring'''
        return self.__str__()

    def __a ( self ) -> Polynomial:
        '''simple docstring'''
        snake_case__ : list[float] = [0] * self.degree
        for i in range(self.degree ):
            snake_case__ : Any = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1 , __UpperCamelCase )

    def __a ( self , __UpperCamelCase = 0 ) -> Polynomial:
        '''simple docstring'''
        snake_case__ : list[float] = [0] * (self.degree + 2)
        snake_case__ : Optional[int] = constant
        for i in range(self.degree + 1 ):
            snake_case__ : str = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1 , __UpperCamelCase )

    def __eq__( self , __UpperCamelCase ) -> bool:
        '''simple docstring'''
        if not isinstance(__UpperCamelCase , __UpperCamelCase ):
            return False
        if self.degree != polynomial_a.degree:
            return False
        for i in range(self.degree + 1 ):
            if self.coefficients[i] != polynomial_a.coefficients[i]:
                return False
        return True

    def __ne__( self , __UpperCamelCase ) -> bool:
        '''simple docstring'''
        return not self.__eq__(__UpperCamelCase )
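# Editor's note: a minimal, hedged usage sketch for the class above, assuming the
# obfuscated identifiers map back to the usual Polynomial(degree, coefficients)
# implementation with coefficients ordered from the constant term upward:
p = Polynomial(2, [1.0, 2.0, 3.0])  # represents 3x^2 + 2x + 1
q = Polynomial(1, [0.0, 1.0])       # represents x
print(p + q)   # 3.0x^2 + 3.0x + 1.0
print(p * q)   # 3.0x^3 + 2.0x^2 + 1.0x
print(p == q)  # False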
699
from .data_collator import (
    DataCollatorForLanguageModeling,
    DataCollatorForPermutationLanguageModeling,
    DataCollatorForSeqaSeq,
    DataCollatorForSOP,
    DataCollatorForTokenClassification,
    DataCollatorForWholeWordMask,
    DataCollatorWithPadding,
    DefaultDataCollator,
    default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
    DataProcessor,
    InputExample,
    InputFeatures,
    SingleSentenceClassificationProcessor,
    SquadExample,
    SquadFeatures,
    SquadVaProcessor,  # listed twice in the source (the two SQuAD processor versions collapse to the same obfuscated name); deduplicated here
    glue_convert_examples_to_features,
    glue_output_modes,
    glue_processors,
    glue_tasks_num_labels,
    squad_convert_examples_to_features,
    xnli_output_modes,
    xnli_processors,
    xnli_tasks_num_labels,
)
699
1
def UpperCamelCase__ ( A__ ) -> List[Any]:
    if not head:
        return True
    # split the list to two parts
    snake_case__ , snake_case__ : Dict = head.next, head
    while fast and fast.next:
        snake_case__ : Any = fast.next.next
        snake_case__ : Tuple = slow.next
    snake_case__ : Dict = slow.next
    snake_case__ : List[str] = None  # Don't forget here! But forget still works!
    # reverse the second part
    snake_case__ : Optional[Any] = None
    while second:
        snake_case__ : List[Any] = second.next
        snake_case__ : Optional[int] = node
        snake_case__ : Tuple = second
        snake_case__ : Any = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        snake_case__ : List[Any] = node.next
        snake_case__ : Dict = head.next
    return True


def UpperCamelCase__ ( A__ ) -> Dict:
    if not head or not head.next:
        return True
    # 1. Get the midpoint (slow)
    snake_case__ : Union[str, Any] = head
    while fast and fast.next:
        snake_case__ , snake_case__ : Optional[int] = fast.next.next, slow.next
    # 2. Push the second half into the stack
    snake_case__ : int = [slow.val]
    while slow.next:
        snake_case__ : str = slow.next
        stack.append(slow.val )
    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        snake_case__ : Optional[int] = cur.next
    return True


def UpperCamelCase__ ( A__ ) -> Union[str, Any]:
    if not head or not head.next:
        return True
    snake_case__ : Tuple = {}
    snake_case__ : Dict = 0
    while head:
        if head.val in d:
            d[head.val].append(A__ )
        else:
            snake_case__ : int = [pos]
        snake_case__ : Optional[Any] = head.next
        pos += 1
    snake_case__ : Union[str, Any] = pos - 1
    snake_case__ : List[Any] = 0
    for v in d.values():
        if len(A__ ) % 2 != 0:
            middle += 1
        else:
            snake_case__ : Any = 0
            for i in range(0 , len(A__ ) ):
                if v[i] + v[len(A__ ) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
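# Editor's note: the three palindrome checks above walk a singly linked list via
# `.val` / `.next`, but the node type itself is not part of this sample. Below is a
# minimal sketch of the assumed node class (the name `ListNode` is illustrative,
# not from the source), plus a quick way to build a test list:
class ListNode:
    def __init__(self, val=0, nxt=None):
        self.val = val
        self.next = nxt


# 1 -> 2 -> 2 -> 1, a palindrome once the obfuscated names are resolved
head = ListNode(1, ListNode(2, ListNode(2, ListNode(1))))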
699
from dataclasses import dataclass, field
from typing import Optional

from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser


@dataclass
class __snake_case :
    __lowerCamelCase = field(metadata={"""help""": """The output directory where the model will be written."""} ,)
    __lowerCamelCase = field(
        metadata={
            """help""": (
                """The encoder model checkpoint for weights initialization."""
                """Don't set if you want to train an encoder model from scratch."""
            )
        } ,)
    __lowerCamelCase = field(
        metadata={
            """help""": (
                """The decoder model checkpoint for weights initialization."""
                """Don't set if you want to train a decoder model from scratch."""
            )
        } ,)
    __lowerCamelCase = field(
        default=_lowerCamelCase ,metadata={"""help""": """Pretrained encoder config name or path if not the same as encoder_model_name"""} )
    __lowerCamelCase = field(
        default=_lowerCamelCase ,metadata={"""help""": """Pretrained decoder config name or path if not the same as decoder_model_name"""} )


def UpperCamelCase__ ( ) -> Union[str, Any]:
    snake_case__ : str = HfArgumentParser((ModelArguments,) )
    ((snake_case__) , ) : Dict = parser.parse_args_into_dataclasses()

    # Load pretrained model and tokenizer

    # Use explicit specified encoder config
    if model_args.encoder_config_name:
        snake_case__ : List[str] = AutoConfig.from_pretrained(model_args.encoder_config_name )
    # Use pretrained encoder model's config
    else:
        snake_case__ : Optional[int] = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path )

    # Use explicit specified decoder config
    if model_args.decoder_config_name:
        snake_case__ : Optional[Any] = AutoConfig.from_pretrained(model_args.decoder_config_name )
    # Use pretrained decoder model's config
    else:
        snake_case__ : List[str] = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path )

    # necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed
    snake_case__ : Any = True
    snake_case__ : Dict = True

    snake_case__ : Tuple = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
        encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path ,
        decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path ,
        encoder_config=A__ ,
        decoder_config=A__ ,
    )

    # GPT2 only has bos/eos tokens but not decoder_start/pad tokens
    snake_case__ : Optional[Any] = decoder_config.decoder_start_token_id
    snake_case__ : Tuple = decoder_config.pad_token_id
    if decoder_start_token_id is None:
        snake_case__ : Optional[Any] = decoder_config.bos_token_id
    if pad_token_id is None:
        snake_case__ : int = decoder_config.eos_token_id

    # This is necessary to make Flax's generate() work
    snake_case__ : Union[str, Any] = decoder_config.eos_token_id
    snake_case__ : Optional[int] = decoder_start_token_id
    snake_case__ : int = pad_token_id

    snake_case__ : Tuple = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path )
    snake_case__ : int = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path )
    snake_case__ : Tuple = tokenizer.convert_ids_to_tokens(model.config.pad_token_id )

    model.save_pretrained(model_args.output_dir )
    image_processor.save_pretrained(model_args.output_dir )
    tokenizer.save_pretrained(model_args.output_dir )


if __name__ == "__main__":
    main()
699
1
import torch
from transformers import AutoModel


class __snake_case ( torch.nn.Module ):
    def __init__( self , __UpperCamelCase="sayef/fsner-bert-base-uncased" ) -> List[str]:
        '''simple docstring'''
        super(__UpperCamelCase , self ).__init__()
        snake_case__ : Tuple = AutoModel.from_pretrained(__UpperCamelCase , return_dict=__UpperCamelCase )
        snake_case__ : Optional[Any] = torch.nn.CosineSimilarity(3 , 1E-08 )
        snake_case__ : Optional[int] = torch.nn.Softmax(dim=1 )

    def __a ( self , **__UpperCamelCase ) -> Any:
        '''simple docstring'''
        return self.bert(**__UpperCamelCase ).last_hidden_state

    def __a ( self , __UpperCamelCase ) -> Optional[int]:
        '''simple docstring'''
        return token_embeddings.sum(2 , keepdim=__UpperCamelCase )

    def __a ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=1 ) -> int:
        '''simple docstring'''
        return self.softmax(T * self.cos(__UpperCamelCase , __UpperCamelCase ) )

    def __a ( self , __UpperCamelCase , __UpperCamelCase ) -> Dict:
        '''simple docstring'''
        snake_case__ : Any = W_supports['sizes'].tolist()
        snake_case__ : Union[str, Any] = W_supports['start_token_id'].item()
        snake_case__ : str = W_supports['end_token_id'].item()
        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]
        snake_case__ : str = self.BERT(**__UpperCamelCase )
        snake_case__ : Tuple = self.BERT(**__UpperCamelCase )
        snake_case__ : List[Any] = None
        snake_case__ : Any = None
        snake_case__ : str = W_supports['input_ids'] == start_token_id
        snake_case__ : Dict = W_supports['input_ids'] == end_token_id
        for i, size in enumerate(__UpperCamelCase ):
            if i == 0:
                snake_case__ : Dict = 0
            else:
                snake_case__ : Union[str, Any] = support_sizes[i - 1]
            snake_case__ : int = S[s : s + size][start_token_masks[s : s + size]]
            snake_case__ : Optional[int] = S[s : s + size][end_token_masks[s : s + size]]
            snake_case__ : List[str] = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
            snake_case__ : Dict = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )
            if p_starts is not None:
                snake_case__ : str = torch.vstack((p_starts, p_start) )
                snake_case__ : Optional[Any] = torch.vstack((p_ends, p_end) )
            else:
                snake_case__ : Union[str, Any] = p_start
                snake_case__ : List[Any] = p_end
        return p_starts, p_ends
699
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional

import datasets
import numpy as np
import tensorflow as tf

from transformers import (
    AutoConfig,
    AutoTokenizer,
    EvalPrediction,
    HfArgumentParser,
    PreTrainedTokenizer,
    TFAutoModelForSequenceClassification,
    TFTrainer,
    TFTrainingArguments,
)
from transformers.utils import logging as hf_logging

hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()


def UpperCamelCase__ ( A__ , A__ , A__ , A__ , A__ , A__ = None , ) -> Optional[int]:
    snake_case__ : List[str] = {}
    if train_file is not None:
        snake_case__ : Tuple = [train_file]
    if eval_file is not None:
        snake_case__ : Dict = [eval_file]
    if test_file is not None:
        snake_case__ : str = [test_file]

    snake_case__ : Optional[Any] = datasets.load_dataset('csv' , data_files=A__ )
    snake_case__ : Any = list(ds[list(files.keys() )[0]].features.keys() )
    snake_case__ : Optional[Any] = features_name.pop(A__ )
    snake_case__ : Optional[Any] = list(set(ds[list(files.keys() )[0]][label_name] ) )
    snake_case__ : str = {label: i for i, label in enumerate(A__ )}
    snake_case__ : int = tokenizer.model_input_names
    snake_case__ : int = {}

    if len(A__ ) == 1:
        for k in files.keys():
            snake_case__ : str = ds[k].map(
                lambda A__ : tokenizer.batch_encode_plus(
                    example[features_name[0]] , truncation=A__ , max_length=A__ , padding='max_length' ) ,
                batched=A__ ,
            )
    elif len(A__ ) == 2:
        for k in files.keys():
            snake_case__ : Optional[int] = ds[k].map(
                lambda A__ : tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]) ,
                    truncation=A__ ,
                    max_length=A__ ,
                    padding='max_length' ,
                ) ,
                batched=A__ ,
            )

    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            snake_case__ : int = {k: v for k, v in ex.items() if k in input_names}
            snake_case__ : Any = labelaid[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            snake_case__ : int = {k: v for k, v in ex.items() if k in input_names}
            snake_case__ : Union[str, Any] = labelaid[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            snake_case__ : Dict = {k: v for k, v in ex.items() if k in input_names}
            snake_case__ : List[str] = labelaid[ex[label_name]]
            yield (d, label)

    snake_case__ : Any = (
        tf.data.Dataset.from_generator(
            A__ ,
            ({k: tf.intaa for k in input_names}, tf.intaa) ,
            ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) ,
        )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )
    if train_ds is not None:
        snake_case__ : str = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )

    snake_case__ : Optional[int] = (
        tf.data.Dataset.from_generator(
            A__ ,
            ({k: tf.intaa for k in input_names}, tf.intaa) ,
            ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) ,
        )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )
    if val_ds is not None:
        snake_case__ : Optional[int] = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )

    snake_case__ : List[str] = (
        tf.data.Dataset.from_generator(
            A__ ,
            ({k: tf.intaa for k in input_names}, tf.intaa) ,
            ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) ,
        )
        if datasets.Split.TEST in transformed_ds
        else None
    )
    if test_ds is not None:
        snake_case__ : str = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )

    return train_ds, val_ds, test_ds, labelaid


lowerCAmelCase__ : List[str] = logging.getLogger(__name__)


@dataclass
class __snake_case :
    __lowerCamelCase = field(metadata={"""help""": """Which column contains the label"""} )
    __lowerCamelCase = field(default=_lowerCamelCase ,metadata={"""help""": """The path of the training file"""} )
    __lowerCamelCase = field(default=_lowerCamelCase ,metadata={"""help""": """The path of the development file"""} )
    __lowerCamelCase = field(default=_lowerCamelCase ,metadata={"""help""": """The path of the test file"""} )
    __lowerCamelCase = field(
        default=128 ,
        metadata={
            """help""": (
                """The maximum total input sequence length after tokenization. Sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            )
        } ,)
    __lowerCamelCase = field(default=_lowerCamelCase ,metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )


@dataclass
class __snake_case :
    __lowerCamelCase = field(metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
    __lowerCamelCase = field(default=_lowerCamelCase ,metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
    __lowerCamelCase = field(default=_lowerCamelCase ,metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
    __lowerCamelCase = field(default=_lowerCamelCase ,metadata={"""help""": """Set this flag to use fast tokenization."""} )
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    __lowerCamelCase = field(
        default=_lowerCamelCase ,metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} ,)


def UpperCamelCase__ ( ) -> Union[str, Any]:
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    snake_case__ : int = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
    snake_case__ , snake_case__ , snake_case__ : Dict = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir )
        and os.listdir(training_args.output_dir )
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
            ' --overwrite_output_dir to overcome.' )

    # Setup logging
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' ,
        datefmt='%m/%d/%Y %H:%M:%S' ,
        level=logging.INFO ,
    )
    logger.info(
        F"""n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, """
        F"""16-bits training: {training_args.fpaa}""" )
    logger.info(F"""Training/evaluation parameters {training_args}""" )

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    snake_case__ : Dict = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path ,
        cache_dir=model_args.cache_dir ,
    )
    snake_case__ , snake_case__ , snake_case__ , snake_case__ : Dict = get_tfds(
        train_file=data_args.train_file ,
        eval_file=data_args.dev_file ,
        test_file=data_args.test_file ,
        tokenizer=A__ ,
        label_column_id=data_args.label_column_id ,
        max_seq_length=data_args.max_seq_length ,
    )
    snake_case__ : Dict = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path ,
        num_labels=len(A__ ) ,
        labelaid=A__ ,
        idalabel={id: label for label, id in labelaid.items()} ,
        finetuning_task='text-classification' ,
        cache_dir=model_args.cache_dir ,
    )

    with training_args.strategy.scope():
        snake_case__ : Optional[Any] = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path ,
            from_pt=bool('.bin' in model_args.model_name_or_path ) ,
            config=A__ ,
            cache_dir=model_args.cache_dir ,
        )

    def compute_metrics(A__ ) -> Dict:
        snake_case__ : Optional[Any] = np.argmax(p.predictions , axis=1 )
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    snake_case__ : Any = TFTrainer(
        model=A__ ,
        args=A__ ,
        train_dataset=A__ ,
        eval_dataset=A__ ,
        compute_metrics=A__ ,
    )

    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir )

    # Evaluation
    snake_case__ : Dict = {}
    if training_args.do_eval:
        logger.info('*** Evaluate ***' )
        snake_case__ : Tuple = trainer.evaluate()
        snake_case__ : Any = os.path.join(training_args.output_dir , 'eval_results.txt' )
        with open(A__ , 'w' ) as writer:
            logger.info('***** Eval results *****' )
            for key, value in result.items():
                logger.info(F"""  {key} = {value}""" )
                writer.write(F"""{key} = {value}\n""" )
        results.update(A__ )

    return results


if __name__ == "__main__":
    main()
699
1
import logging

import numpy as np
import pytest
from scipy.linalg import eigh

logging.basicConfig(level=logging.INFO, format='''%(message)s''')


def UpperCamelCase__ ( A__ ) -> np.ndarray:
    return input_array.reshape((input_array.size, 1) )


def UpperCamelCase__ ( A__ , A__ , A__ ) -> np.ndarray:
    snake_case__ : Tuple = np.nan
    for i in range(A__ ):
        snake_case__ : Optional[Any] = features[:, labels == i]
        snake_case__ : Tuple = data.mean(1 )
        # Centralize the data of class i
        snake_case__ : Optional[Any] = data - column_reshape(A__ )
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += np.dot(A__ , centered_data.T )
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            snake_case__ : Optional[int] = np.dot(A__ , centered_data.T )
    return covariance_sum / features.shape[1]


def UpperCamelCase__ ( A__ , A__ , A__ ) -> np.ndarray:
    snake_case__ : Tuple = features.mean(1 )
    snake_case__ : Any = np.nan
    for i in range(A__ ):
        snake_case__ : Dict = features[:, labels == i]
        snake_case__ : Optional[Any] = data.shape[1]
        snake_case__ : List[Any] = data.mean(1 )
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += device_data * np.dot(
                column_reshape(A__ ) - column_reshape(A__ ) ,
                (column_reshape(A__ ) - column_reshape(A__ )).T ,
            )
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            snake_case__ : Any = device_data * np.dot(
                column_reshape(A__ ) - column_reshape(A__ ) ,
                (column_reshape(A__ ) - column_reshape(A__ )).T ,
            )
    return covariance_sum / features.shape[1]


def UpperCamelCase__ ( A__ , A__ ) -> np.ndarray:
    # Check if the features have been loaded
    if features.any():
        snake_case__ : List[str] = features.mean(1 )
        # Center the dataset
        snake_case__ : Optional[Any] = features - np.reshape(A__ , (data_mean.size, 1) )
        snake_case__ : List[str] = np.dot(A__ , centered_data.T ) / features.shape[1]
        snake_case__ , snake_case__ : List[str] = np.linalg.eigh(A__ )
        # Take all the columns in the reverse order (-1), and then takes only the first
        snake_case__ : Tuple = eigenvectors[:, ::-1][:, 0:dimensions]
        # Project the database on the new space
        snake_case__ : Optional[int] = np.dot(filtered_eigenvectors.T , A__ )
        logging.info('Principal Component Analysis computed' )
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR , format='%(message)s' , force=A__ )
        logging.error('Dataset empty' )
        raise AssertionError


def UpperCamelCase__ ( A__ , A__ , A__ , A__ ) -> np.ndarray:
    assert classes > dimensions
    # Check if features have been already loaded
    if features.any():  # fixed: the source wrote `features.any`, which is always truthy
        snake_case__ , snake_case__ : List[str] = eigh(
            covariance_between_classes(A__ , A__ , A__ ) ,
            covariance_within_classes(A__ , A__ , A__ ) ,
        )
        snake_case__ : str = eigenvectors[:, ::-1][:, :dimensions]
        snake_case__ , snake_case__ , snake_case__ : Dict = np.linalg.svd(A__ )
        snake_case__ : List[str] = svd_matrix[:, 0:dimensions]
        snake_case__ : Any = np.dot(filtered_svd_matrix.T , A__ )
        logging.info('Linear Discriminant Analysis computed' )
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR , format='%(message)s' , force=A__ )
        logging.error('Dataset empty' )
        raise AssertionError


def UpperCamelCase__ ( ) -> None:
    # Create dummy dataset with 2 classes and 3 features
    snake_case__ : int = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] )
    snake_case__ : Any = np.array([0, 0, 0, 1, 1] )
    snake_case__ : List[str] = 2
    snake_case__ : Union[str, Any] = 2

    # Assert that the function raises an AssertionError if dimensions > classes
    with pytest.raises(A__ ) as error_info:
        snake_case__ : Optional[Any] = linear_discriminant_analysis(A__ , A__ , A__ , A__ )
        if isinstance(A__ , np.ndarray ):
            raise AssertionError('Did not raise AssertionError for dimensions > classes' )
        assert error_info.type is AssertionError


def UpperCamelCase__ ( ) -> None:
    snake_case__ : int = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] )
    snake_case__ : List[Any] = 2
    snake_case__ : Any = np.array([[6.9_2_8_2_0_3_2_3, 8.6_6_0_2_5_4_0_4, 1_0.3_9_2_3_0_4_8_5], [3.0, 3.0, 3.0]] )

    with pytest.raises(A__ ) as error_info:
        snake_case__ : int = principal_component_analysis(A__ , A__ )
        if not np.allclose(A__ , A__ ):
            raise AssertionError
        assert error_info.type is AssertionError


if __name__ == "__main__":
    import doctest

    doctest.testmod()
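# Editor's note: a small, hedged usage sketch for the PCA routine above (assuming the
# obfuscated function is the `principal_component_analysis(features, dimensions)`
# referenced by the test, with one row per feature and one column per sample):
features = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]])  # 3 features x 3 samples
projected = principal_component_analysis(features, 2)  # projected data of shape (2, 3)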
699
from typing import List

import datasets
from datasets.tasks import AudioClassification

from ..folder_based_builder import folder_based_builder

lowerCAmelCase__ : List[Any] = datasets.utils.logging.get_logger(__name__)


class __snake_case ( folder_based_builder.FolderBasedBuilderConfig ):
    __lowerCamelCase = None
    __lowerCamelCase = None


class __snake_case ( folder_based_builder.FolderBasedBuilder ):
    __lowerCamelCase = datasets.Audio()
    __lowerCamelCase = """audio"""
    __lowerCamelCase = AudioFolderConfig
    __lowerCamelCase = 42  # definition at the bottom of the script
    __lowerCamelCase = AudioClassification(audio_column="""audio""" ,label_column="""label""" )


lowerCAmelCase__ : Tuple = [
    '''.aiff''', '''.au''', '''.avr''', '''.caf''', '''.flac''', '''.htk''', '''.svx''', '''.mat4''', '''.mat5''',
    '''.mpc2k''', '''.ogg''', '''.paf''', '''.pvf''', '''.raw''', '''.rf64''', '''.sd2''', '''.sds''', '''.ircam''',
    '''.voc''', '''.w64''', '''.wav''', '''.nist''', '''.wavex''', '''.wve''', '''.xi''', '''.mp3''', '''.opus''',
]
lowerCAmelCase__ : List[Any] = AUDIO_EXTENSIONS
699
1
import gc
import random
import unittest

import numpy as np
import torch

from diffusers import DDIMScheduler, KandinskyVaaPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference

enable_full_determinism()


class __snake_case ( _lowerCamelCase ,unittest.TestCase ):
    __lowerCamelCase = KandinskyVaaPipeline
    __lowerCamelCase = [
        """image_embeds""",
        """negative_image_embeds""",
    ]
    __lowerCamelCase = ["""image_embeds""", """negative_image_embeds"""]
    __lowerCamelCase = [
        """generator""",
        """height""",
        """width""",
        """latents""",
        """guidance_scale""",
        """num_inference_steps""",
        """return_dict""",
        """guidance_scale""",
        """num_images_per_prompt""",
        """output_type""",
        """return_dict""",
    ]
    __lowerCamelCase = False

    @property
    def __a ( self ) -> Optional[Any]:
        '''simple docstring'''
        return 32

    @property
    def __a ( self ) -> Any:
        '''simple docstring'''
        return 32

    @property
    def __a ( self ) -> Optional[Any]:
        '''simple docstring'''
        return self.time_input_dim

    @property
    def __a ( self ) -> str:
        '''simple docstring'''
        return self.time_input_dim * 4

    @property
    def __a ( self ) -> int:
        '''simple docstring'''
        return 100

    @property
    def __a ( self ) -> Tuple:
        '''simple docstring'''
        torch.manual_seed(0 )
        snake_case__ : Union[str, Any] = {
            'in_channels': 4,
            # Out channels is double in channels because predicts mean and variance
            'out_channels': 8,
            'addition_embed_type': 'image',
            'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
            'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
            'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
            'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
            'layers_per_block': 1,
            'encoder_hid_dim': self.text_embedder_hidden_size,
            'encoder_hid_dim_type': 'image_proj',
            'cross_attention_dim': self.cross_attention_dim,
            'attention_head_dim': 4,
            'resnet_time_scale_shift': 'scale_shift',
            'class_embed_type': None,
        }
        snake_case__ : Optional[int] = UNetaDConditionModel(**__UpperCamelCase )
        return model

    @property
    def __a ( self ) -> str:
        '''simple docstring'''
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def __a ( self ) -> List[str]:
        '''simple docstring'''
        torch.manual_seed(0 )
        snake_case__ : Any = VQModel(**self.dummy_movq_kwargs )
        return model

    def __a ( self ) -> str:
        '''simple docstring'''
        snake_case__ : Optional[int] = self.dummy_unet
        snake_case__ : int = self.dummy_movq
        snake_case__ : Union[str, Any] = DDIMScheduler(
            num_train_timesteps=1000 ,
            beta_schedule='linear' ,
            beta_start=0.0_0_0_8_5 ,
            beta_end=0.0_1_2 ,
            clip_sample=__UpperCamelCase ,
            set_alpha_to_one=__UpperCamelCase ,
            steps_offset=1 ,
            prediction_type='epsilon' ,
            thresholding=__UpperCamelCase ,
        )
        snake_case__ : int = {
            'unet': unet,
            'scheduler': scheduler,
            'movq': movq,
        }
        return components

    def __a ( self , __UpperCamelCase , __UpperCamelCase=0 ) -> Dict:
        '''simple docstring'''
        snake_case__ : str = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__UpperCamelCase ) ).to(__UpperCamelCase )
        snake_case__ : List[str] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
            __UpperCamelCase )
        if str(__UpperCamelCase ).startswith('mps' ):
            snake_case__ : Dict = torch.manual_seed(__UpperCamelCase )
        else:
            snake_case__ : Union[str, Any] = torch.Generator(device=__UpperCamelCase ).manual_seed(__UpperCamelCase )
        snake_case__ : Any = {
            'image_embeds': image_embeds,
            'negative_image_embeds': negative_image_embeds,
            'generator': generator,
            'height': 64,
            'width': 64,
            'guidance_scale': 4.0,
            'num_inference_steps': 2,
            'output_type': 'np',
        }
        return inputs

    def __a ( self ) -> Dict:
        '''simple docstring'''
        snake_case__ : Optional[int] = 'cpu'
        snake_case__ : Any = self.get_dummy_components()
        snake_case__ : Dict = self.pipeline_class(**__UpperCamelCase )
        snake_case__ : Any = pipe.to(__UpperCamelCase )
        pipe.set_progress_bar_config(disable=__UpperCamelCase )
        snake_case__ : Union[str, Any] = pipe(**self.get_dummy_inputs(__UpperCamelCase ) )
        snake_case__ : List[str] = output.images
        snake_case__ : List[Any] = pipe(
            **self.get_dummy_inputs(__UpperCamelCase ) ,
            return_dict=__UpperCamelCase ,
        )[0]
        snake_case__ : Tuple = image[0, -3:, -3:, -1]
        snake_case__ : List[Any] = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        snake_case__ : Optional[Any] = np.array(
            [0.6_2_3_7_9_7_6, 1.0, 0.3_6_4_4_1_3_3_2, 1.0, 0.7_0_6_3_9_6_3_4, 0.2_9_8_7_7_1_8_6, 0.8_5_6_5_2_1_2_5, 0.5_2_1_6_8_4_3, 0.5_4_4_5_4_0_4_6] )
        assert (
            np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        ), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
        ), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""


@slow
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
    def __a ( self ) -> Any:
        '''simple docstring'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def __a ( self ) -> List[Any]:
        '''simple docstring'''
        snake_case__ : str = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy' )
        snake_case__ : int = KandinskyVaaPriorPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.floataa )
        pipe_prior.to(__UpperCamelCase )
        snake_case__ : List[str] = KandinskyVaaPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-2-decoder' , torch_dtype=torch.floataa )
        snake_case__ : Optional[Any] = pipeline.to(__UpperCamelCase )
        pipeline.set_progress_bar_config(disable=__UpperCamelCase )
        snake_case__ : Any = 'red cat, 4k photo'
        snake_case__ : Tuple = torch.Generator(device='cuda' ).manual_seed(0 )
        snake_case__ , snake_case__ : Union[str, Any] = pipe_prior(
            __UpperCamelCase , generator=__UpperCamelCase , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
        snake_case__ : List[Any] = torch.Generator(device='cuda' ).manual_seed(0 )
        snake_case__ : Dict = pipeline(
            image_embeds=__UpperCamelCase ,
            negative_image_embeds=__UpperCamelCase ,
            generator=__UpperCamelCase ,
            num_inference_steps=100 ,
            output_type='np' ,
        )
        snake_case__ : List[str] = output.images[0]
        assert image.shape == (512, 512, 3)
        assert_mean_pixel_difference(__UpperCamelCase , __UpperCamelCase )
699
import random
import unittest

import torch

from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device

from ..pipeline_params import (
    TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin


@skip_mps
class __snake_case ( _lowerCamelCase ,_lowerCamelCase ,unittest.TestCase ):
    __lowerCamelCase = IFInpaintingPipeline
    __lowerCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"""width""", """height"""}
    __lowerCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    __lowerCamelCase = PipelineTesterMixin.required_optional_params - {"""latents"""}

    def __a ( self ) -> Optional[Any]:
        '''simple docstring'''
        return self._get_dummy_components()

    def __a ( self , __UpperCamelCase , __UpperCamelCase=0 ) -> str:
        '''simple docstring'''
        if str(__UpperCamelCase ).startswith('mps' ):
            snake_case__ : int = torch.manual_seed(__UpperCamelCase )
        else:
            snake_case__ : Union[str, Any] = torch.Generator(device=__UpperCamelCase ).manual_seed(__UpperCamelCase )
        snake_case__ : Optional[int] = floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCamelCase ) ).to(__UpperCamelCase )
        snake_case__ : Dict = floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCamelCase ) ).to(__UpperCamelCase )
        snake_case__ : Optional[Any] = {
            'prompt': 'A painting of a squirrel eating a burger',
            'image': image,
            'mask_image': mask_image,
            'generator': generator,
            'num_inference_steps': 2,
            'output_type': 'numpy',
        }
        return inputs

    @unittest.skipIf(
        torch_device != 'cuda' or not is_xformers_available() ,
        reason='XFormers attention is only available with CUDA and `xformers` installed' ,
    )
    def __a ( self ) -> List[Any]:
        '''simple docstring'''
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )

    def __a ( self ) -> Optional[int]:
        '''simple docstring'''
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
    def __a ( self ) -> List[str]:
        '''simple docstring'''
        super().test_save_load_floataa(expected_max_diff=1E-1 )

    def __a ( self ) -> List[str]:
        '''simple docstring'''
        self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )

    def __a ( self ) -> int:
        '''simple docstring'''
        self._test_save_load_local()

    def __a ( self ) -> List[str]:
        '''simple docstring'''
        self._test_inference_batch_single_identical(expected_max_diff=1E-2 , )
699
1
from __future__ import annotations

import math


def UpperCamelCase__ ( A__ , A__ ) -> float:
    snake_case__ : Dict = u
    for i in range(1 , A__ ):
        snake_case__ : List[str] = temp * (u - i)
    return temp


def UpperCamelCase__ ( ) -> None:
    snake_case__ : List[str] = int(input('enter the numbers of values: ' ) )
    snake_case__ : list[list[float]] = []
    for _ in range(A__ ):
        y.append([] )
    for i in range(A__ ):
        for j in range(A__ ):
            y[i].append(A__ )
            snake_case__ : List[Any] = 0
    print('enter the values of parameters in a list: ' )
    snake_case__ : str = list(map(A__ , input().split() ) )
    print('enter the values of corresponding parameters: ' )
    for i in range(A__ ):
        snake_case__ : Tuple = float(input() )
    snake_case__ : Tuple = int(input('enter the value to interpolate: ' ) )
    snake_case__ : Union[str, Any] = (value - x[0]) / (x[1] - x[0])
    # for calculating forward difference table
    for i in range(1 , A__ ):
        for j in range(n - i ):
            snake_case__ : Any = y[j + 1][i - 1] - y[j][i - 1]
    snake_case__ : int = y[0][0]
    for i in range(1 , A__ ):
        summ += (ucal(A__ , A__ ) * y[0][i]) / math.factorial(A__ )
    print(F"""the value at {value} is {summ}""" )


if __name__ == "__main__":
    main()
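# Editor's note: the driver above is interactive; this is a minimal non-interactive
# sketch of the same Newton forward-difference interpolation (equally spaced x values
# assumed; the names used here are illustrative, not from the source):
import math


def ucal(u: float, p: int) -> float:
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp


x = [0.0, 1.0, 2.0, 3.0]
y = [[1.0], [2.0], [4.0], [8.0]]  # f(x) = 2**x sampled at the points in x
for row in y:
    row.extend([0.0] * (len(x) - 1))
for i in range(1, len(x)):  # build the forward difference table
    for j in range(len(x) - i):
        y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
u = (1.5 - x[0]) / (x[1] - x[0])
summ = y[0][0]
for i in range(1, len(x)):
    summ += (ucal(u, i) * y[0][i]) / math.factorial(i)
print(summ)  # 2.8125, close to the exact 2**1.5 ~= 2.828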
699
import unittest

from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin

lowerCAmelCase__ : List[Any] = '''▁'''
lowerCAmelCase__ : int = get_tests_dir('''fixtures/test_sentencepiece.model''')


@require_sentencepiece
class __snake_case ( _lowerCamelCase ,unittest.TestCase ):
    __lowerCamelCase = BertGenerationTokenizer
    __lowerCamelCase = False
    __lowerCamelCase = True

    def __a ( self ) -> Optional[int]:
        '''simple docstring'''
        super().setUp()
        snake_case__ : str = BertGenerationTokenizer(__UpperCamelCase , keep_accents=__UpperCamelCase )
        tokenizer.save_pretrained(self.tmpdirname )

    def __a ( self ) -> Optional[int]:
        '''simple docstring'''
        snake_case__ : List[str] = '<s>'
        snake_case__ : Dict = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCamelCase ) , __UpperCamelCase )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCamelCase ) , __UpperCamelCase )

    def __a ( self ) -> List[str]:
        '''simple docstring'''
        snake_case__ : Tuple = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '<unk>' )
        self.assertEqual(vocab_keys[1] , '<s>' )
        self.assertEqual(vocab_keys[-1] , '<pad>' )
        self.assertEqual(len(__UpperCamelCase ) , 1002 )

    def __a ( self ) -> int:
        '''simple docstring'''
        self.assertEqual(self.get_tokenizer().vocab_size , 1000 )

    def __a ( self ) -> Tuple:
        '''simple docstring'''
        snake_case__ : Optional[Any] = BertGenerationTokenizer(__UpperCamelCase , keep_accents=__UpperCamelCase )
        snake_case__ : int = tokenizer.tokenize('This is a test' )
        self.assertListEqual(__UpperCamelCase , ['▁This', '▁is', '▁a', '▁t', 'est'] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(__UpperCamelCase ) , [285, 46, 10, 170, 382] , )
        snake_case__ : Any = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
        self.assertListEqual(
            __UpperCamelCase ,
            [
                SPIECE_UNDERLINE + 'I',
                SPIECE_UNDERLINE + 'was',
                SPIECE_UNDERLINE + 'b',
                'or',
                'n',
                SPIECE_UNDERLINE + 'in',
                SPIECE_UNDERLINE + '',
                '9',
                '2',
                '0',
                '0',
                '0',
                ',',
                SPIECE_UNDERLINE + 'and',
                SPIECE_UNDERLINE + 'this',
                SPIECE_UNDERLINE + 'is',
                SPIECE_UNDERLINE + 'f',
                'al',
                's',
                'é',
                '.',
            ] , )
        snake_case__ : Optional[Any] = tokenizer.convert_tokens_to_ids(__UpperCamelCase )
        self.assertListEqual(
            __UpperCamelCase , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
        snake_case__ : int = tokenizer.convert_ids_to_tokens(__UpperCamelCase )
        self.assertListEqual(
            __UpperCamelCase ,
            [
                SPIECE_UNDERLINE + 'I',
                SPIECE_UNDERLINE + 'was',
                SPIECE_UNDERLINE + 'b',
                'or',
                'n',
                SPIECE_UNDERLINE + 'in',
                SPIECE_UNDERLINE + '',
                '<unk>',
                '2',
                '0',
                '0',
                '0',
                ',',
                SPIECE_UNDERLINE + 'and',
                SPIECE_UNDERLINE + 'this',
                SPIECE_UNDERLINE + 'is',
                SPIECE_UNDERLINE + 'f',
                'al',
                's',
                '<unk>',
                '.',
            ] , )

    @cached_property
    def __a ( self ) -> Dict:
        '''simple docstring'''
        return BertGenerationTokenizer.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' )

    @slow
    def __a ( self ) -> Any:
        '''simple docstring'''
        snake_case__ : int = 'Hello World!'
        snake_case__ : Union[str, Any] = [18536, 2260, 101]
        self.assertListEqual(__UpperCamelCase , self.big_tokenizer.encode(__UpperCamelCase ) )

    @slow
    def __a ( self ) -> Optional[int]:
        '''simple docstring'''
        snake_case__ : str = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            ' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
        )
        snake_case__ : List[Any] = [
            871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400,
            5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405,
            34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172,
        ]
        self.assertListEqual(__UpperCamelCase , self.big_tokenizer.encode(__UpperCamelCase ) )

    @require_torch
    @slow
    def __a ( self ) -> List[str]:
        '''simple docstring'''
        import torch

        from transformers import BertGenerationConfig, BertGenerationEncoder

        # Build sequence
        snake_case__ : Optional[int] = list(self.big_tokenizer.get_vocab().keys() )[:10]
        snake_case__ : Optional[int] = ' '.join(__UpperCamelCase )
        snake_case__ : int = self.big_tokenizer.encode_plus(__UpperCamelCase , return_tensors='pt' , return_token_type_ids=__UpperCamelCase )
        snake_case__ : Tuple = self.big_tokenizer.batch_encode_plus(
            [sequence + ' ' + sequence] , return_tensors='pt' , return_token_type_ids=__UpperCamelCase )
        snake_case__ : Dict = BertGenerationConfig()
        snake_case__ : List[str] = BertGenerationEncoder(__UpperCamelCase )
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
        with torch.no_grad():
            model(**__UpperCamelCase )
            model(**__UpperCamelCase )

    @slow
    def __a ( self ) -> Dict:
        '''simple docstring'''
        # fmt: off
        snake_case__ : Optional[int] = {'input_ids': [[39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114], [448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}  # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=__UpperCamelCase ,
            model_name='google/bert_for_seq_generation_L-24_bbc_encoder' ,
            revision='c817d1fd1be2ffa69431227a1fe320544943d4db' ,
        )
699
1
import json
import sys


def UpperCamelCase__ ( A__ , A__ ) -> Union[str, Any]:
    with open(A__ , encoding='utf-8' ) as f:
        snake_case__ : Optional[int] = json.load(A__ )

    snake_case__ : Optional[int] = ['<details>', '<summary>Show updated benchmarks!</summary>', ' ']
    for benchmark_name in sorted(A__ ):
        snake_case__ : Union[str, Any] = results[benchmark_name]
        snake_case__ : List[Any] = benchmark_name.split('/' )[-1]
        output_md.append(F"""### Benchmark: {benchmark_file_name}""" )
        snake_case__ : List[str] = '| metric |'
        snake_case__ : Any = '|--------|'
        snake_case__ : List[Any] = '| new / old (diff) |'
        for metric_name in sorted(A__ ):
            snake_case__ : Union[str, Any] = benchmark_res[metric_name]
            snake_case__ : int = metric_vals['new']
            snake_case__ : Optional[Any] = metric_vals.get('old' , A__ )
            snake_case__ : List[Any] = metric_vals.get('diff' , A__ )
            snake_case__ : List[str] = F""" {new_val:f}""" if isinstance(A__ , (int, float) ) else 'None'
            if old_val is not None:
                val_str += F""" / {old_val:f}""" if isinstance(A__ , (int, float) ) else "None"
            if dif_val is not None:
                val_str += F""" ({dif_val:f})""" if isinstance(A__ , (int, float) ) else "None"
            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"
        output_md += [title, lines, value, " "]
    output_md.append('</details>' )

    with open(A__ , 'w' , encoding='utf-8' ) as f:
        f.writelines('\n'.join(A__ ) )


if __name__ == "__main__":
    lowerCAmelCase__ : Tuple = sys.argv[1]
    lowerCAmelCase__ : List[str] = sys.argv[2]
    format_json_to_md(input_json_file, output_md_file)
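# Editor's note: the converter above expects an input JSON file shaped roughly as
# below (inferred from how the function indexes it; 'old' and 'diff' are read with
# .get and so are optional -- the benchmark and metric names are illustrative):
example = {
    "benchmarks/text_generation": {
        "latency_ms": {"new": 12.3, "old": 14.1, "diff": -1.8},
        "throughput": {"new": 81.0},
    }
}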
699
import random

import torch
from huggingface_hub import HfApi

from diffusers import UNetaDModel

lowerCAmelCase__ : List[str] = HfApi()
lowerCAmelCase__ : str = {}
# fmt: off
lowerCAmelCase__ : int = torch.tensor([-0.75_15, -1.68_83, 0.24_20, 0.03_00, 0.63_47, 1.34_33, -1.17_43, -3.74_67, 1.23_42, -2.24_85, 0.46_36, 0.80_76, -0.79_91, 0.39_69, 0.84_98, 0.91_89, -1.88_87, -3.35_22, 0.76_39, 0.20_40, 0.62_71, -2.71_48, -1.63_16, 3.08_39, 0.31_86, 0.27_21, -0.97_59, -1.24_61, 2.62_57, 1.35_57])
lowerCAmelCase__ : Dict = torch.tensor([-2.36_39, -2.53_44, 0.00_54, -0.66_74, 1.59_90, 1.01_58, 0.31_24, -2.14_36, 1.87_95, -2.54_29, -0.15_66, -0.39_73, 1.24_90, 2.64_47, 1.22_83, -0.52_08, -2.81_54, -3.51_19, 2.38_38, 1.20_33, 1.72_01, -2.12_56, -1.45_76, 2.79_48, 2.42_04, -0.97_52, -1.25_46, 0.80_27, 3.27_58, 3.13_65])
lowerCAmelCase__ : Dict = torch.tensor([-0.65_31, -0.68_91, -0.31_72, -0.53_75, -0.91_40, -0.53_67, -0.11_75, -0.78_69, -0.38_08, -0.45_13, -0.20_98, -0.00_83, 0.31_83, 0.51_40, 0.22_47, -0.13_04, -0.13_02, -0.28_02, -0.20_84, -0.20_25, -0.49_67, -0.48_73, -0.08_61, 0.69_25, 0.02_50, 0.12_90, -0.15_43, 0.63_16, 1.04_60, 1.49_43])
lowerCAmelCase__ : List[str] = torch.tensor([0.09_11, 0.11_07, 0.01_82, 0.04_35, -0.08_05, -0.06_08, 0.03_81, 0.21_72, -0.02_80, 0.13_27, -0.02_99, -0.02_55, -0.00_50, -0.11_70, -0.10_46, 0.03_09, 0.13_67, 0.17_28, -0.05_33, -0.07_48, -0.05_34, 0.16_24, 0.03_84, -0.18_05, -0.07_07, 0.06_42, 0.02_20, -0.01_34, -0.13_33, -0.15_05])
lowerCAmelCase__ : Union[str, Any] = torch.tensor([0.13_21, 0.13_37, 0.04_40, 0.06_22, -0.05_91, -0.03_70, 0.05_03, 0.21_33, -0.01_77, 0.14_15, -0.01_16, -0.01_12, 0.00_44, -0.09_80, -0.07_89, 0.03_95, 0.15_02, 0.17_85, -0.04_88, -0.05_14, -0.04_04, 0.15_39, 0.04_54, -0.15_59, -0.06_65, 0.06_59, 0.03_83, -0.00_05, -0.12_66, -0.13_86])
lowerCAmelCase__ : List[Any] = torch.tensor([0.11_54, 0.12_18, 0.03_07, 0.05_26, -0.07_11, -0.05_41, 0.03_66, 0.20_78, -0.02_67, 0.13_17, -0.02_26, -0.01_93, -0.00_14, -0.10_55, -0.09_02, 0.03_30, 0.13_91, 0.17_09, -0.05_62, -0.06_93, -0.05_60, 0.14_82, 0.03_81, -0.16_83, -0.06_81, 0.06_61, 0.03_31, -0.00_46, -0.12_68, -0.14_31])
lowerCAmelCase__ : Optional[Any] = torch.tensor([0.11_92, 0.12_40, 0.04_14, 0.06_06, -0.05_57, -0.04_12, 0.04_30, 0.20_42, -0.02_00, 0.13_85, -0.01_15, -0.01_32, 0.00_17, -0.09_65, -0.08_02, 0.03_98, 0.14_33, 0.17_47, -0.04_58, -0.05_33, -0.04_07, 0.15_45, 0.04_19, -0.15_74, -0.06_45, 0.06_26, 0.03_41, -0.00_10, -0.11_99, -0.13_90])
lowerCAmelCase__ : List[str] = torch.tensor([0.10_75, 0.10_74, 0.02_05, 0.04_31, -0.07_74, -0.06_07, 0.02_98, 0.20_42, -0.03_20, 0.12_67, -0.02_81, -0.02_50, -0.00_64, -0.10_91, -0.09_46, 0.02_90, 0.13_28, 0.16_50, -0.05_80, -0.07_38, -0.05_86, 0.14_40, 0.03_37, -0.17_46, -0.07_12, 0.06_05, 0.02_50, -0.00_99, -0.13_16, -0.14_73])
lowerCAmelCase__ : List[str] = torch.tensor([-1.45_72, -2.04_81, -0.04_14, -0.60_05, 1.41_36, 0.58_48, 0.40_28, -2.73_30, 1.22_12, -2.12_28, 0.21_55, 0.40_39, 0.76_62, 2.05_35, 0.74_77, -0.32_43, -2.17_58, -2.76_48, 1.69_47, 0.70_26, 1.23_38, -1.60_78, -0.86_82, 2.28_10, 1.85_74, -0.57_18, -0.55_86, -0.01_86, 2.34_15, 2.12_51])
lowerCAmelCase__ : List[Any] = torch.tensor([-1.36_90, -1.97_20, -0.40_90, -0.69_66, 1.46_60, 0.99_38, -0.13_85, -2.73_24, 0.77_36, -1.89_17, 0.29_23, 0.42_93, 0.16_93, 1.41_12, 1.18_87, -0.31_81, -2.21_60, -2.63_81, 1.31_70, 0.81_63, 0.92_40, -1.65_44, -0.60_99, 2.52_59, 1.64_30, -0.90_90, -0.93_92, -0.01_26, 2.42_68, 2.32_66])
lowerCAmelCase__ : Tuple = torch.tensor([-1.35_25, -1.96_28, -0.39_56, -0.68_60, 1.46_64, 1.00_14, -0.12_59, -2.72_12, 0.77_72, -1.88_11, 0.29_96, 0.43_88, 0.17_04, 1.40_29, 1.17_01, -0.30_27, -2.20_53, -2.62_87, 1.33_50, 0.81_31, 0.92_74, -1.62_92, -0.60_98, 2.51_31, 1.65_05, -0.89_58, -0.92_98, -0.01_51, 2.42_57, 2.33_55])
lowerCAmelCase__ : List[str] = torch.tensor([-2.05_85, -2.78_97, -0.28_50, -0.89_40, 1.90_52, 0.57_02, 0.63_45, -3.89_59, 1.59_32, -3.23_19, 0.19_74, 0.02_87, 1.75_66, 2.65_43, 0.83_87, -0.53_51, -3.27_36, -4.33_75, 2.90_29, 1.63_90, 1.46_40, -2.17_01, -1.90_13, 2.93_41, 3.49_81, -0.62_55, -1.16_44, -0.15_91, 3.70_97, 3.20_66])
lowerCAmelCase__ : Dict = torch.tensor([-2.31_39, -2.55_94, -0.01_97, -0.67_85, 1.70_01, 1.16_06, 0.30_75, -2.17_40, 1.80_71, -2.56_30, -0.09_26, -0.38_11, 1.21_16, 2.62_46, 1.27_31, -0.53_98, -2.81_53, -3.61_40, 2.38_93, 1.32_62, 1.62_58, -2.18_56, -1.32_67, 2.83_95, 2.37_79, -1.06_23, -1.24_68, 0.89_59, 3.33_67, 3.22_43])
lowerCAmelCase__ : Dict = torch.tensor([-2.06_28, -2.76_67, -0.20_89, -0.82_63, 2.05_39, 0.59_92, 0.64_95, -3.83_36, 1.60_25, -3.28_17, 0.17_21, -0.06_33, 1.75_16, 2.70_39, 0.81_00, -0.59_08, -3.21_13, -4.43_43, 2.92_57, 1.36_32, 1.55_62, -2.14_89, -1.98_94, 3.05_60, 3.33_96, -0.73_28, -1.04_17, 0.03_83, 3.70_93, 3.23_43])
lowerCAmelCase__ : Any = torch.tensor([-1.45_74, -2.05_69, -0.04_73, -0.61_17, 1.40_18, 0.57_69, 0.41_29, -2.73_44, 1.22_41, -2.13_97, 0.20_00, 0.39_37, 0.76_16, 2.04_53, 0.73_24, -0.33_91, -2.17_46, -2.77_44, 1.69_63, 0.69_21, 1.21_87, -1.61_72, -0.88_77, 2.24_39, 1.84_71, -0.58_39, -0.56_05, -0.04_64, 2.32_50, 2.12_19])
# fmt: on

lowerCAmelCase__ : Any = api.list_models(filter='''diffusers''')
for mod in models:
    if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
        lowerCAmelCase__ : List[str] = '''/home/patrick/google_checkpoints/''' + mod.modelId.split('''/''')[-1]
        print(F'''Started running {mod.modelId}!!!''')
        if mod.modelId.startswith('''CompVis'''):
            lowerCAmelCase__ : int = UNetaDModel.from_pretrained(local_checkpoint, subfolder='''unet''')
        else:
            lowerCAmelCase__ : str = UNetaDModel.from_pretrained(local_checkpoint)
        torch.manual_seed(0)
        random.seed(0)
        lowerCAmelCase__ : Any = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        lowerCAmelCase__ : List[str] = torch.tensor([10] * noise.shape[0])
        with torch.no_grad():
            lowerCAmelCase__ : int = model(noise, time_step).sample
        assert torch.allclose(
            logits[0, 0, 0, :30], results['''_'''.join('''_'''.join(mod.modelId.split('''/''')).split('''-'''))], atol=1E-3
        )
        print(F'''{mod.modelId} has passed successfully!!!''')
699
1
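The checkpoint-comparison script above boils down to one regression check: derive a dictionary key from the model id and compare a slice of the model's output against a stored reference tensor. A minimal sketch of that check, assuming the key scheme implied by the lookup expression (the renaming in the script collapsed the per-checkpoint `results` assignments into one name, so the key shown is an assumption; the reference value reuses the first three entries of the first tensor above):

import torch

# Reference slices keyed by a sanitized model id: "/" and "-" both map to "_",
# so "google/ddpm-cifar10-32" -> "google_ddpm_cifar10_32" (key name is an assumption).
results = {"google_ddpm_cifar10_32": torch.tensor([-0.7515, -1.6883, 0.2420])}

def check_checkpoint(model_id: str, logits: torch.Tensor, atol: float = 1e-3) -> bool:
    key = "_".join("_".join(model_id.split("/")).split("-"))
    expected = results[key]
    # Compare only the stored leading slice of the flattened logits.
    return torch.allclose(logits.flatten()[: expected.numel()], expected, atol=atol)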
import argparse from typing import List import evaluate import numpy as np import torch from datasets import DatasetDict, load_dataset # New Code # # We'll be using StratifiedKFold for this example from sklearn.model_selection import StratifiedKFold from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to perform Cross Validation, # and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To help focus on the differences in the code, building `DataLoaders` # was refactored into its own function. # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## lowerCAmelCase__ : Tuple = 16 lowerCAmelCase__ : Optional[Any] = 32 def UpperCamelCase__ ( A__ , A__ , A__ , A__ , A__ = 16 ) -> List[str]: snake_case__ : Tuple = AutoTokenizer.from_pretrained('bert-base-cased' ) snake_case__ : int = DatasetDict( { 'train': dataset['train'].select(A__ ), 'validation': dataset['train'].select(A__ ), 'test': dataset['validation'], } ) def tokenize_function(A__ ): # max_length=None => use the model max length (it's actually the default) snake_case__ : Any = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=A__ , max_length=A__ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): snake_case__ : Optional[Any] = datasets.map( A__ , batched=A__ , remove_columns=['idx', 'sentence1', 'sentence2'] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library snake_case__ : str = tokenized_datasets.rename_column('label' , 'labels' ) def collate_fn(A__ ): # On TPU it's best to pad everything to the same length or training will be very slow. snake_case__ : Optional[int] = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": snake_case__ : str = 16 elif accelerator.mixed_precision != "no": snake_case__ : int = 8 else: snake_case__ : str = None return tokenizer.pad( A__ , padding='longest' , max_length=A__ , pad_to_multiple_of=A__ , return_tensors='pt' , ) # Instantiate dataloaders. 
snake_case__ : Optional[int] = DataLoader( tokenized_datasets['train'] , shuffle=A__ , collate_fn=A__ , batch_size=A__ ) snake_case__ : str = DataLoader( tokenized_datasets['validation'] , shuffle=A__ , collate_fn=A__ , batch_size=A__ ) snake_case__ : List[str] = DataLoader( tokenized_datasets['test'] , shuffle=A__ , collate_fn=A__ , batch_size=A__ ) return train_dataloader, eval_dataloader, test_dataloader def UpperCamelCase__ ( A__ , A__ ) -> int: # New Code # snake_case__ : Tuple = [] # Download the dataset snake_case__ : Optional[int] = load_dataset('glue' , 'mrpc' ) # Create our splits snake_case__ : Any = StratifiedKFold(n_splits=int(args.num_folds ) ) # Initialize accelerator snake_case__ : Optional[int] = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs snake_case__ : List[str] = config['lr'] snake_case__ : Union[str, Any] = int(config['num_epochs'] ) snake_case__ : Tuple = int(config['seed'] ) snake_case__ : str = int(config['batch_size'] ) snake_case__ : Any = evaluate.load('glue' , 'mrpc' ) # If the batch size is too big we use gradient accumulation snake_case__ : List[Any] = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: snake_case__ : Union[str, Any] = batch_size // MAX_GPU_BATCH_SIZE snake_case__ : Tuple = MAX_GPU_BATCH_SIZE set_seed(A__ ) # New Code # # Create our folds: snake_case__ : Any = kfold.split(np.zeros(datasets['train'].num_rows ) , datasets['train']['label'] ) snake_case__ : Union[str, Any] = [] # Iterate over them for i, (train_idxs, valid_idxs) in enumerate(A__ ): snake_case__ , snake_case__ , snake_case__ : int = get_fold_dataloaders( A__ , A__ , A__ , A__ , ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) snake_case__ : Optional[Any] = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=A__ ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). snake_case__ : str = model.to(accelerator.device ) # Instantiate optimizer snake_case__ : Optional[int] = AdamW(params=model.parameters() , lr=A__ ) # Instantiate scheduler snake_case__ : List[Any] = get_linear_schedule_with_warmup( optimizer=A__ , num_warmup_steps=100 , num_training_steps=(len(A__ ) * num_epochs) // gradient_accumulation_steps , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ : int = accelerator.prepare( A__ , A__ , A__ , A__ , A__ ) # Now we train the model for epoch in range(A__ ): model.train() for step, batch in enumerate(A__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) snake_case__ : List[Any] = model(**A__ ) snake_case__ : Any = outputs.loss snake_case__ : Optional[int] = loss / gradient_accumulation_steps accelerator.backward(A__ ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(A__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): snake_case__ : Any = model(**A__ ) snake_case__ : str = outputs.logits.argmax(dim=-1 ) snake_case__ , snake_case__ : List[Any] = accelerator.gather_for_metrics((predictions, batch['labels']) ) metric.add_batch( predictions=A__ , references=A__ , ) snake_case__ : Optional[int] = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F"""epoch {epoch}:""" , A__ ) # New Code # # We also run predictions on the test set at the very end snake_case__ : List[str] = [] for step, batch in enumerate(A__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): snake_case__ : int = model(**A__ ) snake_case__ : Dict = outputs.logits snake_case__ , snake_case__ : List[Any] = accelerator.gather_for_metrics((predictions, batch['labels']) ) fold_predictions.append(predictions.cpu() ) if i == 0: # We need all of the test predictions test_references.append(references.cpu() ) # Use accelerator.print to print only on the main process. test_predictions.append(torch.cat(A__ , dim=0 ) ) # We now need to release all our memory and get rid of the current model, optimizer, etc accelerator.free_memory() # New Code # # Finally we check the accuracy of our folded results: snake_case__ : str = torch.cat(A__ , dim=0 ) snake_case__ : Tuple = torch.stack(A__ , dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 ) snake_case__ : Union[str, Any] = metric.compute(predictions=A__ , references=A__ ) accelerator.print('Average test metrics from all folds:' , A__ ) def UpperCamelCase__ ( ) -> Optional[int]: snake_case__ : Tuple = argparse.ArgumentParser(description='Simple example of training script.' ) parser.add_argument( '--mixed_precision' , type=A__ , default=A__ , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose' 'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.' 'and an Nvidia Ampere GPU.' , ) parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' ) # New Code # parser.add_argument('--num_folds' , type=A__ , default=3 , help='The number of splits to perform across the dataset' ) snake_case__ : Any = parser.parse_args() snake_case__ : Optional[Any] = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16} training_function(A__ , A__ ) if __name__ == "__main__": main()
699
import warnings

from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor


logger = logging.get_logger(__name__)


class PerceiverFeatureExtractor(PerceiverImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PerceiverImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
699
1
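The cross-validation script above follows a standard pattern: stratified splits of the training labels, one model trained per fold, and an ensemble of the per-fold test logits. A reduced sketch of the split-and-average logic with toy data (no Accelerate, no model; names are illustrative):

import numpy as np
from sklearn.model_selection import StratifiedKFold

labels = np.array([0, 1, 0, 1, 0, 1, 0, 1])  # stand-in for datasets["train"]["label"]
kfold = StratifiedKFold(n_splits=4)

fold_logits = []
for train_idxs, valid_idxs in kfold.split(np.zeros(len(labels)), labels):
    # Each fold would train a model on train_idxs and predict the shared test set;
    # random numbers stand in for those test logits here.
    fold_logits.append(np.random.randn(10, 2))

# Ensemble: average the logits across folds, then take the argmax, mirroring
# torch.stack(...).sum(0).div(num_folds).argmax(-1) in the script.
predictions = np.stack(fold_logits).mean(axis=0).argmax(axis=-1)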
import dataclasses import re import string from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple import numpy as np from . import residue_constants lowerCAmelCase__ : Optional[Any] = Mapping[str, np.ndarray] lowerCAmelCase__ : List[Any] = Mapping[str, Any] # Is a nested dict. lowerCAmelCase__ : Tuple = 0.01 @dataclasses.dataclass(frozen=_lowerCamelCase ) class __snake_case : __lowerCamelCase = 42 # [num_res, num_atom_type, 3] # Amino-acid type for each residue represented as an integer between 0 and # 20, where 20 is 'X'. __lowerCamelCase = 42 # [num_res] # Binary float mask to indicate presence of a particular atom. 1.0 if an atom # is present and 0.0 if not. This should be used for loss masking. __lowerCamelCase = 42 # [num_res, num_atom_type] # Residue index as used in PDB. It is not necessarily continuous or 0-indexed. __lowerCamelCase = 42 # [num_res] # B-factors, or temperature factors, of each residue (in sq. angstroms units), # representing the displacement of the residue from its ground truth mean # value. __lowerCamelCase = 42 # [num_res, num_atom_type] # Chain indices for multi-chain predictions __lowerCamelCase = None # Optional remark about the protein. Included as a comment in output PDB # files __lowerCamelCase = None # Templates used to generate this protein (prediction-only) __lowerCamelCase = None # Chain corresponding to each parent __lowerCamelCase = None def UpperCamelCase__ ( A__ ) -> Protein: snake_case__ : Dict = r'(\[[A-Z]+\]\n)' snake_case__ : List[str] = [tag.strip() for tag in re.split(A__ , A__ ) if len(A__ ) > 0] snake_case__ : Iterator[Tuple[str, List[str]]] = zip(tags[0::2] , [l.split('\n' ) for l in tags[1::2]] ) snake_case__ : List[str] = ["N", "CA", "C"] snake_case__ : List[str] = None snake_case__ : Union[str, Any] = None snake_case__ : List[Any] = None for g in groups: if "[PRIMARY]" == g[0]: snake_case__ : List[Any] = g[1][0].strip() for i in range(len(A__ ) ): if seq[i] not in residue_constants.restypes: snake_case__ : str = 'X' # FIXME: strings are immutable snake_case__ : Optional[Any] = np.array( [residue_constants.restype_order.get(A__ , residue_constants.restype_num ) for res_symbol in seq] ) elif "[TERTIARY]" == g[0]: snake_case__ : List[List[float]] = [] for axis in range(3 ): tertiary.append(list(map(A__ , g[1][axis].split() ) ) ) snake_case__ : Union[str, Any] = np.array(A__ ) snake_case__ : int = np.zeros((len(tertiary[0] ) // 3, residue_constants.atom_type_num, 3) ).astype(np.floataa ) for i, atom in enumerate(A__ ): snake_case__ : str = np.transpose(tertiary_np[:, i::3] ) atom_positions *= PICO_TO_ANGSTROM elif "[MASK]" == g[0]: snake_case__ : Dict = np.array(list(map({'-': 0, '+': 1}.get , g[1][0].strip() ) ) ) snake_case__ : int = np.zeros( ( len(A__ ), residue_constants.atom_type_num, ) ).astype(np.floataa ) for i, atom in enumerate(A__ ): snake_case__ : List[str] = 1 atom_mask *= mask[..., None] assert aatype is not None return Protein( atom_positions=A__ , atom_mask=A__ , aatype=A__ , residue_index=np.arange(len(A__ ) ) , b_factors=A__ , ) def UpperCamelCase__ ( A__ , A__ = 0 ) -> List[str]: snake_case__ : List[str] = [] snake_case__ : Tuple = prot.remark if remark is not None: pdb_headers.append(F"""REMARK {remark}""" ) snake_case__ : List[str] = prot.parents snake_case__ : Dict = prot.parents_chain_index if parents is not None and parents_chain_index is not None: snake_case__ : Tuple = [p for i, p in zip(A__ , A__ ) if i == chain_id] if parents is None or len(A__ ) == 0: snake_case__ : int = ['N/A'] 
pdb_headers.append(F"""PARENT {' '.join(A__ )}""" ) return pdb_headers def UpperCamelCase__ ( A__ , A__ ) -> str: snake_case__ : List[str] = [] snake_case__ : Optional[Any] = pdb_str.split('\n' ) snake_case__ : str = prot.remark if remark is not None: out_pdb_lines.append(F"""REMARK {remark}""" ) snake_case__ : List[List[str]] if prot.parents is not None and len(prot.parents ) > 0: snake_case__ : Optional[int] = [] if prot.parents_chain_index is not None: snake_case__ : Dict[str, List[str]] = {} for p, i in zip(prot.parents , prot.parents_chain_index ): parent_dict.setdefault(str(A__ ) , [] ) parent_dict[str(A__ )].append(A__ ) snake_case__ : Dict = max([int(A__ ) for chain_idx in parent_dict] ) for i in range(max_idx + 1 ): snake_case__ : Dict = parent_dict.get(str(A__ ) , ['N/A'] ) parents_per_chain.append(A__ ) else: parents_per_chain.append(list(prot.parents ) ) else: snake_case__ : List[Any] = [['N/A']] def make_parent_line(A__ ) -> str: return F"""PARENT {' '.join(A__ )}""" out_pdb_lines.append(make_parent_line(parents_per_chain[0] ) ) snake_case__ : str = 0 for i, l in enumerate(A__ ): if "PARENT" not in l and "REMARK" not in l: out_pdb_lines.append(A__ ) if "TER" in l and "END" not in lines[i + 1]: chain_counter += 1 if not chain_counter >= len(A__ ): snake_case__ : int = parents_per_chain[chain_counter] else: snake_case__ : List[Any] = ['N/A'] out_pdb_lines.append(make_parent_line(A__ ) ) return "\n".join(A__ ) def UpperCamelCase__ ( A__ ) -> str: snake_case__ : Optional[int] = residue_constants.restypes + ['X'] def res_atoa(A__ ) -> str: return residue_constants.restype_atoa.get(restypes[r] , 'UNK' ) snake_case__ : Optional[Any] = residue_constants.atom_types snake_case__ : List[str] = [] snake_case__ : str = prot.atom_mask snake_case__ : Optional[Any] = prot.aatype snake_case__ : int = prot.atom_positions snake_case__ : Union[str, Any] = prot.residue_index.astype(np.intaa ) snake_case__ : int = prot.b_factors snake_case__ : Optional[Any] = prot.chain_index if np.any(aatype > residue_constants.restype_num ): raise ValueError('Invalid aatypes.' ) snake_case__ : List[Any] = get_pdb_headers(A__ ) if len(A__ ) > 0: pdb_lines.extend(A__ ) snake_case__ : List[Any] = aatype.shape[0] snake_case__ : str = 1 snake_case__ : Union[str, Any] = 0 snake_case__ : Union[str, Any] = string.ascii_uppercase snake_case__ : Optional[int] = None # Add all atom sites. for i in range(A__ ): snake_case__ : Dict = res_atoa(aatype[i] ) for atom_name, pos, mask, b_factor in zip(A__ , atom_positions[i] , atom_mask[i] , b_factors[i] ): if mask < 0.5: continue snake_case__ : str = 'ATOM' snake_case__ : Any = atom_name if len(A__ ) == 4 else F""" {atom_name}""" snake_case__ : List[str] = '' snake_case__ : Any = '' snake_case__ : List[str] = 1.0_0 snake_case__ : int = atom_name[0] # Protein supports only C, N, O, S, this works. snake_case__ : Optional[Any] = '' snake_case__ : Tuple = 'A' if chain_index is not None: snake_case__ : Dict = chain_tags[chain_index[i]] # PDB is a columnar format, every space matters here! 
snake_case__ : str = ( F"""{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}""" F"""{res_name_a:>3} {chain_tag:>1}""" F"""{residue_index[i]:>4}{insertion_code:>1} """ F"""{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}""" F"""{occupancy:>6.2f}{b_factor:>6.2f} """ F"""{element:>2}{charge:>2}""" ) pdb_lines.append(A__ ) atom_index += 1 snake_case__ : str = i == n - 1 if chain_index is not None: if i != n - 1 and chain_index[i + 1] != prev_chain_index: snake_case__ : Union[str, Any] = True snake_case__ : Dict = chain_index[i + 1] if should_terminate: # Close the chain. snake_case__ : Union[str, Any] = 'TER' snake_case__ : Optional[int] = ( F"""{chain_end:<6}{atom_index:>5} {res_atoa(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}""" ) pdb_lines.append(A__ ) atom_index += 1 if i != n - 1: # "prev" is a misnomer here. This happens at the beginning of # each new chain. pdb_lines.extend(get_pdb_headers(A__ , A__ ) ) pdb_lines.append('END' ) pdb_lines.append('' ) return "\n".join(A__ ) def UpperCamelCase__ ( A__ ) -> np.ndarray: return residue_constants.STANDARD_ATOM_MASK[prot.aatype] def UpperCamelCase__ ( A__ , A__ , A__ = None , A__ = None , A__ = None , A__ = None , A__ = None , ) -> Protein: return Protein( aatype=features['aatype'] , atom_positions=result['final_atom_positions'] , atom_mask=result['final_atom_mask'] , residue_index=features['residue_index'] + 1 , b_factors=b_factors if b_factors is not None else np.zeros_like(result['final_atom_mask'] ) , chain_index=A__ , remark=A__ , parents=A__ , parents_chain_index=A__ , )
699
import io import itertools import json from dataclasses import dataclass from typing import Optional import pyarrow as pa import pyarrow.json as paj import datasets from datasets.table import table_cast from datasets.utils.file_utils import readline lowerCAmelCase__ : List[Any] = datasets.utils.logging.get_logger(__name__) @dataclass class __snake_case ( datasets.BuilderConfig ): __lowerCamelCase = None __lowerCamelCase = "utf-8" __lowerCamelCase = None __lowerCamelCase = None __lowerCamelCase = True # deprecated __lowerCamelCase = None # deprecated __lowerCamelCase = 10 << 20 # 10MB __lowerCamelCase = None class __snake_case ( datasets.ArrowBasedBuilder ): __lowerCamelCase = JsonConfig def __a ( self ) -> Optional[Any]: '''simple docstring''' if self.config.block_size is not None: logger.warning('The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead' ) snake_case__ : str = self.config.block_size if self.config.use_threads is not True: logger.warning( 'The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.' ) if self.config.newlines_in_values is not None: raise ValueError('The JSON loader parameter `newlines_in_values` is no longer supported' ) return datasets.DatasetInfo(features=self.config.features ) def __a ( self , __UpperCamelCase ) -> Dict: '''simple docstring''' if not self.config.data_files: raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" ) snake_case__ : int = dl_manager.download_and_extract(self.config.data_files ) if isinstance(__UpperCamelCase , (str, list, tuple) ): snake_case__ : Any = data_files if isinstance(__UpperCamelCase , __UpperCamelCase ): snake_case__ : Optional[Any] = [files] snake_case__ : List[str] = [dl_manager.iter_files(__UpperCamelCase ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'files': files} )] snake_case__ : List[Any] = [] for split_name, files in data_files.items(): if isinstance(__UpperCamelCase , __UpperCamelCase ): snake_case__ : List[Any] = [files] snake_case__ : Any = [dl_manager.iter_files(__UpperCamelCase ) for file in files] splits.append(datasets.SplitGenerator(name=__UpperCamelCase , gen_kwargs={'files': files} ) ) return splits def __a ( self , __UpperCamelCase ) -> pa.Table: '''simple docstring''' if self.config.features is not None: # adding missing columns for column_name in set(self.config.features ) - set(pa_table.column_names ): snake_case__ : List[Any] = self.config.features.arrow_schema.field(__UpperCamelCase ).type snake_case__ : List[str] = pa_table.append_column(__UpperCamelCase , pa.array([None] * len(__UpperCamelCase ) , type=__UpperCamelCase ) ) # more expensive cast to support nested structures with keys in a different order # allows str <-> int/float or str to Audio for example snake_case__ : List[str] = table_cast(__UpperCamelCase , self.config.features.arrow_schema ) return pa_table def __a ( self , __UpperCamelCase ) -> int: '''simple docstring''' for file_idx, file in enumerate(itertools.chain.from_iterable(__UpperCamelCase ) ): # If the file is one json object and if we need to look at the list of items in one specific field if self.config.field is not None: with open(__UpperCamelCase , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f: snake_case__ : Union[str, Any] = json.load(__UpperCamelCase ) # We keep only the field we are interested in snake_case__ : Tuple = dataset[self.config.field] # We accept two format: a list 
of dicts or a dict of lists if isinstance(__UpperCamelCase , (list, tuple) ): snake_case__ : List[Any] = set().union(*[row.keys() for row in dataset] ) snake_case__ : List[Any] = {col: [row.get(__UpperCamelCase ) for row in dataset] for col in keys} else: snake_case__ : List[Any] = dataset snake_case__ : Dict = pa.Table.from_pydict(__UpperCamelCase ) yield file_idx, self._cast_table(__UpperCamelCase ) # If the file has one json object per line else: with open(__UpperCamelCase , 'rb' ) as f: snake_case__ : Optional[int] = 0 # Use block_size equal to the chunk size divided by 32 to leverage multithreading # Set a default minimum value of 16kB if the chunk size is really small snake_case__ : Tuple = max(self.config.chunksize // 32 , 16 << 10 ) snake_case__ : Optional[Any] = ( self.config.encoding_errors if self.config.encoding_errors is not None else 'strict' ) while True: snake_case__ : Optional[int] = f.read(self.config.chunksize ) if not batch: break # Finish current line try: batch += f.readline() except (AttributeError, io.UnsupportedOperation): batch += readline(__UpperCamelCase ) # PyArrow only accepts utf-8 encoded bytes if self.config.encoding != "utf-8": snake_case__ : int = batch.decode(self.config.encoding , errors=__UpperCamelCase ).encode('utf-8' ) try: while True: try: snake_case__ : List[str] = paj.read_json( io.BytesIO(__UpperCamelCase ) , read_options=paj.ReadOptions(block_size=__UpperCamelCase ) ) break except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e: if ( isinstance(__UpperCamelCase , pa.ArrowInvalid ) and "straddling" not in str(__UpperCamelCase ) or block_size > len(__UpperCamelCase ) ): raise else: # Increase the block size in case it was too small. # The block size will be reset for the next file. logger.debug( F"""Batch of {len(__UpperCamelCase )} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.""" ) block_size *= 2 except pa.ArrowInvalid as e: try: with open( __UpperCamelCase , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f: snake_case__ : Tuple = json.load(__UpperCamelCase ) except json.JSONDecodeError: logger.error(F"""Failed to read file '{file}' with error {type(__UpperCamelCase )}: {e}""" ) raise e # If possible, parse the file as a list of json objects and exit the loop if isinstance(__UpperCamelCase , __UpperCamelCase ): # list is the only sequence type supported in JSON try: snake_case__ : str = set().union(*[row.keys() for row in dataset] ) snake_case__ : Union[str, Any] = {col: [row.get(__UpperCamelCase ) for row in dataset] for col in keys} snake_case__ : List[str] = pa.Table.from_pydict(__UpperCamelCase ) except (pa.ArrowInvalid, AttributeError) as e: logger.error(F"""Failed to read file '{file}' with error {type(__UpperCamelCase )}: {e}""" ) raise ValueError(F"""Not able to read records in the JSON file at {file}.""" ) from None yield file_idx, self._cast_table(__UpperCamelCase ) break else: logger.error(F"""Failed to read file '{file}' with error {type(__UpperCamelCase )}: {e}""" ) raise ValueError( F"""Not able to read records in the JSON file at {file}. """ F"""You should probably indicate the field of the JSON file containing your records. """ F"""This JSON file contain the following fields: {str(list(dataset.keys() ) )}. """ F"""Select the correct one and provide it as `field='XXX'` to the dataset loading method. 
""" ) from None # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(__UpperCamelCase ) batch_idx += 1
699
1
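The protein-to-PDB writer above relies on PDB being a strictly columnar format, assembled from fixed-width f-string fields. A standalone rendering of a single ATOM record using the same widths (the exact run lengths of the literal spaces are partly lost in the flattened source; the 3 and 10 below follow the standard 80-column layout):

record = (
    f"{'ATOM':<6}{1:>5} {' CA':<4}{'':>1}"
    f"{'MET':>3} {'A':>1}"
    f"{1:>4}{'':>1}   "
    f"{10.0:>8.3f}{20.0:>8.3f}{30.0:>8.3f}"
    f"{1.0:>6.2f}{0.0:>6.2f}          "
    f"{'C':>2}{'':>2}"
)
assert len(record) == 80  # standard PDB line width; every space matters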
import unittest import numpy as np from transformers import RoFormerConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roformer.modeling_flax_roformer import ( FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerModel, ) class __snake_case ( unittest.TestCase ): def __init__( self , __UpperCamelCase , __UpperCamelCase=13 , __UpperCamelCase=7 , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=99 , __UpperCamelCase=32 , __UpperCamelCase=5 , __UpperCamelCase=4 , __UpperCamelCase=37 , __UpperCamelCase="gelu" , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=512 , __UpperCamelCase=16 , __UpperCamelCase=2 , __UpperCamelCase=0.0_2 , __UpperCamelCase=4 , ) -> Optional[Any]: '''simple docstring''' snake_case__ : str = parent snake_case__ : List[str] = batch_size snake_case__ : List[Any] = seq_length snake_case__ : Optional[int] = is_training snake_case__ : Union[str, Any] = use_attention_mask snake_case__ : Dict = use_token_type_ids snake_case__ : str = use_labels snake_case__ : List[Any] = vocab_size snake_case__ : Optional[Any] = hidden_size snake_case__ : Tuple = num_hidden_layers snake_case__ : Union[str, Any] = num_attention_heads snake_case__ : List[Any] = intermediate_size snake_case__ : str = hidden_act snake_case__ : Optional[Any] = hidden_dropout_prob snake_case__ : Dict = attention_probs_dropout_prob snake_case__ : List[str] = max_position_embeddings snake_case__ : int = type_vocab_size snake_case__ : List[str] = type_sequence_label_size snake_case__ : List[str] = initializer_range snake_case__ : Any = num_choices def __a ( self ) -> List[Any]: '''simple docstring''' snake_case__ : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) snake_case__ : List[Any] = None if self.use_attention_mask: snake_case__ : List[Any] = random_attention_mask([self.batch_size, self.seq_length] ) snake_case__ : Union[str, Any] = None if self.use_token_type_ids: snake_case__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) snake_case__ : Any = RoFormerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCamelCase , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def __a ( self ) -> Any: '''simple docstring''' snake_case__ : List[Any] = self.prepare_config_and_inputs() snake_case__ , snake_case__ , snake_case__ , snake_case__ : str = config_and_inputs snake_case__ : List[str] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask} return config, inputs_dict @require_flax class __snake_case ( _lowerCamelCase ,unittest.TestCase ): __lowerCamelCase = True __lowerCamelCase = ( ( FlaxRoFormerModel, FlaxRoFormerForMaskedLM, FlaxRoFormerForSequenceClassification, 
FlaxRoFormerForTokenClassification, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, ) if is_flax_available() else () ) def __a ( self ) -> Optional[int]: '''simple docstring''' snake_case__ : List[str] = FlaxRoFormerModelTester(self ) @slow def __a ( self ) -> Optional[Any]: '''simple docstring''' for model_class_name in self.all_model_classes: snake_case__ : List[str] = model_class_name.from_pretrained('junnyu/roformer_chinese_small' , from_pt=__UpperCamelCase ) snake_case__ : List[str] = model(np.ones((1, 1) ) ) self.assertIsNotNone(__UpperCamelCase ) @require_flax class __snake_case ( unittest.TestCase ): @slow def __a ( self ) -> Optional[Any]: '''simple docstring''' snake_case__ : Any = FlaxRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base' ) snake_case__ : Union[str, Any] = jnp.array([[0, 1, 2, 3, 4, 5]] ) snake_case__ : int = model(__UpperCamelCase )[0] snake_case__ : Optional[Any] = 50000 snake_case__ : int = (1, 6, vocab_size) self.assertEqual(output.shape , __UpperCamelCase ) snake_case__ : List[Any] = jnp.array( [[[-0.1_2_0_5, -1.0_2_6_5, 0.2_9_2_2], [-1.5_1_3_4, 0.1_9_7_4, 0.1_5_1_9], [-5.0_1_3_5, -3.9_0_0_3, -0.8_4_0_4]]] ) self.assertTrue(jnp.allclose(output[:, :3, :3] , __UpperCamelCase , atol=1E-4 ) )
699
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase__ : Any = {'''configuration_xglm''': ['''XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XGLMConfig''']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ : str = ['''XGLMTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ : Dict = ['''XGLMTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ : Optional[int] = [ '''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''', '''XGLMForCausalLM''', '''XGLMModel''', '''XGLMPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ : Dict = [ '''FlaxXGLMForCausalLM''', '''FlaxXGLMModel''', '''FlaxXGLMPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ : Dict = [ '''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFXGLMForCausalLM''', '''TFXGLMModel''', '''TFXGLMPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xglm import XGLMTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xglm_fast import XGLMTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, TFXGLMPreTrainedModel, ) else: import sys lowerCAmelCase__ : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
699
1
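The XGLM __init__ above uses the transformers lazy-import pattern: every public name is declared up front in an import-structure dict, and the submodules are only imported on first attribute access. A minimal sketch of the pattern for one submodule (assuming _LazyModule is importable from transformers.utils, as the relative import in the file suggests):

import sys
from typing import TYPE_CHECKING

from transformers.utils import _LazyModule

_import_structure = {"configuration_xglm": ["XGLMConfig"]}

if TYPE_CHECKING:
    # Static type checkers see the real imports...
    from transformers.models.xglm.configuration_xglm import XGLMConfig  # noqa: F401
else:
    # ...while at runtime the module is replaced by a lazy proxy.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)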
import unittest from transformers import PegasusTokenizer, PegasusTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin lowerCAmelCase__ : Any = get_tests_dir('''fixtures/test_sentencepiece_no_bos.model''') @require_sentencepiece @require_tokenizers class __snake_case ( _lowerCamelCase ,unittest.TestCase ): __lowerCamelCase = PegasusTokenizer __lowerCamelCase = PegasusTokenizerFast __lowerCamelCase = True __lowerCamelCase = True def __a ( self ) -> Optional[Any]: '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing snake_case__ : str = PegasusTokenizer(__UpperCamelCase ) tokenizer.save_pretrained(self.tmpdirname ) @cached_property def __a ( self ) -> Tuple: '''simple docstring''' return PegasusTokenizer.from_pretrained('google/pegasus-large' ) def __a ( self , **__UpperCamelCase ) -> PegasusTokenizer: '''simple docstring''' return PegasusTokenizer.from_pretrained(self.tmpdirname , **__UpperCamelCase ) def __a ( self , __UpperCamelCase ) -> Union[str, Any]: '''simple docstring''' return ("This is a test", "This is a test") def __a ( self ) -> Optional[int]: '''simple docstring''' snake_case__ : List[str] = '</s>' snake_case__ : int = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCamelCase ) , __UpperCamelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCamelCase ) , __UpperCamelCase ) def __a ( self ) -> List[Any]: '''simple docstring''' snake_case__ : int = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '<pad>' ) self.assertEqual(vocab_keys[1] , '</s>' ) self.assertEqual(vocab_keys[-1] , 'v' ) self.assertEqual(len(__UpperCamelCase ) , 1103 ) def __a ( self ) -> Union[str, Any]: '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 1103 ) def __a ( self ) -> Any: '''simple docstring''' snake_case__ : Dict = self.rust_tokenizer_class.from_pretrained(self.tmpdirname ) snake_case__ : int = self.tokenizer_class.from_pretrained(self.tmpdirname ) snake_case__ : Any = ( 'Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important' ' </s> <pad> <pad> <pad>' ) snake_case__ : List[str] = rust_tokenizer([raw_input_str] , return_tensors=__UpperCamelCase , add_special_tokens=__UpperCamelCase ).input_ids[0] snake_case__ : List[str] = py_tokenizer([raw_input_str] , return_tensors=__UpperCamelCase , add_special_tokens=__UpperCamelCase ).input_ids[0] self.assertListEqual(__UpperCamelCase , __UpperCamelCase ) def __a ( self ) -> Optional[Any]: '''simple docstring''' snake_case__ : str = self._large_tokenizer # <mask_1> masks whole sentence while <mask_2> masks single word snake_case__ : Tuple = '<mask_1> To ensure a <mask_2> flow of bank resolutions.' 
snake_case__ : List[Any] = [2, 413, 615, 114, 3, 1971, 113, 1679, 10710, 107, 1] snake_case__ : Any = tokenizer([raw_input_str] , return_tensors=__UpperCamelCase ).input_ids[0] self.assertListEqual(__UpperCamelCase , __UpperCamelCase ) def __a ( self ) -> Any: '''simple docstring''' snake_case__ : Any = self._large_tokenizer # The tracebacks for the following asserts are **better** without messages or self.assertEqual assert tokenizer.vocab_size == 96103 assert tokenizer.pad_token_id == 0 assert tokenizer.eos_token_id == 1 assert tokenizer.offset == 103 assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105 assert tokenizer.unk_token == "<unk>" assert tokenizer.model_max_length == 1024 snake_case__ : Tuple = 'To ensure a smooth flow of bank resolutions.' snake_case__ : List[Any] = [413, 615, 114, 2291, 1971, 113, 1679, 10710, 107, 1] snake_case__ : int = tokenizer([raw_input_str] , return_tensors=__UpperCamelCase ).input_ids[0] self.assertListEqual(__UpperCamelCase , __UpperCamelCase ) assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"] @require_torch def __a ( self ) -> Union[str, Any]: '''simple docstring''' snake_case__ : str = ['This is going to be way too long.' * 150, 'short example'] snake_case__ : Dict = ['not super long but more than 5 tokens', 'tiny'] snake_case__ : Optional[Any] = self._large_tokenizer(__UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase , return_tensors='pt' ) snake_case__ : Optional[int] = self._large_tokenizer( text_target=__UpperCamelCase , max_length=5 , padding=__UpperCamelCase , truncation=__UpperCamelCase , return_tensors='pt' ) assert batch.input_ids.shape == (2, 1024) assert batch.attention_mask.shape == (2, 1024) assert targets["input_ids"].shape == (2, 5) assert len(__UpperCamelCase ) == 2 # input_ids, attention_mask. 
@slow def __a ( self ) -> Dict: '''simple docstring''' snake_case__ : int = {'input_ids': [[38979, 143, 18485, 606, 130, 26669, 87686, 121, 54189, 1129, 111, 26669, 87686, 121, 9114, 14787, 121, 13249, 158, 592, 956, 121, 14621, 31576, 143, 62613, 108, 9688, 930, 43430, 11562, 62613, 304, 108, 11443, 897, 108, 9314, 17415, 63399, 108, 11443, 7614, 18316, 118, 4284, 7148, 12430, 143, 1400, 25703, 158, 111, 4284, 7148, 11772, 143, 21297, 1064, 158, 122, 204, 3506, 1754, 1133, 14787, 1581, 115, 33224, 4482, 111, 1355, 110, 29173, 317, 50833, 108, 20147, 94665, 111, 77198, 107, 1], [110, 62613, 117, 638, 112, 1133, 121, 20098, 1355, 79050, 13872, 135, 1596, 53541, 1352, 141, 13039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 18289, 17780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__UpperCamelCase , model_name='google/bigbird-pegasus-large-arxiv' , revision='ba85d0851d708441f91440d509690f1ab6353415' , ) @require_sentencepiece @require_tokenizers class __snake_case ( _lowerCamelCase ,unittest.TestCase ): __lowerCamelCase = PegasusTokenizer __lowerCamelCase = PegasusTokenizerFast __lowerCamelCase = True __lowerCamelCase = True def __a ( self ) -> str: '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing snake_case__ : Optional[int] = PegasusTokenizer(__UpperCamelCase , offset=0 , mask_token_sent=__UpperCamelCase , mask_token='[MASK]' ) tokenizer.save_pretrained(self.tmpdirname ) @cached_property def __a ( self ) -> Union[str, Any]: '''simple docstring''' return PegasusTokenizer.from_pretrained('google/bigbird-pegasus-large-arxiv' ) def __a ( self , **__UpperCamelCase ) -> PegasusTokenizer: '''simple docstring''' return PegasusTokenizer.from_pretrained(self.tmpdirname , **__UpperCamelCase ) def __a ( self , __UpperCamelCase ) -> List[Any]: '''simple docstring''' return ("This is a test", "This is a test") def __a ( self ) -> str: '''simple docstring''' snake_case__ : Optional[int] = self.rust_tokenizer_class.from_pretrained(self.tmpdirname ) snake_case__ : Optional[int] = self.tokenizer_class.from_pretrained(self.tmpdirname ) snake_case__ : List[Any] = ( 'Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>' ' <pad> <pad> <pad>' ) snake_case__ : List[str] = 
rust_tokenizer([raw_input_str] , return_tensors=__UpperCamelCase , add_special_tokens=__UpperCamelCase ).input_ids[0] snake_case__ : List[str] = py_tokenizer([raw_input_str] , return_tensors=__UpperCamelCase , add_special_tokens=__UpperCamelCase ).input_ids[0] self.assertListEqual(__UpperCamelCase , __UpperCamelCase ) @require_torch def __a ( self ) -> Optional[int]: '''simple docstring''' snake_case__ : List[Any] = ['This is going to be way too long.' * 1000, 'short example'] snake_case__ : Optional[int] = ['not super long but more than 5 tokens', 'tiny'] snake_case__ : Tuple = self._large_tokenizer(__UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase , return_tensors='pt' ) snake_case__ : Any = self._large_tokenizer( text_target=__UpperCamelCase , max_length=5 , padding=__UpperCamelCase , truncation=__UpperCamelCase , return_tensors='pt' ) assert batch.input_ids.shape == (2, 4096) assert batch.attention_mask.shape == (2, 4096) assert targets["input_ids"].shape == (2, 5) assert len(__UpperCamelCase ) == 2 # input_ids, attention_mask. def __a ( self ) -> Optional[int]: '''simple docstring''' snake_case__ : Dict = ( 'This is an example string that is used to test the original TF implementation against the HF' ' implementation' ) snake_case__ : List[str] = self._large_tokenizer(__UpperCamelCase ).input_ids self.assertListEqual( __UpperCamelCase , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 25016, 3137, 464, 109, 26955, 3137, 1] , )
699
from __future__ import annotations import random # Maximum size of the population. Bigger could be faster but is more memory expensive. lowerCAmelCase__ : Dict = 2_00 # Number of elements selected in every generation of evolution. The selection takes # place from best to worst of that generation and must be smaller than N_POPULATION. lowerCAmelCase__ : List[str] = 50 # Probability that an element of a generation can mutate, changing one of its genes. # This will guarantee that all genes will be used during evolution. lowerCAmelCase__ : List[str] = 0.4 # Just a seed to improve randomness required by the algorithm. random.seed(random.randint(0, 10_00)) def UpperCamelCase__ ( A__ , A__ ) -> tuple[str, float]: snake_case__ : Tuple = len([g for position, g in enumerate(A__ ) if g == main_target[position]] ) return (item, float(A__ )) def UpperCamelCase__ ( A__ , A__ ) -> tuple[str, str]: snake_case__ : str = random.randint(0 , len(A__ ) - 1 ) snake_case__ : int = parent_a[:random_slice] + parent_a[random_slice:] snake_case__ : Any = parent_a[:random_slice] + parent_a[random_slice:] return (child_a, child_a) def UpperCamelCase__ ( A__ , A__ ) -> str: snake_case__ : List[Any] = list(A__ ) if random.uniform(0 , 1 ) < MUTATION_PROBABILITY: snake_case__ : Optional[Any] = random.choice(A__ ) return "".join(A__ ) def UpperCamelCase__ ( A__ , A__ , A__ , ) -> list[str]: snake_case__ : Tuple = [] # Generate more children proportionally to the fitness score. snake_case__ : Optional[Any] = int(parent_a[1] * 100 ) + 1 snake_case__ : str = 10 if child_n >= 10 else child_n for _ in range(A__ ): snake_case__ : Any = population_score[random.randint(0 , A__ )][0] snake_case__ , snake_case__ : int = crossover(parent_a[0] , A__ ) # Append new string to the population list. pop.append(mutate(A__ , A__ ) ) pop.append(mutate(A__ , A__ ) ) return pop def UpperCamelCase__ ( A__ , A__ , A__ = True ) -> tuple[int, int, str]: # Verify if N_POPULATION is bigger than N_SELECTED if N_POPULATION < N_SELECTED: snake_case__ : Union[str, Any] = F"""{N_POPULATION} must be bigger than {N_SELECTED}""" raise ValueError(A__ ) # Verify that the target contains no genes besides the ones inside genes variable. snake_case__ : Tuple = sorted({c for c in target if c not in genes} ) if not_in_genes_list: snake_case__ : int = F"""{not_in_genes_list} is not in genes list, evolution cannot converge""" raise ValueError(A__ ) # Generate random starting population. snake_case__ : Union[str, Any] = [] for _ in range(A__ ): population.append(''.join([random.choice(A__ ) for i in range(len(A__ ) )] ) ) # Just some logs to know what the algorithms is doing. snake_case__ , snake_case__ : str = 0, 0 # This loop will end when we find a perfect match for our target. while True: generation += 1 total_population += len(A__ ) # Random population created. Now it's time to evaluate. # Adding a bit of concurrency can make everything faster, # # import concurrent.futures # population_score: list[tuple[str, float]] = [] # with concurrent.futures.ThreadPoolExecutor( # max_workers=NUM_WORKERS) as executor: # futures = {executor.submit(evaluate, item) for item in population} # concurrent.futures.wait(futures) # population_score = [item.result() for item in futures] # # but with a simple algorithm like this, it will probably be slower. # We just need to call evaluate for every item inside the population. snake_case__ : List[Any] = [evaluate(A__ , A__ ) for item in population] # Check if there is a matching evolution. 
snake_case__ : int = sorted(A__ , key=lambda A__ : x[1] , reverse=A__ ) if population_score[0][0] == target: return (generation, total_population, population_score[0][0]) # Print the best result every 10 generation. # Just to know that the algorithm is working. if debug and generation % 10 == 0: print( F"""\nGeneration: {generation}""" F"""\nTotal Population:{total_population}""" F"""\nBest score: {population_score[0][1]}""" F"""\nBest string: {population_score[0][0]}""" ) # Flush the old population, keeping some of the best evolutions. # Keeping this avoid regression of evolution. snake_case__ : Optional[int] = population[: int(N_POPULATION / 3 )] population.clear() population.extend(A__ ) # Normalize population score to be between 0 and 1. snake_case__ : str = [ (item, score / len(A__ )) for item, score in population_score ] # This is selection for i in range(A__ ): population.extend(select(population_score[int(A__ )] , A__ , A__ ) ) # Check if the population has already reached the maximum value and if so, # break the cycle. If this check is disabled, the algorithm will take # forever to compute large strings, but will also calculate small strings in # a far fewer generations. if len(A__ ) > N_POPULATION: break if __name__ == "__main__": lowerCAmelCase__ : str = ( '''This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!''' ) lowerCAmelCase__ : Optional[Any] = list( ''' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm''' '''nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\''' ) lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__ : List[str] = basic(target_str, genes_list) print( F'''\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}''' )
699
1
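The evolution loop above selects parents in two steps: sort the scored population in descending order and keep the top third as elites, then normalize each score by the target length before sampling. A small standalone rendering of that step (the names and toy scores are illustrative):

population_score = [("to", 1.0), ("ta", 1.0), ("xx", 0.0), ("tq", 1.0)]
population_score.sort(key=lambda item: item[1], reverse=True)

# Keep the best third to avoid regressions between generations.
elites = [item for item, _ in population_score[: len(population_score) // 3]]

# Scores are normalized by the target length so they land in [0, 1].
target = "to"
normalized = [(item, score / len(target)) for item, score in population_score]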
import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class AltCLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
700
from __future__ import annotations

from collections.abc import Iterator
from typing import Generic, TypeVar

T = TypeVar("T")


class Node(Generic[T]):
    def __init__(self, data: T):
        self.data = data  # the value stored in this node
        self.next: Node[T] | None = None  # reference to the node below

    def __str__(self) -> str:
        return f"{self.data}"


class LinkedStack(Generic[T]):
    """Stack implemented on top of a singly linked list; the head is the top."""

    def __init__(self) -> None:
        self.top: Node[T] | None = None

    def __iter__(self) -> Iterator[T]:
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__(self) -> str:
        return "->".join([str(item) for item in self])

    def __len__(self) -> int:
        return len(tuple(iter(self)))

    def is_empty(self) -> bool:
        return self.top is None

    def push(self, item: T) -> None:
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop(self) -> T:
        if self.is_empty():
            raise IndexError("pop from empty stack")
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data

    def peek(self) -> T:
        if self.is_empty():
            raise IndexError("peek from empty stack")
        assert self.top is not None
        return self.top.data

    def clear(self) -> None:
        self.top = None


if __name__ == "__main__":
    from doctest import testmod

    testmod()
699
0
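A quick exercise of the linked-list stack above (using the restored Node/LinkedStack names): pushes go to the head, pops come off it, and iteration walks top-down.

stack = LinkedStack[int]()
stack.push(1)
stack.push(2)
assert str(stack) == "2->1"      # iteration starts at the top
assert stack.peek() == 2
assert stack.pop() == 2
assert len(stack) == 1 and not stack.is_empty()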
import os import sys import tempfile import torch from .state import AcceleratorState from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment def UpperCamelCase__ ( A__ , A__=() , A__=None , A__="no" , A__="29500" ) -> Dict: snake_case__ : List[Any] = False snake_case__ : List[str] = False if any(key.startswith('KAGGLE' ) for key in os.environ.keys() ): snake_case__ : Tuple = True elif "IPython" in sys.modules: snake_case__ : Dict = 'google.colab' in str(sys.modules['IPython'].get_ipython() ) try: snake_case__ : Any = PrecisionType(mixed_precision.lower() ) except ValueError: raise ValueError( F"""Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}.""" ) if (in_colab or in_kaggle) and (os.environ.get('TPU_NAME' , __A ) is not None): # TPU launch import torch_xla.distributed.xla_multiprocessing as xmp if len(AcceleratorState._shared_state ) > 0: raise ValueError( 'To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside ' 'your training function. Restart your notebook and make sure no cells initializes an ' '`Accelerator`.' ) if num_processes is None: snake_case__ : int = 8 snake_case__ : int = PrepareForLaunch(__A , distributed_type='TPU' ) print(F"""Launching a training on {num_processes} TPU cores.""" ) xmp.spawn(__A , args=__A , nprocs=__A , start_method='fork' ) elif in_colab: # No need for a distributed launch otherwise as it's either CPU or one GPU. if torch.cuda.is_available(): print('Launching training on one GPU.' ) else: print('Launching training on one CPU.' ) function(*__A ) else: if num_processes is None: raise ValueError( 'You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.' ) if num_processes > 1: # Multi-GPU launch from torch.multiprocessing import start_processes from torch.multiprocessing.spawn import ProcessRaisedException if len(AcceleratorState._shared_state ) > 0: raise ValueError( 'To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized ' 'inside your training function. Restart your notebook and make sure no cells initializes an ' '`Accelerator`.' ) if torch.cuda.is_initialized(): raise ValueError( 'To launch a multi-GPU training from your notebook, you need to avoid running any instruction ' 'using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA ' 'function.' ) # torch.distributed will expect a few environment variable to be here. We set the ones common to each # process here (the other ones will be set be the launcher). with patch_environment( world_size=__A , master_addr='127.0.01' , master_port=__A , mixed_precision=__A ): snake_case__ : List[str] = PrepareForLaunch(__A , distributed_type='MULTI_GPU' ) print(F"""Launching training on {num_processes} GPUs.""" ) try: start_processes(__A , args=__A , nprocs=__A , start_method='fork' ) except ProcessRaisedException as e: if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]: raise RuntimeError( 'CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. ' 'This likely stems from an outside import causing issues once the `notebook_launcher()` is called. ' 'Please review your imports and test them when running the `notebook_launcher()` to identify ' 'which one is problematic.' ) from e else: # No need for a distributed launch otherwise as it's either CPU, GPU or MPS. 
if is_mps_available(): snake_case__ : Optional[Any] = '1' print('Launching training on MPS.' ) elif torch.cuda.is_available(): print('Launching training on one GPU.' ) else: print('Launching training on CPU.' ) function(*__A ) def UpperCamelCase__ ( A__ , A__=() , A__=2 ) -> Tuple: from torch.multiprocessing import start_processes with tempfile.NamedTemporaryFile() as tmp_file: # torch.distributed will expect a few environment variable to be here. We set the ones common to each # process here (the other ones will be set be the launcher). with patch_environment( world_size=__A , master_addr='127.0.01' , master_port='29500' , accelerate_mixed_precision='no' , accelerate_debug_rdv_file=tmp_file.name , accelerate_use_cpu='yes' , ): snake_case__ : Union[str, Any] = PrepareForLaunch(__A , debug=__A ) start_processes(__A , args=__A , nprocs=__A , start_method='fork' )
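# A minimal usage sketch for this launcher (in upstream `accelerate` it is
# exposed as `notebook_launcher`; the training function below is illustrative):
#
#     from accelerate import notebook_launcher
#
#     def training_loop(mixed_precision="fp16"):
#         ...  # build the Accelerator, model and dataloaders, then train
#
#     notebook_launcher(training_loop, args=("fp16",), num_processes=2)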
701
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging

logger = logging.get_logger(__name__)

POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sail/poolformer_s12": "https://huggingface.co/sail/poolformer_s12/resolve/main/config.json",
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}


class PoolFormerConfig(PretrainedConfig):
    model_type = "poolformer"

    def __init__(
        self,
        num_channels=3,
        patch_size=16,
        stride=16,
        pool_size=3,
        mlp_ratio=4.0,
        depths=[2, 2, 6, 2],
        hidden_sizes=[64, 128, 320, 512],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        padding=[2, 1, 1, 1],
        num_encoder_blocks=4,
        drop_path_rate=0.0,
        hidden_act="gelu",
        use_layer_scale=True,
        layer_scale_init_value=1e-5,
        initializer_range=0.02,
        **kwargs,
    ):
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.stride = stride
        self.padding = padding
        self.pool_size = pool_size
        self.hidden_sizes = hidden_sizes
        self.mlp_ratio = mlp_ratio
        self.depths = depths
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.num_encoder_blocks = num_encoder_blocks
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.initializer_range = initializer_range
        super().__init__(**kwargs)


class PoolFormerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 2e-3
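# A minimal usage sketch, assuming this module lives inside `transformers`
# so that its relative imports resolve:
#
#     from transformers import PoolFormerConfig
#     config = PoolFormerConfig()
#     assert config.model_type == "poolformer"
#     assert config.hidden_sizes == [64, 128, 320, 512]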
699
0
import importlib import os import sys # This is required to make the module import works (when the python process is running from the root of the repo) sys.path.append('''.''') def UpperCamelCase__ ( A__ ) -> Any: snake_case__ : Union[str, Any] = test_file.split(os.path.sep ) if components[0:2] != ["tests", "models"]: raise ValueError( '`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got ' F"""{test_file} instead.""" ) snake_case__ : int = components[-1] if not test_fn.endswith('py' ): raise ValueError(F"""`test_file` should be a python file. Got {test_fn} instead.""" ) if not test_fn.startswith('test_modeling_' ): raise ValueError( F"""`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.""" ) snake_case__ : List[Any] = components[:-1] + [test_fn.replace('.py' , '' )] snake_case__ : Union[str, Any] = '.'.join(_SCREAMING_SNAKE_CASE ) return test_module_path def UpperCamelCase__ ( A__ ) -> Any: snake_case__ : Any = get_module_path(_SCREAMING_SNAKE_CASE ) snake_case__ : str = importlib.import_module(_SCREAMING_SNAKE_CASE ) return test_module def UpperCamelCase__ ( A__ ) -> List[str]: snake_case__ : List[Any] = [] snake_case__ : Optional[int] = get_test_module(_SCREAMING_SNAKE_CASE ) for attr in dir(_SCREAMING_SNAKE_CASE ): if attr.endswith('ModelTester' ): tester_classes.append(getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) # sort with class names return sorted(_SCREAMING_SNAKE_CASE , key=lambda A__ : x.__name__ ) def UpperCamelCase__ ( A__ ) -> Optional[Any]: snake_case__ : Any = [] snake_case__ : Optional[int] = get_test_module(_SCREAMING_SNAKE_CASE ) for attr in dir(_SCREAMING_SNAKE_CASE ): snake_case__ : Optional[Any] = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking # `all_model_classes` is not empty (which also excludes other special classes). snake_case__ : Union[str, Any] = getattr(_SCREAMING_SNAKE_CASE , 'all_model_classes' , [] ) if len(_SCREAMING_SNAKE_CASE ) > 0: test_classes.append(_SCREAMING_SNAKE_CASE ) # sort with class names return sorted(_SCREAMING_SNAKE_CASE , key=lambda A__ : x.__name__ ) def UpperCamelCase__ ( A__ ) -> Any: snake_case__ : Optional[Any] = get_test_classes(_SCREAMING_SNAKE_CASE ) snake_case__ : Optional[int] = set() for test_class in test_classes: model_classes.update(test_class.all_model_classes ) # sort with class names return sorted(_SCREAMING_SNAKE_CASE , key=lambda A__ : x.__name__ ) def UpperCamelCase__ ( A__ ) -> Any: snake_case__ : Union[str, Any] = test_class() if hasattr(_SCREAMING_SNAKE_CASE , 'setUp' ): test.setUp() snake_case__ : List[str] = None if hasattr(_SCREAMING_SNAKE_CASE , 'model_tester' ): # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case. 
if test.model_tester is not None: snake_case__ : Any = test.model_tester.__class__ return model_tester def UpperCamelCase__ ( A__ , A__ ) -> Tuple: snake_case__ : Tuple = get_test_classes(_SCREAMING_SNAKE_CASE ) snake_case__ : Optional[int] = [] for test_class in test_classes: if model_class in test_class.all_model_classes: target_test_classes.append(_SCREAMING_SNAKE_CASE ) # sort with class names return sorted(_SCREAMING_SNAKE_CASE , key=lambda A__ : x.__name__ ) def UpperCamelCase__ ( A__ , A__ ) -> int: snake_case__ : Tuple = get_test_classes_for_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) snake_case__ : Union[str, Any] = [] for test_class in test_classes: snake_case__ : Union[str, Any] = get_model_tester_from_test_class(_SCREAMING_SNAKE_CASE ) if tester_class is not None: tester_classes.append(_SCREAMING_SNAKE_CASE ) # sort with class names return sorted(_SCREAMING_SNAKE_CASE , key=lambda A__ : x.__name__ ) def UpperCamelCase__ ( A__ ) -> Dict: snake_case__ : Optional[int] = get_test_classes(_SCREAMING_SNAKE_CASE ) snake_case__ : Union[str, Any] = {test_class: get_model_tester_from_test_class(_SCREAMING_SNAKE_CASE ) for test_class in test_classes} return test_tester_mapping def UpperCamelCase__ ( A__ ) -> List[Any]: snake_case__ : List[str] = get_model_classes(_SCREAMING_SNAKE_CASE ) snake_case__ : List[Any] = { model_class: get_test_classes_for_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for model_class in model_classes } return model_test_mapping def UpperCamelCase__ ( A__ ) -> Union[str, Any]: snake_case__ : Any = get_model_classes(_SCREAMING_SNAKE_CASE ) snake_case__ : int = { model_class: get_tester_classes_for_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for model_class in model_classes } return model_to_tester_mapping def UpperCamelCase__ ( A__ ) -> Any: if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): return o elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): return o.__name__ elif isinstance(_SCREAMING_SNAKE_CASE , (list, tuple) ): return [to_json(_SCREAMING_SNAKE_CASE ) for x in o] elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): return {to_json(_SCREAMING_SNAKE_CASE ): to_json(_SCREAMING_SNAKE_CASE ) for k, v in o.items()} else: return o
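# Example of the path mapping these helpers implement (names follow the
# upstream `get_test_info` utilities that this snippet mirrors):
#
#     get_module_path("tests/models/bert/test_modeling_bert.py")
#     # -> "tests.models.bert.test_modeling_bert"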
702
import numpy as np
import qiskit


def bb84(key_len: int = 8, seed: int | None = None) -> str:
    rng = np.random.default_rng(seed=seed)
    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2, size=num_qubits)
    # The set of states Alice will prepare.
    alice_state = rng.integers(2, size=num_qubits)
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2, size=num_qubits)

    # Quantum Circuit to simulate BB84
    bbaa_circ = qiskit.QuantumCircuit(num_qubits, name="BB84")

    # Alice prepares her qubits according to rules above.
    for index, _ in enumerate(alice_basis):
        if alice_state[index] == 1:
            bbaa_circ.x(index)
        if alice_basis[index] == 1:
            bbaa_circ.h(index)
    bbaa_circ.barrier()

    # Bob measures the received qubits according to rules above.
    for index, _ in enumerate(bob_basis):
        if bob_basis[index] == 1:
            bbaa_circ.h(index)

    bbaa_circ.barrier()
    bbaa_circ.measure_all()

    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend("aer_simulator")
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bbaa_circ, sim, shots=1, seed_simulator=seed)
    # Returns the result of measurement.
    result = job.result().get_counts(bbaa_circ).most_frequent()

    # Extracting the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = "".join(
        [
            result_bit
            for alice_basis_bit, bob_basis_bit, result_bit in zip(alice_basis, bob_basis, result)
            if alice_basis_bit == bob_basis_bit
        ]
    )

    # Get final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key) >= key_len else gen_key.ljust(key_len, "0")
    return key


if __name__ == "__main__":
    print(f"The generated key is : {bb84(8, seed=0)}")
    from doctest import testmod

    testmod()
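# A minimal sketch of the basis-sifting step in isolation: only positions where
# Alice's and Bob's random bases agree contribute bits to the shared key.
if __name__ == "__main__":
    a_basis, b_basis, bits = [0, 1, 1, 0], [0, 0, 1, 1], "1011"
    sifted = "".join(bit for a, b, bit in zip(a_basis, b_basis, bits) if a == b)
    assert sifted == "11"  # positions 0 and 2 agree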
699
0
'''simple docstring''' import os import tempfile import unittest import numpy as np from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import require_flax, slow if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline @require_flax class __snake_case ( unittest.TestCase ): def __a ( self ) -> int: '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdirname: # pipeline has Flax weights snake_case__ : str = FlaxDiffusionPipeline.from_pretrained( 'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=A_ , cache_dir=A_ ) snake_case__ : Any = [t[-1] for t in os.walk(os.path.join(A_ , os.listdir(A_ )[0] , 'snapshots' ) )] snake_case__ : Optional[int] = [item for sublist in all_root_files for item in sublist] # None of the downloaded files should be a PyTorch file even if we have some here: # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin assert not any(f.endswith('.bin' ) for f in files ) @slow @require_flax class __snake_case ( unittest.TestCase ): def __a ( self ) -> List[Any]: '''simple docstring''' snake_case__ , snake_case__ : Tuple = FlaxStableDiffusionPipeline.from_pretrained( 'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=A_ ) snake_case__ : int = ( 'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of' ' field, close up, split lighting, cinematic' ) snake_case__ : Optional[int] = jax.random.PRNGKey(0 ) snake_case__ : Any = 4 snake_case__ : List[Any] = jax.device_count() snake_case__ : Optional[Any] = num_samples * [prompt] snake_case__ : int = pipeline.prepare_inputs(A_ ) # shard inputs and rng snake_case__ : Optional[Any] = replicate(A_ ) snake_case__ : Any = jax.random.split(A_ , A_ ) snake_case__ : Optional[int] = shard(A_ ) snake_case__ : Optional[Any] = pipeline(A_ , A_ , A_ , A_ , jit=A_ ).images assert images.shape == (num_samples, 1, 64, 64, 3) if jax.device_count() == 8: assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 4.1_5_1_4_7_4_5 ) < 1E-3 assert np.abs(np.abs(A_ , dtype=np.floataa ).sum() - 49947.875 ) < 5E-1 snake_case__ : Optional[int] = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) ) assert len(A_ ) == num_samples def __a ( self ) -> List[Any]: '''simple docstring''' snake_case__ , snake_case__ : List[str] = FlaxStableDiffusionPipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4' , revision='flax' , safety_checker=A_ ) snake_case__ : Tuple = ( 'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of' ' field, close up, split lighting, cinematic' ) snake_case__ : Optional[int] = jax.random.PRNGKey(0 ) snake_case__ : List[str] = 50 snake_case__ : Optional[Any] = jax.device_count() snake_case__ : List[Any] = num_samples * [prompt] snake_case__ : List[Any] = pipeline.prepare_inputs(A_ ) # shard inputs and rng snake_case__ : str = replicate(A_ ) snake_case__ : List[str] = jax.random.split(A_ , A_ ) snake_case__ : Dict = shard(A_ ) snake_case__ : Union[str, Any] = pipeline(A_ , A_ , A_ , A_ , jit=A_ ).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_5_6_5_2_4_0_1) ) < 1E-3 assert 
np.abs((np.abs(A_ , dtype=np.floataa ).sum() - 2383808.2) ) < 5E-1 def __a ( self ) -> str: '''simple docstring''' snake_case__ , snake_case__ : List[str] = FlaxStableDiffusionPipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=A_ ) snake_case__ : Optional[Any] = ( 'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of' ' field, close up, split lighting, cinematic' ) snake_case__ : Any = jax.random.PRNGKey(0 ) snake_case__ : Optional[Any] = 50 snake_case__ : Union[str, Any] = jax.device_count() snake_case__ : Optional[Any] = num_samples * [prompt] snake_case__ : int = pipeline.prepare_inputs(A_ ) # shard inputs and rng snake_case__ : Dict = replicate(A_ ) snake_case__ : Optional[int] = jax.random.split(A_ , A_ ) snake_case__ : Optional[Any] = shard(A_ ) snake_case__ : Optional[Any] = pipeline(A_ , A_ , A_ , A_ , jit=A_ ).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_4_0_0_3_9_0_6) ) < 1E-3 assert np.abs((np.abs(A_ , dtype=np.floataa ).sum() - 2373516.75) ) < 5E-1 def __a ( self ) -> Any: '''simple docstring''' snake_case__ , snake_case__ : List[str] = FlaxStableDiffusionPipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa ) snake_case__ : str = ( 'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of' ' field, close up, split lighting, cinematic' ) snake_case__ : Dict = jax.random.PRNGKey(0 ) snake_case__ : List[Any] = 50 snake_case__ : str = jax.device_count() snake_case__ : Dict = num_samples * [prompt] snake_case__ : Optional[Any] = pipeline.prepare_inputs(A_ ) # shard inputs and rng snake_case__ : Dict = replicate(A_ ) snake_case__ : Union[str, Any] = jax.random.split(A_ , A_ ) snake_case__ : Tuple = shard(A_ ) snake_case__ : str = pipeline(A_ , A_ , A_ , A_ , jit=A_ ).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_4_0_0_3_9_0_6) ) < 1E-3 assert np.abs((np.abs(A_ , dtype=np.floataa ).sum() - 2373516.75) ) < 5E-1 def __a ( self ) -> Optional[Any]: '''simple docstring''' snake_case__ : List[str] = FlaxDDIMScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' , set_alpha_to_one=A_ , steps_offset=1 , ) snake_case__ , snake_case__ : List[str] = FlaxStableDiffusionPipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , scheduler=A_ , safety_checker=A_ , ) snake_case__ : List[str] = scheduler.create_state() snake_case__ : Dict = scheduler_state snake_case__ : Union[str, Any] = ( 'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of' ' field, close up, split lighting, cinematic' ) snake_case__ : Dict = jax.random.PRNGKey(0 ) snake_case__ : Optional[Any] = 50 snake_case__ : Dict = jax.device_count() snake_case__ : Optional[Any] = num_samples * [prompt] snake_case__ : Optional[int] = pipeline.prepare_inputs(A_ ) # shard inputs and rng snake_case__ : Dict = replicate(A_ ) snake_case__ : Any = jax.random.split(A_ , A_ ) snake_case__ : Dict = shard(A_ ) snake_case__ : Optional[Any] = pipeline(A_ , A_ , A_ , A_ , jit=A_ ).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert 
np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_4_5_0_4_3_9_4_5) ) < 1E-3 assert np.abs((np.abs(A_ , dtype=np.floataa ).sum() - 2347693.5) ) < 5E-1 def __a ( self ) -> Union[str, Any]: '''simple docstring''' snake_case__ : Dict = ( 'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of' ' field, close up, split lighting, cinematic' ) snake_case__ : str = jax.device_count() snake_case__ : Optional[int] = num_samples * [prompt] snake_case__ : Dict = jax.random.split(jax.random.PRNGKey(0 ) , A_ ) snake_case__ , snake_case__ : Optional[Any] = FlaxStableDiffusionPipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=A_ , ) snake_case__ : Union[str, Any] = replicate(A_ ) snake_case__ : Dict = pipeline.prepare_inputs(A_ ) snake_case__ : Any = shard(A_ ) snake_case__ : Optional[int] = pipeline(A_ , A_ , A_ , jit=A_ ).images assert images.shape == (num_samples, 1, 512, 512, 3) snake_case__ : Any = images[2, 0, 256, 10:17, 1] # With memory efficient attention snake_case__ , snake_case__ : Union[str, Any] = FlaxStableDiffusionPipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=A_ , use_memory_efficient_attention=A_ , ) snake_case__ : List[str] = replicate(A_ ) snake_case__ : str = pipeline.prepare_inputs(A_ ) snake_case__ : List[str] = shard(A_ ) snake_case__ : Optional[Any] = pipeline(A_ , A_ , A_ , jit=A_ ).images assert images_eff.shape == (num_samples, 1, 512, 512, 3) snake_case__ : List[str] = images[2, 0, 256, 10:17, 1] # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum` # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now. assert abs(slice_eff - slice ).max() < 1E-2
703
def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    """Fast modular exponentiation by repeated squaring."""
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value


def solution(base: int = 1777, height: int = 1855, digits: int = 8) -> int:
    """Last `digits` digits of the hyperexponentiation (tetration) of `base` by `height`."""
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
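# A quick sanity check of _modexpt against Python's built-in three-argument pow.
if __name__ == "__main__":
    assert _modexpt(3, 20, 1000) == pow(3, 20, 1000)
    assert _modexpt(1777, 1855, 10**8) == pow(1777, 1855, 10**8)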
699
0
from collections import UserDict from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING from ..tf_utils import stable_softmax lowerCAmelCase__ : Optional[int] = logging.get_logger(__name__) @add_end_docstrings(_lowerCamelCase ) class __snake_case ( _lowerCamelCase ): def __init__( self , **__UpperCamelCase ) -> int: '''simple docstring''' super().__init__(**UpperCamelCase_ ) requires_backends(self , 'vision' ) self.check_model_type( TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if self.framework == 'tf' else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING ) def __call__( self , __UpperCamelCase , **__UpperCamelCase ) -> List[Any]: '''simple docstring''' return super().__call__(UpperCamelCase_ , **UpperCamelCase_ ) def __a ( self , **__UpperCamelCase ) -> List[str]: '''simple docstring''' snake_case__ : Optional[int] = {} if "candidate_labels" in kwargs: snake_case__ : Union[str, Any] = kwargs['candidate_labels'] if "hypothesis_template" in kwargs: snake_case__ : int = kwargs['hypothesis_template'] return preprocess_params, {}, {} def __a ( self , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase="This is a photo of {}." ) -> Union[str, Any]: '''simple docstring''' snake_case__ : Dict = load_image(UpperCamelCase_ ) snake_case__ : List[str] = self.image_processor(images=[image] , return_tensors=self.framework ) snake_case__ : Optional[Any] = candidate_labels snake_case__ : List[Any] = [hypothesis_template.format(UpperCamelCase_ ) for x in candidate_labels] snake_case__ : Union[str, Any] = self.tokenizer(UpperCamelCase_ , return_tensors=self.framework , padding=UpperCamelCase_ ) snake_case__ : Any = [text_inputs] return inputs def __a ( self , __UpperCamelCase ) -> Optional[Any]: '''simple docstring''' snake_case__ : Optional[Any] = model_inputs.pop('candidate_labels' ) snake_case__ : List[str] = model_inputs.pop('text_inputs' ) if isinstance(text_inputs[0] , UpperCamelCase_ ): snake_case__ : Optional[int] = text_inputs[0] else: # Batching case. 
snake_case__ : List[str] = text_inputs[0][0] snake_case__ : Optional[Any] = self.model(**UpperCamelCase_ , **UpperCamelCase_ ) snake_case__ : Optional[Any] = { 'candidate_labels': candidate_labels, 'logits': outputs.logits_per_image, } return model_outputs def __a ( self , __UpperCamelCase ) -> List[str]: '''simple docstring''' snake_case__ : Optional[int] = model_outputs.pop('candidate_labels' ) snake_case__ : Optional[int] = model_outputs['logits'][0] if self.framework == "pt": snake_case__ : List[Any] = logits.softmax(dim=-1 ).squeeze(-1 ) snake_case__ : Tuple = probs.tolist() if not isinstance(UpperCamelCase_ , UpperCamelCase_ ): snake_case__ : List[Any] = [scores] elif self.framework == "tf": snake_case__ : Optional[int] = stable_softmax(UpperCamelCase_ , axis=-1 ) snake_case__ : List[Any] = probs.numpy().tolist() else: raise ValueError(F"""Unsupported framework: {self.framework}""" ) snake_case__ : List[Any] = [ {'score': score, 'label': candidate_label} for score, candidate_label in sorted(zip(UpperCamelCase_ , UpperCamelCase_ ) , key=lambda __UpperCamelCase : -x[0] ) ] return result
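# A minimal usage sketch through the high-level factory (the checkpoint name is
# the standard CLIP model; any zero-shot image-classification checkpoint works):
#
#     from transformers import pipeline
#     classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
#     classifier("cat.png", candidate_labels=["cat", "dog"])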
704
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run

import sys
import warnings
from os.path import abspath, dirname, join

# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)


def pytest_addoption(parser):
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
699
0
from __future__ import annotations

from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any


@dataclass
class Node:
    data: int
    left: Node | None = None
    right: Node | None = None


def make_tree() -> Node:
    # Builds the 5-node demo tree: 1 at the root, 2 and 3 as children,
    # and 4 and 5 as children of 2.
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree


def preorder(root: Node | None) -> list[int]:
    return [root.data, *preorder(root.left), *preorder(root.right)] if root else []


def postorder(root: Node | None) -> list[int]:
    return postorder(root.left) + postorder(root.right) + [root.data] if root else []


def inorder(root: Node | None) -> list[int]:
    return [*inorder(root.left), root.data, *inorder(root.right)] if root else []


def height(root: Node | None) -> int:
    return (max(height(root.left), height(root.right)) + 1) if root else 0


def level_order(root: Node | None) -> Sequence[Node | None]:
    output: list[Any] = []
    if root is None:
        return output
    process_queue = deque([root])
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)
        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)
    return output


def get_nodes_from_left_to_right(root: Node | None, level: int) -> Sequence[Node | None]:
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.left, level - 1)
            populate_output(root.right, level - 1)

    populate_output(root, level)
    return output


def get_nodes_from_right_to_left(root: Node | None, level: int) -> Sequence[Node | None]:
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.right, level - 1)
            populate_output(root.left, level - 1)

    populate_output(root, level)
    return output


def zigzag(root: Node | None) -> Sequence[Node | None] | list[Any]:
    # Alternate the direction of traversal level by level.
    if root is None:
        return []
    output: list[Sequence[Node | None]] = []
    flag = 0
    height_tree = height(root)
    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0
    return output


def main() -> None:  # Main function for testing.
    root = make_tree()
    print(f"In-order Traversal: {inorder(root)}")
    print(f"Pre-order Traversal: {preorder(root)}")
    print(f"Post-order Traversal: {postorder(root)}", "\n")
    print(f"Height of Tree: {height(root)}", "\n")
    print("Complete Level Order Traversal: ")
    print(level_order(root), "\n")
    print("Level-wise order Traversal: ")
    for level in range(1, height(root) + 1):
        print(f"Level {level}:", get_nodes_from_left_to_right(root, level=level))
    print("\nZigZag order Traversal: ")
    print(zigzag(root))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
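# A quick check of the zigzag traversal on the 5-node demo tree from make_tree().
if __name__ == "__main__":
    assert zigzag(make_tree()) == [[1], [3, 2], [4, 5]]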
705
def hexagonal_numbers(length: int) -> list[int]:
    """Return the first `length` hexagonal numbers, h(n) = n * (2n - 1)."""
    if not isinstance(length, int) or length <= 0:
        raise ValueError("Length must be a positive integer.")
    return [n * (2 * n - 1) for n in range(length)]


if __name__ == "__main__":
    print(hexagonal_numbers(length=5))
    print(hexagonal_numbers(length=10))
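# A small check: h(n) = n(2n - 1) gives 0, 1, 6, 15, 28, ... (OEIS A000384).
if __name__ == "__main__":
    assert hexagonal_numbers(5) == [0, 1, 6, 15, 28]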
699
0
from pathlib import Path

import fire


def minify(src_dir: str, dest_dir: str, n: int) -> None:
    """Write the first `n` lines of each file in `src_dir` to `dest_dir`."""
    src_path = Path(src_dir)
    dest_root = Path(dest_dir)
    dest_root.mkdir(exist_ok=True)
    for path in src_path.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_path = dest_root.joinpath(path.name)
        print(dest_path)
        dest_path.open("w").write("\n".join(new))


if __name__ == "__main__":
    fire.Fire(minify)
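# Usage sketch from the command line via python-fire (the file name and paths
# are illustrative):
#
#     python minify_dataset.py --src_dir ./data/full --dest_dir ./data/tiny --n 100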
706
import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( ConditionalDetrConfig, ConditionalDetrForObjectDetection, ConditionalDetrForSegmentation, ConditionalDetrImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase__ : Dict = logging.get_logger(__name__) # here we list all keys to be renamed (original name on the left, our name on the right) lowerCAmelCase__ : Optional[Any] = [] for i in range(6): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', F'''encoder.layers.{i}.self_attn.out_proj.weight''') ) rename_keys.append( (F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight''')) rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias''')) rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight''')) rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias''')) rename_keys.append( (F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias''')) rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight''')) rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias''')) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( (F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''decoder.layers.{i}.self_attn.out_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''') ) rename_keys.append( ( F'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''', F'''decoder.layers.{i}.encoder_attn.out_proj.weight''', ) ) rename_keys.append( ( F'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''', F'''decoder.layers.{i}.encoder_attn.out_proj.bias''', ) ) rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight''')) rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias''')) rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight''')) rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''') ) 
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight''')) rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias''')) # q, k, v projections in self/cross-attention in decoder for conditional DETR rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', F'''decoder.layers.{i}.sa_qcontent_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', F'''decoder.layers.{i}.sa_kcontent_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', F'''decoder.layers.{i}.sa_qpos_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', F'''decoder.layers.{i}.sa_kpos_proj.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.weight''', F'''decoder.layers.{i}.sa_v_proj.weight''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', F'''decoder.layers.{i}.ca_qcontent_proj.weight''') ) # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight")) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', F'''decoder.layers.{i}.ca_kcontent_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', F'''decoder.layers.{i}.ca_kpos_proj.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.weight''', F'''decoder.layers.{i}.ca_v_proj.weight''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', F'''decoder.layers.{i}.ca_qpos_sine_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', F'''decoder.layers.{i}.sa_qcontent_proj.bias''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', F'''decoder.layers.{i}.sa_kcontent_proj.bias''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', F'''decoder.layers.{i}.sa_qpos_proj.bias''')) rename_keys.append((F'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', F'''decoder.layers.{i}.sa_kpos_proj.bias''')) rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.bias''', F'''decoder.layers.{i}.sa_v_proj.bias''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', F'''decoder.layers.{i}.ca_qcontent_proj.bias''') ) # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias")) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', F'''decoder.layers.{i}.ca_kcontent_proj.bias''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', F'''decoder.layers.{i}.ca_kpos_proj.bias''')) rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.bias''', F'''decoder.layers.{i}.ca_v_proj.bias''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', F'''decoder.layers.{i}.ca_qpos_sine_proj.bias''') ) # convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads # for conditional DETR, also convert reference point head and query scale MLP rename_keys.extend( [ ('''input_proj.weight''', '''input_projection.weight'''), ('''input_proj.bias''', '''input_projection.bias'''), ('''query_embed.weight''', '''query_position_embeddings.weight'''), 
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''), ('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''), ('''class_embed.weight''', '''class_labels_classifier.weight'''), ('''class_embed.bias''', '''class_labels_classifier.bias'''), ('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''), ('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''), ('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''), ('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''), ('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''), ('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''), ('''transformer.decoder.ref_point_head.layers.0.weight''', '''decoder.ref_point_head.layers.0.weight'''), ('''transformer.decoder.ref_point_head.layers.0.bias''', '''decoder.ref_point_head.layers.0.bias'''), ('''transformer.decoder.ref_point_head.layers.1.weight''', '''decoder.ref_point_head.layers.1.weight'''), ('''transformer.decoder.ref_point_head.layers.1.bias''', '''decoder.ref_point_head.layers.1.bias'''), ('''transformer.decoder.query_scale.layers.0.weight''', '''decoder.query_scale.layers.0.weight'''), ('''transformer.decoder.query_scale.layers.0.bias''', '''decoder.query_scale.layers.0.bias'''), ('''transformer.decoder.query_scale.layers.1.weight''', '''decoder.query_scale.layers.1.weight'''), ('''transformer.decoder.query_scale.layers.1.bias''', '''decoder.query_scale.layers.1.bias'''), ('''transformer.decoder.layers.0.ca_qpos_proj.weight''', '''decoder.layers.0.ca_qpos_proj.weight'''), ('''transformer.decoder.layers.0.ca_qpos_proj.bias''', '''decoder.layers.0.ca_qpos_proj.bias'''), ] ) def UpperCamelCase__ ( A__ , A__ , A__ ) -> List[str]: snake_case__ : int = state_dict.pop(A__ ) snake_case__ : Union[str, Any] = val def UpperCamelCase__ ( A__ ) -> int: snake_case__ : List[Any] = OrderedDict() for key, value in state_dict.items(): if "backbone.0.body" in key: snake_case__ : Any = key.replace('backbone.0.body' , 'backbone.conv_encoder.model' ) snake_case__ : Optional[int] = value else: snake_case__ : Optional[int] = value return new_state_dict def UpperCamelCase__ ( A__ , A__=False ) -> Optional[int]: snake_case__ : Optional[int] = '' if is_panoptic: snake_case__ : Tuple = 'conditional_detr.' 
# first: transformer encoder for i in range(6 ): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) snake_case__ : int = state_dict.pop(F"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" ) snake_case__ : str = state_dict.pop(F"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" ) # next, add query, keys and values (in that order) to the state dict snake_case__ : Union[str, Any] = in_proj_weight[:256, :] snake_case__ : Union[str, Any] = in_proj_bias[:256] snake_case__ : Union[str, Any] = in_proj_weight[256:512, :] snake_case__ : Optional[Any] = in_proj_bias[256:512] snake_case__ : List[str] = in_proj_weight[-256:, :] snake_case__ : Tuple = in_proj_bias[-256:] def UpperCamelCase__ ( ) -> Tuple: snake_case__ : int = 'http://images.cocodataset.org/val2017/000000039769.jpg' snake_case__ : str = Image.open(requests.get(A__ , stream=A__ ).raw ) return im @torch.no_grad() def UpperCamelCase__ ( A__ , A__ ) -> str: snake_case__ : List[Any] = ConditionalDetrConfig() # set backbone and dilation attributes if "resnet101" in model_name: snake_case__ : Any = 'resnet101' if "dc5" in model_name: snake_case__ : Any = True snake_case__ : int = 'panoptic' in model_name if is_panoptic: snake_case__ : str = 250 else: snake_case__ : Union[str, Any] = 91 snake_case__ : Optional[int] = 'huggingface/label-files' snake_case__ : Optional[Any] = 'coco-detection-id2label.json' snake_case__ : str = json.load(open(hf_hub_download(A__ , A__ , repo_type='dataset' ) , 'r' ) ) snake_case__ : List[Any] = {int(A__ ): v for k, v in idalabel.items()} snake_case__ : Any = idalabel snake_case__ : int = {v: k for k, v in idalabel.items()} # load image processor snake_case__ : List[Any] = 'coco_panoptic' if is_panoptic else 'coco_detection' snake_case__ : List[Any] = ConditionalDetrImageProcessor(format=A__ ) # prepare image snake_case__ : List[str] = prepare_img() snake_case__ : Any = image_processor(images=A__ , return_tensors='pt' ) snake_case__ : Dict = encoding['pixel_values'] logger.info(F"""Converting model {model_name}...""" ) # load original model from torch hub snake_case__ : Any = torch.hub.load('DeppMeng/ConditionalDETR' , A__ , pretrained=A__ ).eval() snake_case__ : Tuple = conditional_detr.state_dict() # rename keys for src, dest in rename_keys: if is_panoptic: snake_case__ : List[Any] = 'conditional_detr.' + src rename_key(A__ , A__ , A__ ) snake_case__ : Dict = rename_backbone_keys(A__ ) # query, key and value matrices need special treatment read_in_q_k_v(A__ , is_panoptic=A__ ) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them snake_case__ : Optional[int] = 'conditional_detr.model.' if is_panoptic else 'model.' 
for key in state_dict.copy().keys(): if is_panoptic: if ( key.startswith('conditional_detr' ) and not key.startswith('class_labels_classifier' ) and not key.startswith('bbox_predictor' ) ): snake_case__ : List[Any] = state_dict.pop(A__ ) snake_case__ : Optional[int] = val elif "class_labels_classifier" in key or "bbox_predictor" in key: snake_case__ : str = state_dict.pop(A__ ) snake_case__ : List[Any] = val elif key.startswith('bbox_attention' ) or key.startswith('mask_head' ): continue else: snake_case__ : Union[str, Any] = state_dict.pop(A__ ) snake_case__ : Dict = val else: if not key.startswith('class_labels_classifier' ) and not key.startswith('bbox_predictor' ): snake_case__ : List[Any] = state_dict.pop(A__ ) snake_case__ : Optional[int] = val # finally, create HuggingFace model and load state dict snake_case__ : Union[str, Any] = ConditionalDetrForSegmentation(A__ ) if is_panoptic else ConditionalDetrForObjectDetection(A__ ) model.load_state_dict(A__ ) model.eval() model.push_to_hub(repo_id=A__ , organization='DepuMeng' , commit_message='Add model' ) # verify our conversion snake_case__ : Tuple = conditional_detr(A__ ) snake_case__ : str = model(A__ ) assert torch.allclose(outputs.logits , original_outputs['pred_logits'] , atol=1e-4 ) assert torch.allclose(outputs.pred_boxes , original_outputs['pred_boxes'] , atol=1e-4 ) if is_panoptic: assert torch.allclose(outputs.pred_masks , original_outputs['pred_masks'] , atol=1e-4 ) # Save model and image processor logger.info(F"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" ) Path(A__ ).mkdir(exist_ok=A__ ) model.save_pretrained(A__ ) image_processor.save_pretrained(A__ ) if __name__ == "__main__": lowerCAmelCase__ : Any = argparse.ArgumentParser() parser.add_argument( '''--model_name''', default='''conditional_detr_resnet50''', type=str, help='''Name of the CONDITIONAL_DETR model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.''' ) lowerCAmelCase__ : int = parser.parse_args() convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
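# Example invocation of the conversion script (the script file name and output
# path here are illustrative):
#
#     python convert_conditional_detr_checkpoint.py \
#         --model_name conditional_detr_resnet50 \
#         --pytorch_dump_folder_path ./conditional_detr_resnet50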
699
0
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

_import_structure = {
    "configuration_clipseg": [
        "CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "CLIPSegConfig",
        "CLIPSegTextConfig",
        "CLIPSegVisionConfig",
    ],
    "processing_clipseg": ["CLIPSegProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_clipseg"] = [
        "CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CLIPSegModel",
        "CLIPSegPreTrainedModel",
        "CLIPSegTextModel",
        "CLIPSegVisionModel",
        "CLIPSegForImageSegmentation",
    ]

if TYPE_CHECKING:
    from .configuration_clipseg import (
        CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
        CLIPSegConfig,
        CLIPSegTextConfig,
        CLIPSegVisionConfig,
    )
    from .processing_clipseg import CLIPSegProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_clipseg import (
            CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
            CLIPSegForImageSegmentation,
            CLIPSegModel,
            CLIPSegPreTrainedModel,
            CLIPSegTextModel,
            CLIPSegVisionModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
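# Sketch of what the lazy module enables (assuming this package is importable
# as part of `transformers`): submodules load only on first attribute access.
#
#     from transformers.models.clipseg import CLIPSegConfig  # triggers the real import
#     config = CLIPSegConfig()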
707
from collections import namedtuple

from_to = namedtuple("from_to", "from_ to")

# Each entry converts via cubic metres: `from_` scales the unit into cubic
# metres, `to` scales cubic metres into the unit.
METRIC_CONVERSION = {
    "cubicmeter": from_to(1, 1),
    "litre": from_to(0.001, 1000),
    "kilolitre": from_to(1, 1),
    "gallon": from_to(0.00454, 264.172),
    "cubicyard": from_to(0.76455, 1.30795),
    "cubicfoot": from_to(0.028, 35.3147),
    "cup": from_to(0.000236588, 4226.75),
}


def volume_conversion(value: float, from_type: str, to_type: str) -> float:
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'from_type' value: {from_type!r} Supported values are:\n" + ", ".join(METRIC_CONVERSION)
        )
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'to_type' value: {to_type!r}. Supported values are:\n" + ", ".join(METRIC_CONVERSION)
        )
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to


if __name__ == "__main__":
    import doctest

    doctest.testmod()
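# A minimal usage sketch: values are normalised to cubic metres (`from_`),
# then scaled into the target unit (`to`).
if __name__ == "__main__":
    litres = volume_conversion(1, "cubicmeter", "litre")
    assert abs(litres - 1000) < 1e-6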
699
0
import numpy as np

import datasets

_DESCRIPTION = """
Compute the Mahalanobis Distance

Mahalonobis distance is the distance between a point and a distribution.
And not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.
It was introduced by Prof. P. C. Mahalanobis in 1936
and has been used in various statistical applications ever since
[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]
"""

_CITATION = """\
@article{de2000mahalanobis,
  title={The mahalanobis distance},
  author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\\'e}sir{\\'e} L},
  journal={Chemometrics and intelligent laboratory systems},
  volume={50},
  number={1},
  pages={1--18},
  year={2000},
  publisher={Elsevier}
}
"""

_KWARGS_DESCRIPTION = """
Args:
    X: List of datapoints to be compared with the `reference_distribution`.
    reference_distribution: List of datapoints from the reference distribution we want to compare to.
Returns:
    mahalanobis: The Mahalonobis distance for each datapoint in `X`.
Examples:

    >>> mahalanobis_metric = datasets.load_metric("mahalanobis")
    >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])
    >>> print(results)
    {'mahalanobis': array([0.5])}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mahalanobis(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "X": datasets.Sequence(datasets.Value("float", id="sequence"), id="X"),
                }
            ),
        )

    def _compute(self, X, reference_distribution):
        X = np.array(X)
        reference_distribution = np.array(reference_distribution)

        # Assert that arrays are 2D
        if len(X.shape) != 2:
            raise ValueError("Expected `X` to be a 2D vector")
        if len(reference_distribution.shape) != 2:
            raise ValueError("Expected `reference_distribution` to be a 2D vector")
        if reference_distribution.shape[0] < 2:
            raise ValueError(
                "Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension"
            )

        # Get mahalanobis distance for each prediction
        X_minus_mu = X - np.mean(reference_distribution)
        cov = np.cov(reference_distribution.T)
        try:
            inv_covmat = np.linalg.inv(cov)
        except np.linalg.LinAlgError:
            inv_covmat = np.linalg.pinv(cov)
        left_term = np.dot(X_minus_mu, inv_covmat)
        mahal_dist = np.dot(left_term, X_minus_mu.T).diagonal()

        return {"mahalanobis": mahal_dist}
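# A minimal usage sketch mirroring the docstring example; calling `_compute`
# directly sidesteps the datasets caching machinery for a quick check.
if __name__ == "__main__":
    results = Mahalanobis()._compute(X=[[0, 1]], reference_distribution=[[0, 1], [1, 0]])
    print(results)  # {'mahalanobis': array([0.5])}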
708
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging lowerCAmelCase__ : Tuple = logging.get_logger(__name__) lowerCAmelCase__ : Union[str, Any] = '''▁''' lowerCAmelCase__ : List[Any] = {'''vocab_file''': '''sentencepiece.bpe.model'''} lowerCAmelCase__ : Optional[Any] = { '''vocab_file''': { '''facebook/xglm-564M''': '''https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model''', } } lowerCAmelCase__ : str = { '''facebook/xglm-564M''': 20_48, } class __snake_case ( _lowerCamelCase ): __lowerCamelCase = VOCAB_FILES_NAMES __lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP __lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowerCamelCase = ["""input_ids""", """attention_mask"""] def __init__( self , __UpperCamelCase , __UpperCamelCase="<s>" , __UpperCamelCase="</s>" , __UpperCamelCase="</s>" , __UpperCamelCase="<s>" , __UpperCamelCase="<unk>" , __UpperCamelCase="<pad>" , __UpperCamelCase = None , **__UpperCamelCase , ) -> None: '''simple docstring''' snake_case__ : Any = {} if sp_model_kwargs is None else sp_model_kwargs # Compatibility with the original tokenizer snake_case__ : Tuple = 7 snake_case__ : Dict = [F"""<madeupword{i}>""" for i in range(self.num_madeup_words )] snake_case__ : Union[str, Any] = kwargs.get('additional_special_tokens' , [] ) kwargs["additional_special_tokens"] += [ word for word in madeup_words if word not in kwargs["additional_special_tokens"] ] super().__init__( bos_token=__UpperCamelCase , eos_token=__UpperCamelCase , unk_token=__UpperCamelCase , sep_token=__UpperCamelCase , cls_token=__UpperCamelCase , pad_token=__UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCamelCase , ) snake_case__ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(__UpperCamelCase ) ) snake_case__ : Optional[Any] = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab snake_case__ : Tuple = 1 # Mimic fairseq token-to-id alignment for the first 4 token snake_case__ : Tuple = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3} snake_case__ : List[Any] = len(self.sp_model ) snake_case__ : Optional[Any] = {F"""<madeupword{i}>""": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )} self.fairseq_tokens_to_ids.update(__UpperCamelCase ) snake_case__ : Union[str, Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self ) -> List[Any]: '''simple docstring''' snake_case__ : Union[str, Any] = self.__dict__.copy() snake_case__ : Optional[Any] = None snake_case__ : Tuple = self.sp_model.serialized_model_proto() return state def __setstate__( self , __UpperCamelCase ) -> Union[str, Any]: '''simple docstring''' snake_case__ : Union[str, Any] = d # for backward compatibility if not hasattr(self , 'sp_model_kwargs' ): snake_case__ : Any = {} snake_case__ : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def __a ( self , __UpperCamelCase , __UpperCamelCase = None ) -> List[int]: '''simple docstring''' if token_ids_a is None: return [self.sep_token_id] + token_ids_a snake_case__ : str = [self.sep_token_id] return sep + token_ids_a + sep + sep + token_ids_a def __a ( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = False ) -> List[int]: '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__UpperCamelCase , token_ids_a=__UpperCamelCase , already_has_special_tokens=__UpperCamelCase ) if token_ids_a is None: return [1] + ([0] * len(__UpperCamelCase )) return [1] + ([0] * len(__UpperCamelCase )) + [1, 1] + ([0] * len(__UpperCamelCase )) def __a ( self , __UpperCamelCase , __UpperCamelCase = None ) -> List[int]: '''simple docstring''' snake_case__ : int = [self.sep_token_id] if token_ids_a is None: return len(sep + token_ids_a ) * [0] return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0] @property def __a ( self ) -> Tuple: '''simple docstring''' return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words def __a ( self ) -> Union[str, Any]: '''simple docstring''' snake_case__ : int = {self.convert_ids_to_tokens(__UpperCamelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __a ( self , __UpperCamelCase ) -> List[str]: '''simple docstring''' return self.sp_model.encode(__UpperCamelCase , out_type=__UpperCamelCase ) def __a ( self , __UpperCamelCase ) -> Union[str, Any]: '''simple docstring''' if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] snake_case__ : Optional[Any] = self.sp_model.PieceToId(__UpperCamelCase ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def __a ( self , __UpperCamelCase ) -> Dict: '''simple docstring''' if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def __a ( self , __UpperCamelCase ) -> int: '''simple docstring''' snake_case__ : int = ''.join(__UpperCamelCase ).replace(__UpperCamelCase , ' ' ).strip() return out_string def __a ( self , __UpperCamelCase , __UpperCamelCase = None ) -> Tuple[str]: '''simple docstring''' if not os.path.isdir(__UpperCamelCase ): 
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return snake_case__ : List[str] = os.path.join( __UpperCamelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCamelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __UpperCamelCase ) elif not os.path.isfile(self.vocab_file ): with open(__UpperCamelCase , 'wb' ) as fi: snake_case__ : Any = self.sp_model.serialized_model_proto() fi.write(__UpperCamelCase ) return (out_vocab_file,)
699
0
import dataclasses import json import sys import types from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError from copy import copy from enum import Enum from inspect import isclass from pathlib import Path from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints import yaml lowerCAmelCase__ : Optional[Any] = NewType('''DataClass''', Any) lowerCAmelCase__ : Dict = NewType('''DataClassType''', Any) def UpperCamelCase__ ( A__ ) -> Tuple: if isinstance(__lowerCAmelCase , __lowerCAmelCase ): return v if v.lower() in ("yes", "true", "t", "y", "1"): return True elif v.lower() in ("no", "false", "f", "n", "0"): return False else: raise ArgumentTypeError( F"""Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).""" ) def UpperCamelCase__ ( A__ ) -> Any: snake_case__ : str = {str(__lowerCAmelCase ): choice for choice in choices} return lambda A__ : str_to_choice.get(__lowerCAmelCase , __lowerCAmelCase ) def UpperCamelCase__ ( *, A__ = None , A__ = None , A__ = dataclasses.MISSING , A__ = dataclasses.MISSING , A__ = None , **A__ , ) -> Optional[Any]: if metadata is None: # Important, don't use as default param in function signature because dict is mutable and shared across function calls snake_case__ : Any = {} if aliases is not None: snake_case__ : Any = aliases if help is not None: snake_case__ : List[str] = help return dataclasses.field(metadata=__lowerCAmelCase , default=__lowerCAmelCase , default_factory=__lowerCAmelCase , **__lowerCAmelCase ) class __snake_case ( _lowerCamelCase ): __lowerCamelCase = 42 def __init__( self , __UpperCamelCase , **__UpperCamelCase ) -> Optional[int]: '''simple docstring''' if "formatter_class" not in kwargs: snake_case__ : str = ArgumentDefaultsHelpFormatter super().__init__(**_lowerCAmelCase ) if dataclasses.is_dataclass(_lowerCAmelCase ): snake_case__ : List[Any] = [dataclass_types] snake_case__ : Dict = list(_lowerCAmelCase ) for dtype in self.dataclass_types: self._add_dataclass_arguments(_lowerCAmelCase ) @staticmethod def __a ( __UpperCamelCase , __UpperCamelCase ) -> Union[str, Any]: '''simple docstring''' snake_case__ : Optional[Any] = F"""--{field.name}""" snake_case__ : List[str] = field.metadata.copy() # field.metadata is not used at all by Data Classes, # it is provided as a third-party extension mechanism. if isinstance(field.type , _lowerCAmelCase ): raise RuntimeError( 'Unresolved type detected, which should have been done with the help of ' '`typing.get_type_hints` method by default' ) snake_case__ : Dict = kwargs.pop('aliases' , [] ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ): snake_case__ : Dict = [aliases] snake_case__ : Optional[int] = getattr(field.type , '__origin__' , field.type ) if origin_type is Union or (hasattr(_lowerCAmelCase , 'UnionType' ) and isinstance(_lowerCAmelCase , types.UnionType )): if str not in field.type.__args__ and ( len(field.type.__args__ ) != 2 or type(_lowerCAmelCase ) not in field.type.__args__ ): raise ValueError( 'Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because' ' the argument parser only supports one type per argument.' 
F""" Problem encountered in field \'{field.name}\'.""" ) if type(_lowerCAmelCase ) not in field.type.__args__: # filter `str` in Union snake_case__ : Dict = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1] snake_case__ : str = getattr(field.type , '__origin__' , field.type ) elif bool not in field.type.__args__: # filter `NoneType` in Union (except for `Union[bool, NoneType]`) snake_case__ : Any = ( field.type.__args__[0] if isinstance(_lowerCAmelCase , field.type.__args__[1] ) else field.type.__args__[1] ) snake_case__ : Tuple = getattr(field.type , '__origin__' , field.type ) # A variable to store kwargs for a boolean field, if needed # so that we can init a `no_*` complement argument (see below) snake_case__ : int = {} if origin_type is Literal or (isinstance(field.type , _lowerCAmelCase ) and issubclass(field.type , _lowerCAmelCase )): if origin_type is Literal: snake_case__ : str = field.type.__args__ else: snake_case__ : Tuple = [x.value for x in field.type] snake_case__ : List[str] = make_choice_type_function(kwargs['choices'] ) if field.default is not dataclasses.MISSING: snake_case__ : Union[str, Any] = field.default else: snake_case__ : List[str] = True elif field.type is bool or field.type == Optional[bool]: # Copy the currect kwargs to use to instantiate a `no_*` complement argument below. # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument snake_case__ : Union[str, Any] = copy(_lowerCAmelCase ) # Hack because type=bool in argparse does not behave as we want. snake_case__ : Union[str, Any] = string_to_bool if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING): # Default value is False if we have no default when of type bool. snake_case__ : Union[str, Any] = False if field.default is dataclasses.MISSING else field.default # This is the value that will get picked if we don't include --field_name in any way snake_case__ : int = default # This tells argparse we accept 0 or 1 value after --field_name snake_case__ : int = '?' # This is the value that will get picked if we do --field_name (without value) snake_case__ : int = True elif isclass(_lowerCAmelCase ) and issubclass(_lowerCAmelCase , _lowerCAmelCase ): snake_case__ : List[str] = field.type.__args__[0] snake_case__ : Union[str, Any] = '+' if field.default_factory is not dataclasses.MISSING: snake_case__ : List[str] = field.default_factory() elif field.default is dataclasses.MISSING: snake_case__ : Union[str, Any] = True else: snake_case__ : Optional[int] = field.type if field.default is not dataclasses.MISSING: snake_case__ : int = field.default elif field.default_factory is not dataclasses.MISSING: snake_case__ : Dict = field.default_factory() else: snake_case__ : int = True parser.add_argument(_lowerCAmelCase , *_lowerCAmelCase , **_lowerCAmelCase ) # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added. # Order is important for arguments with the same destination! # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down # here and we do not need those changes/additional keys. 
if field.default is True and (field.type is bool or field.type == Optional[bool]): snake_case__ : Optional[int] = False parser.add_argument(F"""--no_{field.name}""" , action='store_false' , dest=field.name , **_lowerCAmelCase ) def __a ( self , __UpperCamelCase ) -> List[Any]: '''simple docstring''' if hasattr(_lowerCAmelCase , '_argument_group_name' ): snake_case__ : List[Any] = self.add_argument_group(dtype._argument_group_name ) else: snake_case__ : List[str] = self try: snake_case__ : Dict = get_type_hints(_lowerCAmelCase ) except NameError: raise RuntimeError( F"""Type resolution failed for {dtype}. Try declaring the class in global scope or """ 'removing line of `from __future__ import annotations` which opts in Postponed ' 'Evaluation of Annotations (PEP 563)' ) except TypeError as ex: # Remove this block when we drop Python 3.9 support if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(_lowerCAmelCase ): snake_case__ : Optional[Any] = '.'.join(map(_lowerCAmelCase , sys.version_info[:3] ) ) raise RuntimeError( F"""Type resolution failed for {dtype} on Python {python_version}. Try removing """ 'line of `from __future__ import annotations` which opts in union types as ' '`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To ' 'support Python versions that lower than 3.10, you need to use ' '`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of ' '`X | None`.' ) from ex raise for field in dataclasses.fields(_lowerCAmelCase ): if not field.init: continue snake_case__ : int = type_hints[field.name] self._parse_dataclass_field(_lowerCAmelCase , _lowerCAmelCase ) def __a ( self , __UpperCamelCase=None , __UpperCamelCase=False , __UpperCamelCase=True , __UpperCamelCase=None , __UpperCamelCase=None , ) -> List[str]: '''simple docstring''' if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )): snake_case__ : Tuple = [] if args_filename: args_files.append(Path(_lowerCAmelCase ) ) elif look_for_args_file and len(sys.argv ): args_files.append(Path(sys.argv[0] ).with_suffix('.args' ) ) # args files specified via command line flag should overwrite default args files so we add them last if args_file_flag: # Create special parser just to extract the args_file_flag values snake_case__ : Union[str, Any] = ArgumentParser() args_file_parser.add_argument(_lowerCAmelCase , type=_lowerCAmelCase , action='append' ) # Use only remaining args for further parsing (remove the args_file_flag) snake_case__ , snake_case__ : List[str] = args_file_parser.parse_known_args(args=_lowerCAmelCase ) snake_case__ : str = vars(_lowerCAmelCase ).get(args_file_flag.lstrip('-' ) , _lowerCAmelCase ) if cmd_args_file_paths: args_files.extend([Path(_lowerCAmelCase ) for p in cmd_args_file_paths] ) snake_case__ : Any = [] for args_file in args_files: if args_file.exists(): file_args += args_file.read_text().split() # in case of duplicate arguments the last one has precedence # args specified via the command line should overwrite args from files, so we add them last snake_case__ : int = file_args + args if args is not None else file_args + sys.argv[1:] snake_case__ , snake_case__ : str = self.parse_known_args(args=_lowerCAmelCase ) snake_case__ : List[Any] = [] for dtype in self.dataclass_types: snake_case__ : str = {f.name for f in dataclasses.fields(_lowerCAmelCase ) if f.init} snake_case__ : str = {k: v for k, v in vars(_lowerCAmelCase ).items() if k in keys} for k in keys: delattr(_lowerCAmelCase , _lowerCAmelCase ) snake_case__ : 
List[Any] = dtype(**_lowerCAmelCase ) outputs.append(_lowerCAmelCase ) if len(namespace.__dict__ ) > 0: # additional namespace. outputs.append(_lowerCAmelCase ) if return_remaining_strings: return (*outputs, remaining_args) else: if remaining_args: raise ValueError(F"""Some specified arguments are not used by the HfArgumentParser: {remaining_args}""" ) return (*outputs,) def __a ( self , __UpperCamelCase , __UpperCamelCase = False ) -> Union[str, Any]: '''simple docstring''' snake_case__ : Optional[int] = set(args.keys() ) snake_case__ : Optional[int] = [] for dtype in self.dataclass_types: snake_case__ : Any = {f.name for f in dataclasses.fields(_lowerCAmelCase ) if f.init} snake_case__ : List[Any] = {k: v for k, v in args.items() if k in keys} unused_keys.difference_update(inputs.keys() ) snake_case__ : List[Any] = dtype(**_lowerCAmelCase ) outputs.append(_lowerCAmelCase ) if not allow_extra_keys and unused_keys: raise ValueError(F"""Some keys are not used by the HfArgumentParser: {sorted(_lowerCAmelCase )}""" ) return tuple(_lowerCAmelCase ) def __a ( self , __UpperCamelCase , __UpperCamelCase = False ) -> Any: '''simple docstring''' with open(Path(_lowerCAmelCase ) , encoding='utf-8' ) as open_json_file: snake_case__ : Optional[int] = json.loads(open_json_file.read() ) snake_case__ : Dict = self.parse_dict(_lowerCAmelCase , allow_extra_keys=_lowerCAmelCase ) return tuple(_lowerCAmelCase ) def __a ( self , __UpperCamelCase , __UpperCamelCase = False ) -> Dict: '''simple docstring''' snake_case__ : Optional[int] = self.parse_dict(yaml.safe_load(Path(_lowerCAmelCase ).read_text() ) , allow_extra_keys=_lowerCAmelCase ) return tuple(_lowerCAmelCase )
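# Usage sketch (not from the source): the class above mirrors transformers'
# HfArgumentParser, which turns dataclass fields into CLI flags. TrainArgs and
# its fields are illustrative assumptions, not part of the original file.
from dataclasses import dataclass, field
from transformers import HfArgumentParser


@dataclass
class TrainArgs:
    model_name: str = field(metadata={"help": "Checkpoint to fine-tune."})
    learning_rate: float = field(default=5e-5, metadata={"help": "Peak learning rate."})
    fp16: bool = field(default=False, metadata={"help": "Use mixed precision."})


parser = HfArgumentParser(TrainArgs)
# Bool fields accept a bare flag (--fp16) thanks to the string_to_bool handling above.
(train_args,) = parser.parse_args_into_dataclasses(
    args=["--model_name", "bert-base-uncased", "--fp16"]
)
print(train_args.model_name, train_args.learning_rate, train_args.fp16)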
709
import collections from typing import List, Optional, Union from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging from ..bert.tokenization_bert_fast import BertTokenizerFast from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer lowerCAmelCase__ : Any = logging.get_logger(__name__) lowerCAmelCase__ : List[Any] = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''} lowerCAmelCase__ : Any = { '''vocab_file''': { '''facebook/dpr-ctx_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt''' ), '''facebook/dpr-ctx_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''facebook/dpr-ctx_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json''' ), '''facebook/dpr-ctx_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json''' ), }, } lowerCAmelCase__ : Any = { '''vocab_file''': { '''facebook/dpr-question_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt''' ), '''facebook/dpr-question_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''facebook/dpr-question_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json''' ), '''facebook/dpr-question_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json''' ), }, } lowerCAmelCase__ : Tuple = { '''vocab_file''': { '''facebook/dpr-reader-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt''' ), '''facebook/dpr-reader-multiset-base''': ( '''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''facebook/dpr-reader-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json''' ), '''facebook/dpr-reader-multiset-base''': ( '''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json''' ), }, } lowerCAmelCase__ : Dict = { '''facebook/dpr-ctx_encoder-single-nq-base''': 5_12, '''facebook/dpr-ctx_encoder-multiset-base''': 5_12, } lowerCAmelCase__ : Union[str, Any] = { '''facebook/dpr-question_encoder-single-nq-base''': 5_12, '''facebook/dpr-question_encoder-multiset-base''': 5_12, } lowerCAmelCase__ : Optional[Any] = { '''facebook/dpr-reader-single-nq-base''': 5_12, '''facebook/dpr-reader-multiset-base''': 5_12, } lowerCAmelCase__ : Tuple = { '''facebook/dpr-ctx_encoder-single-nq-base''': {'''do_lower_case''': True}, '''facebook/dpr-ctx_encoder-multiset-base''': {'''do_lower_case''': True}, } lowerCAmelCase__ : Any = { '''facebook/dpr-question_encoder-single-nq-base''': {'''do_lower_case''': True}, '''facebook/dpr-question_encoder-multiset-base''': {'''do_lower_case''': True}, } lowerCAmelCase__ : List[str] = { '''facebook/dpr-reader-single-nq-base''': {'''do_lower_case''': True}, '''facebook/dpr-reader-multiset-base''': {'''do_lower_case''': True}, } class __snake_case ( _lowerCamelCase ): __lowerCamelCase = 
VOCAB_FILES_NAMES __lowerCamelCase = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP __lowerCamelCase = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowerCamelCase = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION __lowerCamelCase = DPRContextEncoderTokenizer class __snake_case ( _lowerCamelCase ): __lowerCamelCase = VOCAB_FILES_NAMES __lowerCamelCase = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP __lowerCamelCase = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowerCamelCase = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION __lowerCamelCase = DPRQuestionEncoderTokenizer lowerCAmelCase__ : Tuple = collections.namedtuple( '''DPRSpanPrediction''', ['''span_score''', '''relevance_score''', '''doc_id''', '''start_index''', '''end_index''', '''text'''] ) lowerCAmelCase__ : List[Any] = collections.namedtuple('''DPRReaderOutput''', ['''start_logits''', '''end_logits''', '''relevance_logits''']) lowerCAmelCase__ : int = r''' Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`. It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers), using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)` with the format: [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids> Args: questions (`str` or `List[str]`): The questions to be encoded. You can specify one question for many passages. In this case, the question will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in `titles` or `texts`. titles (`str` or `List[str]`): The passages titles to be encoded. This can be a string or a list of strings if there are several passages. texts (`str` or `List[str]`): The passages texts to be encoded. This can be a string or a list of strings if there are several passages. padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`): Activates and controls padding. Accepts the following values: - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different lengths). truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`): Activates and controls truncation. Accepts the following values: - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided. - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. 
This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size). max_length (`int`, *optional*): Controls the maximum length to use by one of the truncation/padding parameters. If left unset or set to `None`, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated. return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors instead of list of python integers. Acceptable values are: - `\'tf\'`: Return TensorFlow `tf.constant` objects. - `\'pt\'`: Return PyTorch `torch.Tensor` objects. - `\'np\'`: Return Numpy `np.ndarray` objects. return_attention_mask (`bool`, *optional*): Whether or not to return the attention mask. If not set, will return the attention mask according to the specific tokenizer\'s default, defined by the `return_outputs` attribute. [What are attention masks?](../glossary#attention-mask) Return: `Dict[str, List[List[int]]]`: A dictionary with the following keys: - `input_ids`: List of token ids to be fed to a model. - `attention_mask`: List of indices specifying which tokens should be attended to by the model. ''' @add_start_docstrings(_lowerCamelCase ) class __snake_case : def __call__( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = False , __UpperCamelCase = False , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , **__UpperCamelCase , ) -> BatchEncoding: '''simple docstring''' if titles is None and texts is None: return super().__call__( __UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase , max_length=__UpperCamelCase , return_tensors=__UpperCamelCase , return_attention_mask=__UpperCamelCase , **__UpperCamelCase , ) elif titles is None or texts is None: snake_case__ : Optional[Any] = titles if texts is None else texts return super().__call__( __UpperCamelCase , __UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase , max_length=__UpperCamelCase , return_tensors=__UpperCamelCase , return_attention_mask=__UpperCamelCase , **__UpperCamelCase , ) snake_case__ : int = titles if not isinstance(__UpperCamelCase , __UpperCamelCase ) else [titles] snake_case__ : Optional[int] = texts if not isinstance(__UpperCamelCase , __UpperCamelCase ) else [texts] snake_case__ : List[Any] = len(__UpperCamelCase ) snake_case__ : str = questions if not isinstance(__UpperCamelCase , __UpperCamelCase ) else [questions] * n_passages assert len(__UpperCamelCase ) == len( __UpperCamelCase ), F"""There should be as many titles than texts but got {len(__UpperCamelCase )} titles and {len(__UpperCamelCase )} texts.""" snake_case__ : Optional[int] = super().__call__(__UpperCamelCase , __UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase )['input_ids'] snake_case__ : Optional[Any] = super().__call__(__UpperCamelCase , add_special_tokens=__UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase )['input_ids'] snake_case__ : Union[str, Any] = { 'input_ids': [ (encoded_question_and_title + encoded_text)[:max_length] if max_length is not None and truncation else encoded_question_and_title + encoded_text for encoded_question_and_title, 
encoded_text in zip(__UpperCamelCase , __UpperCamelCase ) ] } if return_attention_mask is not False: snake_case__ : List[Any] = [] for input_ids in encoded_inputs["input_ids"]: attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] ) snake_case__ : Union[str, Any] = attention_mask return self.pad(__UpperCamelCase , padding=__UpperCamelCase , max_length=__UpperCamelCase , return_tensors=__UpperCamelCase ) def __a ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = 16 , __UpperCamelCase = 64 , __UpperCamelCase = 4 , ) -> List[DPRSpanPrediction]: '''simple docstring''' snake_case__ : Optional[Any] = reader_input['input_ids'] snake_case__ , snake_case__ , snake_case__ : Any = reader_output[:3] snake_case__ : List[str] = len(__UpperCamelCase ) snake_case__ : Tuple = sorted(range(__UpperCamelCase ) , reverse=__UpperCamelCase , key=relevance_logits.__getitem__ ) snake_case__ : List[DPRReaderOutput] = [] for doc_id in sorted_docs: snake_case__ : Tuple = list(input_ids[doc_id] ) # assuming question & title information is at the beginning of the sequence snake_case__ : int = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id if sequence_ids[-1] == self.pad_token_id: snake_case__ : Union[str, Any] = sequence_ids.index(self.pad_token_id ) else: snake_case__ : str = len(__UpperCamelCase ) snake_case__ : Dict = self._get_best_spans( start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=__UpperCamelCase , top_spans=__UpperCamelCase , ) for start_index, end_index in best_spans: start_index += passage_offset end_index += passage_offset nbest_spans_predictions.append( DPRSpanPrediction( span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=__UpperCamelCase , start_index=__UpperCamelCase , end_index=__UpperCamelCase , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) ) if len(__UpperCamelCase ) >= num_spans: break return nbest_spans_predictions[:num_spans] def __a ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , ) -> List[DPRSpanPrediction]: '''simple docstring''' snake_case__ : Any = [] for start_index, start_score in enumerate(__UpperCamelCase ): for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ): scores.append(((start_index, start_index + answer_length), start_score + end_score) ) snake_case__ : str = sorted(__UpperCamelCase , key=lambda __UpperCamelCase : x[1] , reverse=__UpperCamelCase ) snake_case__ : Any = [] for (start_index, end_index), score in scores: assert start_index <= end_index, F"""Wrong span indices: [{start_index}:{end_index}]""" snake_case__ : str = end_index - start_index + 1 assert length <= max_answer_length, F"""Span is too long: {length} > {max_answer_length}""" if any( start_index <= prev_start_index <= prev_end_index <= end_index or prev_start_index <= start_index <= end_index <= prev_end_index for (prev_start_index, prev_end_index) in chosen_span_intervals ): continue chosen_span_intervals.append((start_index, end_index) ) if len(__UpperCamelCase ) == top_spans: break return chosen_span_intervals @add_end_docstrings(_lowerCamelCase ) class __snake_case ( _lowerCamelCase ,_lowerCamelCase ): __lowerCamelCase = VOCAB_FILES_NAMES __lowerCamelCase = READER_PRETRAINED_VOCAB_FILES_MAP __lowerCamelCase = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowerCamelCase = 
READER_PRETRAINED_INIT_CONFIGURATION __lowerCamelCase = ["""input_ids""", """attention_mask"""] __lowerCamelCase = DPRReaderTokenizer
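# Usage sketch (not from the source): encoding one question against several
# passages with the reader tokenizer defined above; the checkpoint is the
# public DPR reader and the passages are made up.
from transformers import DPRReaderTokenizerFast

tokenizer = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
encoded = tokenizer(
    questions="What is the capital of France?",
    titles=["Paris", "France"],
    texts=["Paris is the capital of France.", "France is a country in Europe."],
    padding=True,
    return_tensors="pt",
)
# One row per passage: [CLS] question [SEP] title [SEP] text.
# The reader model's start/end/relevance logits then go to decode_best_spans().
print(encoded["input_ids"].shape)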
699
0
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCAmelCase__ : List[str] = logging.get_logger(__name__) lowerCAmelCase__ : Any = { "kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json", "kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json", "kssteven/ibert-roberta-large-mnli": ( "https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json" ), } class __snake_case ( _lowerCamelCase ): __lowerCamelCase = "ibert" def __init__( self , __UpperCamelCase=30522 , __UpperCamelCase=768 , __UpperCamelCase=12 , __UpperCamelCase=12 , __UpperCamelCase=3072 , __UpperCamelCase="gelu" , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=512 , __UpperCamelCase=2 , __UpperCamelCase=0.0_2 , __UpperCamelCase=1E-12 , __UpperCamelCase=1 , __UpperCamelCase=0 , __UpperCamelCase=2 , __UpperCamelCase="absolute" , __UpperCamelCase=False , __UpperCamelCase="none" , **__UpperCamelCase , ) -> Optional[Any]: '''simple docstring''' super().__init__(pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ ) snake_case__ : List[Any] = vocab_size snake_case__ : Optional[Any] = hidden_size snake_case__ : List[Any] = num_hidden_layers snake_case__ : Any = num_attention_heads snake_case__ : List[str] = hidden_act snake_case__ : List[str] = intermediate_size snake_case__ : Optional[int] = hidden_dropout_prob snake_case__ : Union[str, Any] = attention_probs_dropout_prob snake_case__ : str = max_position_embeddings snake_case__ : List[str] = type_vocab_size snake_case__ : Dict = initializer_range snake_case__ : Optional[int] = layer_norm_eps snake_case__ : Any = position_embedding_type snake_case__ : Tuple = quant_mode snake_case__ : Union[str, Any] = force_dequant class __snake_case ( _lowerCamelCase ): @property def __a ( self ) -> str: '''simple docstring''' if self.task == "multiple-choice": snake_case__ : Optional[int] = {0: "batch", 1: "choice", 2: "sequence"} else: snake_case__ : Optional[int] = {0: "batch", 1: "sequence"} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ] )
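# Usage sketch (not from the source): instantiating the config above with its
# I-BERT-specific quantization switch.
from transformers import IBertConfig, IBertModel

config = IBertConfig(quant_mode=True)  # enable the integer-only inference paths
model = IBertModel(config)
print(config.quant_mode, config.hidden_size)  # True 768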
710
import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, EulerAncestralDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionInstructPixaPixPipeline, UNetaDConditionModel, ) from diffusers.image_processor import VaeImageProcessor from diffusers.utils import floats_tensor, load_image, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class __snake_case ( _lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,unittest.TestCase ): __lowerCamelCase = StableDiffusionInstructPixaPixPipeline __lowerCamelCase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width""", """cross_attention_kwargs"""} __lowerCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS __lowerCamelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS __lowerCamelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS def __a ( self ) -> List[Any]: '''simple docstring''' torch.manual_seed(0 ) snake_case__ : Tuple = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , ) snake_case__ : Any = PNDMScheduler(skip_prk_steps=__UpperCamelCase ) torch.manual_seed(0 ) snake_case__ : Dict = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , ) torch.manual_seed(0 ) snake_case__ : int = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) snake_case__ : Tuple = CLIPTextModel(__UpperCamelCase ) snake_case__ : Optional[int] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) snake_case__ : Optional[int] = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'safety_checker': None, 'feature_extractor': None, } return components def __a ( self , __UpperCamelCase , __UpperCamelCase=0 ) -> Dict: '''simple docstring''' snake_case__ : Optional[Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCamelCase ) ).to(__UpperCamelCase ) snake_case__ : int = image.cpu().permute(0 , 2 , 3 , 1 )[0] snake_case__ : Union[str, Any] = Image.fromarray(np.uinta(__UpperCamelCase ) ).convert('RGB' ) if str(__UpperCamelCase ).startswith('mps' ): snake_case__ : str = torch.manual_seed(__UpperCamelCase ) else: snake_case__ : Dict = torch.Generator(device=__UpperCamelCase ).manual_seed(__UpperCamelCase ) snake_case__ : str = { 'prompt': 'A painting of a squirrel eating a burger', 'image': image, 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 6.0, 'image_guidance_scale': 1, 'output_type': 'numpy', } return inputs def __a ( self ) -> Tuple: '''simple docstring''' snake_case__ : Dict = 'cpu' # ensure determinism for the device-dependent torch.Generator snake_case__ : Optional[int] = 
self.get_dummy_components() snake_case__ : str = StableDiffusionInstructPixaPixPipeline(**__UpperCamelCase ) snake_case__ : Optional[int] = sd_pipe.to(__UpperCamelCase ) sd_pipe.set_progress_bar_config(disable=__UpperCamelCase ) snake_case__ : Tuple = self.get_dummy_inputs(__UpperCamelCase ) snake_case__ : List[str] = sd_pipe(**__UpperCamelCase ).images snake_case__ : List[str] = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) snake_case__ : str = np.array([0.7_5_2_6, 0.3_7_5_0, 0.4_5_4_7, 0.6_1_1_7, 0.5_8_6_6, 0.5_0_1_6, 0.4_3_2_7, 0.5_6_4_2, 0.4_8_1_5] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def __a ( self ) -> Any: '''simple docstring''' snake_case__ : Dict = 'cpu' # ensure determinism for the device-dependent torch.Generator snake_case__ : Union[str, Any] = self.get_dummy_components() snake_case__ : Optional[Any] = StableDiffusionInstructPixaPixPipeline(**__UpperCamelCase ) snake_case__ : List[Any] = sd_pipe.to(__UpperCamelCase ) sd_pipe.set_progress_bar_config(disable=__UpperCamelCase ) snake_case__ : Union[str, Any] = self.get_dummy_inputs(__UpperCamelCase ) snake_case__ : List[str] = 'french fries' snake_case__ : Optional[Any] = sd_pipe(**__UpperCamelCase , negative_prompt=__UpperCamelCase ) snake_case__ : Union[str, Any] = output.images snake_case__ : Tuple = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) snake_case__ : Any = np.array([0.7_5_1_1, 0.3_6_4_2, 0.4_5_5_3, 0.6_2_3_6, 0.5_7_9_7, 0.5_0_1_3, 0.4_3_4_3, 0.5_6_1_1, 0.4_8_3_1] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def __a ( self ) -> int: '''simple docstring''' snake_case__ : Optional[int] = 'cpu' # ensure determinism for the device-dependent torch.Generator snake_case__ : List[str] = self.get_dummy_components() snake_case__ : str = StableDiffusionInstructPixaPixPipeline(**__UpperCamelCase ) snake_case__ : str = sd_pipe.to(__UpperCamelCase ) sd_pipe.set_progress_bar_config(disable=__UpperCamelCase ) snake_case__ : Dict = self.get_dummy_inputs(__UpperCamelCase ) snake_case__ : Any = [inputs['prompt']] * 2 snake_case__ : Optional[int] = np.array(inputs['image'] ).astype(np.floataa ) / 2_5_5.0 snake_case__ : Optional[int] = torch.from_numpy(__UpperCamelCase ).unsqueeze(0 ).to(__UpperCamelCase ) snake_case__ : Any = image / 2 + 0.5 snake_case__ : Optional[Any] = image.permute(0 , 3 , 1 , 2 ) snake_case__ : List[Any] = image.repeat(2 , 1 , 1 , 1 ) snake_case__ : Optional[int] = sd_pipe(**__UpperCamelCase ).images snake_case__ : Union[str, Any] = image[-1, -3:, -3:, -1] assert image.shape == (2, 32, 32, 3) snake_case__ : List[Any] = np.array([0.5_8_1_2, 0.5_7_4_8, 0.5_2_2_2, 0.5_9_0_8, 0.5_6_9_5, 0.7_1_7_4, 0.6_8_0_4, 0.5_5_2_3, 0.5_5_7_9] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def __a ( self ) -> Union[str, Any]: '''simple docstring''' snake_case__ : Optional[int] = 'cpu' # ensure determinism for the device-dependent torch.Generator snake_case__ : Optional[int] = self.get_dummy_components() snake_case__ : Tuple = EulerAncestralDiscreteScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' ) snake_case__ : int = StableDiffusionInstructPixaPixPipeline(**__UpperCamelCase ) snake_case__ : List[str] = sd_pipe.to(__UpperCamelCase ) sd_pipe.set_progress_bar_config(disable=__UpperCamelCase ) snake_case__ : str = self.get_dummy_inputs(__UpperCamelCase ) snake_case__ : Any = sd_pipe(**__UpperCamelCase ).images snake_case__ : int = image[0, -3:, -3:, -1] snake_case__ : Tuple = 
[round(__UpperCamelCase , 4 ) for x in image_slice.flatten().tolist()] print(','.join([str(__UpperCamelCase ) for x in slice] ) ) assert image.shape == (1, 32, 32, 3) snake_case__ : List[Any] = np.array([0.7_4_1_7, 0.3_8_4_2, 0.4_7_3_2, 0.5_7_7_6, 0.5_8_9_1, 0.5_1_3_9, 0.4_0_5_2, 0.5_6_7_3, 0.4_9_8_6] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def __a ( self ) -> int: '''simple docstring''' super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) def __a ( self ) -> Any: '''simple docstring''' snake_case__ : Optional[int] = self.get_dummy_components() snake_case__ : int = StableDiffusionInstructPixaPixPipeline(**__UpperCamelCase ) snake_case__ : Union[str, Any] = VaeImageProcessor(do_resize=__UpperCamelCase , do_normalize=__UpperCamelCase ) snake_case__ : Optional[int] = pipe.to(__UpperCamelCase ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) snake_case__ : Optional[Any] = pipe(**self.get_dummy_inputs_by_type(__UpperCamelCase , input_image_type='pt' ) )[0] snake_case__ : Union[str, Any] = components['vae'] snake_case__ : str = self.get_dummy_inputs_by_type(__UpperCamelCase , input_image_type='pt' ) for image_param in self.image_latents_params: if image_param in inputs.keys(): snake_case__ : List[str] = vae.encode(inputs[image_param] ).latent_dist.mode() snake_case__ : Dict = pipe(**__UpperCamelCase )[0] snake_case__ : str = np.abs(out - out_latents_inputs ).max() self.assertLess(__UpperCamelCase , 1E-4 , 'passing latents as image input generate different result from passing image' ) @slow @require_torch_gpu class __snake_case ( unittest.TestCase ): def __a ( self ) -> List[str]: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def __a ( self , __UpperCamelCase=0 ) -> Dict: '''simple docstring''' snake_case__ : Optional[Any] = torch.manual_seed(__UpperCamelCase ) snake_case__ : List[str] = load_image( 'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg' ) snake_case__ : int = { 'prompt': 'turn him into a cyborg', 'image': image, 'generator': generator, 'num_inference_steps': 3, 'guidance_scale': 7.5, 'image_guidance_scale': 1.0, 'output_type': 'numpy', } return inputs def __a ( self ) -> Dict: '''simple docstring''' snake_case__ : Any = StableDiffusionInstructPixaPixPipeline.from_pretrained( 'timbrooks/instruct-pix2pix' , safety_checker=__UpperCamelCase ) pipe.to(__UpperCamelCase ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) pipe.enable_attention_slicing() snake_case__ : Tuple = self.get_inputs() snake_case__ : List[Any] = pipe(**__UpperCamelCase ).images snake_case__ : Union[str, Any] = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) snake_case__ : Dict = np.array([0.5_9_0_2, 0.6_0_1_5, 0.6_0_2_7, 0.5_9_8_3, 0.6_0_9_2, 0.6_0_6_1, 0.5_7_6_5, 0.5_7_8_5, 0.5_5_5_5] ) assert np.abs(expected_slice - image_slice ).max() < 1E-3 def __a ( self ) -> str: '''simple docstring''' snake_case__ : Any = StableDiffusionInstructPixaPixPipeline.from_pretrained( 'timbrooks/instruct-pix2pix' , safety_checker=__UpperCamelCase ) snake_case__ : Tuple = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.to(__UpperCamelCase ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) pipe.enable_attention_slicing() snake_case__ : Dict = self.get_inputs() snake_case__ : Dict = pipe(**__UpperCamelCase ).images snake_case__ : Union[str, Any] = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) snake_case__ : List[Any] = 
np.array([0.6_5_7_8, 0.6_8_1_7, 0.6_9_7_2, 0.6_7_6_1, 0.6_8_5_6, 0.6_9_1_6, 0.6_4_2_8, 0.6_5_1_6, 0.6_3_0_1] ) assert np.abs(expected_slice - image_slice ).max() < 1E-3 def __a ( self ) -> Optional[Any]: '''simple docstring''' snake_case__ : Tuple = StableDiffusionInstructPixaPixPipeline.from_pretrained( 'timbrooks/instruct-pix2pix' , safety_checker=__UpperCamelCase ) snake_case__ : Tuple = DDIMScheduler.from_config(pipe.scheduler.config ) pipe.to(__UpperCamelCase ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) pipe.enable_attention_slicing() snake_case__ : Optional[int] = self.get_inputs() snake_case__ : Optional[int] = pipe(**__UpperCamelCase ).images snake_case__ : Tuple = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) snake_case__ : int = np.array([0.3_8_2_8, 0.3_8_3_4, 0.3_8_1_8, 0.3_7_9_2, 0.3_8_6_5, 0.3_7_5_2, 0.3_7_9_2, 0.3_8_4_7, 0.3_7_5_3] ) assert np.abs(expected_slice - image_slice ).max() < 1E-3 def __a ( self ) -> Union[str, Any]: '''simple docstring''' snake_case__ : int = 0 def callback_fn(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> None: snake_case__ : List[Any] = True nonlocal number_of_steps number_of_steps += 1 if step == 1: snake_case__ : Any = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 64) snake_case__ : int = latents[0, -3:, -3:, -1] snake_case__ : List[str] = np.array([-0.2_4_6_3, -0.4_6_4_4, -0.9_7_5_6, 1.5_1_7_6, 1.4_4_1_4, 0.7_8_6_6, 0.9_8_9_7, 0.8_5_2_1, 0.7_9_8_3] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2 elif step == 2: snake_case__ : Dict = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 64) snake_case__ : Dict = latents[0, -3:, -3:, -1] snake_case__ : Optional[Any] = np.array([-0.2_6_4_4, -0.4_6_2_6, -0.9_6_5_3, 1.5_1_7_6, 1.4_5_5_1, 0.7_6_8_6, 0.9_8_0_5, 0.8_4_5_2, 0.8_1_1_5] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2 snake_case__ : str = False snake_case__ : List[Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained( 'timbrooks/instruct-pix2pix' , safety_checker=__UpperCamelCase , torch_dtype=torch.floataa ) snake_case__ : int = pipe.to(__UpperCamelCase ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) pipe.enable_attention_slicing() snake_case__ : int = self.get_inputs() pipe(**__UpperCamelCase , callback=__UpperCamelCase , callback_steps=1 ) assert callback_fn.has_been_called assert number_of_steps == 3 def __a ( self ) -> Any: '''simple docstring''' torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() snake_case__ : str = StableDiffusionInstructPixaPixPipeline.from_pretrained( 'timbrooks/instruct-pix2pix' , safety_checker=__UpperCamelCase , torch_dtype=torch.floataa ) snake_case__ : Dict = pipe.to(__UpperCamelCase ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() snake_case__ : str = self.get_inputs() snake_case__ : Tuple = pipe(**__UpperCamelCase ) snake_case__ : List[Any] = torch.cuda.max_memory_allocated() # make sure that less than 2.2 GB is allocated assert mem_bytes < 2.2 * 10**9 def __a ( self ) -> int: '''simple docstring''' snake_case__ : int = self.get_inputs() # resize to resolution that is divisible by 8 but not 16 or 32 snake_case__ : Tuple = inputs['image'].resize((504, 504) ) snake_case__ : str = 'timbrooks/instruct-pix2pix' snake_case__ : Tuple = StableDiffusionInstructPixaPixPipeline.from_pretrained( __UpperCamelCase , 
safety_checker=__UpperCamelCase , ) pipe.to(__UpperCamelCase ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) pipe.enable_attention_slicing() snake_case__ : str = pipe(**__UpperCamelCase ) snake_case__ : List[Any] = output.images[0] snake_case__ : List[Any] = image[255:258, 383:386, -1] assert image.shape == (504, 504, 3) snake_case__ : List[str] = np.array([0.2_7_2_6, 0.2_5_2_9, 0.2_6_6_4, 0.2_6_5_5, 0.2_6_4_1, 0.2_6_4_2, 0.2_5_9_1, 0.2_6_4_9, 0.2_5_9_0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
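# Usage sketch (not from the source): the pipeline exercised by the tests
# above, run against the same public checkpoint and example image.
import torch
from diffusers import StableDiffusionInstructPix2PixPipeline
from diffusers.utils import load_image

pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
    "timbrooks/instruct-pix2pix", torch_dtype=torch.float16, safety_checker=None
).to("cuda")
image = load_image(
    "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg"
)
# image_guidance_scale controls how closely the edit sticks to the input image
out = pipe("turn him into a cyborg", image=image, num_inference_steps=10, image_guidance_scale=1.0)
out.images[0].save("cyborg.png")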
699
0
import unittest import numpy as np from transformers.file_utils import is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DPTImageProcessor class __snake_case ( unittest.TestCase ): def __init__( self , __UpperCamelCase , __UpperCamelCase=7 , __UpperCamelCase=3 , __UpperCamelCase=18 , __UpperCamelCase=30 , __UpperCamelCase=400 , __UpperCamelCase=True , __UpperCamelCase=None , __UpperCamelCase=True , __UpperCamelCase=[0.5, 0.5, 0.5] , __UpperCamelCase=[0.5, 0.5, 0.5] , ) -> Optional[Any]: '''simple docstring''' snake_case__ : Any = size if size is not None else {"height": 18, "width": 18} snake_case__ : List[Any] = parent snake_case__ : int = batch_size snake_case__ : Any = num_channels snake_case__ : List[Any] = image_size snake_case__ : int = min_resolution snake_case__ : str = max_resolution snake_case__ : Dict = do_resize snake_case__ : str = size snake_case__ : Optional[int] = do_normalize snake_case__ : Dict = image_mean snake_case__ : int = image_std def __a ( self ) -> Optional[Any]: '''simple docstring''' return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, } @require_torch @require_vision class __snake_case ( _lowerCamelCase ,unittest.TestCase ): __lowerCamelCase = DPTImageProcessor if is_vision_available() else None def __a ( self ) -> Any: '''simple docstring''' snake_case__ : List[Any] = DPTImageProcessingTester(self ) @property def __a ( self ) -> List[str]: '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def __a ( self ) -> Union[str, Any]: '''simple docstring''' snake_case__ : Tuple = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__A , 'image_mean' ) ) self.assertTrue(hasattr(__A , 'image_std' ) ) self.assertTrue(hasattr(__A , 'do_normalize' ) ) self.assertTrue(hasattr(__A , 'do_resize' ) ) self.assertTrue(hasattr(__A , 'size' ) ) def __a ( self ) -> List[Any]: '''simple docstring''' snake_case__ : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'height': 18, 'width': 18} ) snake_case__ : Tuple = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {'height': 42, 'width': 42} ) def __a ( self ) -> Dict: '''simple docstring''' snake_case__ : str = self.image_processing_class(**self.image_processor_dict ) # create random PIL images snake_case__ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A ) for image in image_inputs: self.assertIsInstance(__A , Image.Image ) # Test not batched input snake_case__ : str = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched snake_case__ : Optional[int] = image_processing(__A , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) 
, ) def __a ( self ) -> Union[str, Any]: '''simple docstring''' snake_case__ : List[Any] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors snake_case__ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , numpify=__A ) for image in image_inputs: self.assertIsInstance(__A , np.ndarray ) # Test not batched input snake_case__ : str = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched snake_case__ : str = image_processing(__A , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) def __a ( self ) -> List[Any]: '''simple docstring''' snake_case__ : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors snake_case__ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , torchify=__A ) for image in image_inputs: self.assertIsInstance(__A , torch.Tensor ) # Test not batched input snake_case__ : Optional[int] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched snake_case__ : int = image_processing(__A , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , )
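# Usage sketch (not from the source): the processor exercised by the tests
# above, resizing an arbitrary image to the configured 18x18 resolution.
import numpy as np
from transformers import DPTImageProcessor

processor = DPTImageProcessor(do_resize=True, size={"height": 18, "width": 18})
image = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)  # stand-in RGB image
pixel_values = processor(images=image, return_tensors="pt").pixel_values
print(pixel_values.shape)  # torch.Size([1, 3, 18, 18])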
711
from .data_collator import ( DataCollatorForLanguageModeling, DataCollatorForPermutationLanguageModeling, DataCollatorForSeqaSeq, DataCollatorForSOP, DataCollatorForTokenClassification, DataCollatorForWholeWordMask, DataCollatorWithPadding, DefaultDataCollator, default_data_collator, ) from .metrics import glue_compute_metrics, xnli_compute_metrics from .processors import ( DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor, SquadExample, SquadFeatures, SquadVaProcessor, glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels, squad_convert_examples_to_features, xnli_output_modes, xnli_processors, xnli_tasks_num_labels, )
699
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) lowerCAmelCase__ : Any = { "configuration_roberta_prelayernorm": [ "ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP", "RobertaPreLayerNormConfig", "RobertaPreLayerNormOnnxConfig", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ : Union[str, Any] = [ "ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST", "RobertaPreLayerNormForCausalLM", "RobertaPreLayerNormForMaskedLM", "RobertaPreLayerNormForMultipleChoice", "RobertaPreLayerNormForQuestionAnswering", "RobertaPreLayerNormForSequenceClassification", "RobertaPreLayerNormForTokenClassification", "RobertaPreLayerNormModel", "RobertaPreLayerNormPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ : Optional[Any] = [ "TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST", "TFRobertaPreLayerNormForCausalLM", "TFRobertaPreLayerNormForMaskedLM", "TFRobertaPreLayerNormForMultipleChoice", "TFRobertaPreLayerNormForQuestionAnswering", "TFRobertaPreLayerNormForSequenceClassification", "TFRobertaPreLayerNormForTokenClassification", "TFRobertaPreLayerNormMainLayer", "TFRobertaPreLayerNormModel", "TFRobertaPreLayerNormPreTrainedModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ : List[Any] = [ "FlaxRobertaPreLayerNormForCausalLM", "FlaxRobertaPreLayerNormForMaskedLM", "FlaxRobertaPreLayerNormForMultipleChoice", "FlaxRobertaPreLayerNormForQuestionAnswering", "FlaxRobertaPreLayerNormForSequenceClassification", "FlaxRobertaPreLayerNormForTokenClassification", "FlaxRobertaPreLayerNormModel", "FlaxRobertaPreLayerNormPreTrainedModel", ] if TYPE_CHECKING: from .configuration_roberta_prelayernorm import ( ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaPreLayerNormConfig, RobertaPreLayerNormOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roberta_prelayernorm import ( ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST, RobertaPreLayerNormForCausalLM, RobertaPreLayerNormForMaskedLM, RobertaPreLayerNormForMultipleChoice, RobertaPreLayerNormForQuestionAnswering, RobertaPreLayerNormForSequenceClassification, RobertaPreLayerNormForTokenClassification, RobertaPreLayerNormModel, RobertaPreLayerNormPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_roberta_prelayernorm import ( TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST, TFRobertaPreLayerNormForCausalLM, TFRobertaPreLayerNormForMaskedLM, TFRobertaPreLayerNormForMultipleChoice, TFRobertaPreLayerNormForQuestionAnswering, TFRobertaPreLayerNormForSequenceClassification, TFRobertaPreLayerNormForTokenClassification, TFRobertaPreLayerNormMainLayer, TFRobertaPreLayerNormModel, TFRobertaPreLayerNormPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_roberta_prelayernorm import ( FlaxRobertaPreLayerNormForCausalLM, FlaxRobertaPreLayerNormForMaskedLM, FlaxRobertaPreLayerNormForMultipleChoice, FlaxRobertaPreLayerNormForQuestionAnswering, 
FlaxRobertaPreLayerNormForSequenceClassification, FlaxRobertaPreLayerNormForTokenClassification, FlaxRobertaPreLayerNormModel, FlaxRobertaPreLayerNormPreTrainedModel, ) else: import sys lowerCAmelCase__ : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
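# Usage sketch (not from the source): the _LazyModule above defers importing
# each backend until first attribute access, so user code imports normally.
from transformers import RobertaPreLayerNormConfig, RobertaPreLayerNormModel

config = RobertaPreLayerNormConfig(
    num_hidden_layers=2, hidden_size=64, num_attention_heads=4, intermediate_size=128
)
model = RobertaPreLayerNormModel(config)  # resolved lazily; needs the torch backend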
712
from dataclasses import dataclass, field from typing import Optional from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser @dataclass class __snake_case : __lowerCamelCase = field( metadata={"""help""": """The output directory where the model will be written."""} ,) __lowerCamelCase = field( metadata={ """help""": ( """The encoder model checkpoint for weights initialization.""" """Don't set if you want to train an encoder model from scratch.""" ) } ,) __lowerCamelCase = field( metadata={ """help""": ( """The decoder model checkpoint for weights initialization.""" """Don't set if you want to train a decoder model from scratch.""" ) } ,) __lowerCamelCase = field( default=_lowerCamelCase ,metadata={"""help""": """Pretrained encoder config name or path if not the same as encoder_model_name"""} ) __lowerCamelCase = field( default=_lowerCamelCase ,metadata={"""help""": """Pretrained decoder config name or path if not the same as decoder_model_name"""} ) def UpperCamelCase__ ( ) -> Union[str, Any]: snake_case__ : str = HfArgumentParser((ModelArguments,) ) ((snake_case__) , ) : Dict = parser.parse_args_into_dataclasses() # Load pretrained model and tokenizer # Use explicit specified encoder config if model_args.encoder_config_name: snake_case__ : List[str] = AutoConfig.from_pretrained(model_args.encoder_config_name ) # Use pretrained encoder model's config else: snake_case__ : Optional[int] = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path ) # Use explicit specified decoder config if model_args.decoder_config_name: snake_case__ : Optional[Any] = AutoConfig.from_pretrained(model_args.decoder_config_name ) # Use pretrained decoder model's config else: snake_case__ : List[str] = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path ) # necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed snake_case__ : Any = True snake_case__ : Dict = True snake_case__ : Tuple = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained( encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path , decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path , encoder_config=A__ , decoder_config=A__ , ) # GPT2 only has bos/eos tokens but not decoder_start/pad tokens snake_case__ : Optional[Any] = decoder_config.decoder_start_token_id snake_case__ : Tuple = decoder_config.pad_token_id if decoder_start_token_id is None: snake_case__ : Optional[Any] = decoder_config.bos_token_id if pad_token_id is None: snake_case__ : int = decoder_config.eos_token_id # This is necessary to make Flax's generate() work snake_case__ : Union[str, Any] = decoder_config.eos_token_id snake_case__ : Optional[int] = decoder_start_token_id snake_case__ : int = pad_token_id snake_case__ : Tuple = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path ) snake_case__ : int = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path ) snake_case__ : Tuple = tokenizer.convert_ids_to_tokens(model.config.pad_token_id ) model.save_pretrained(model_args.output_dir ) image_processor.save_pretrained(model_args.output_dir ) tokenizer.save_pretrained(model_args.output_dir ) if __name__ == "__main__": main()
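# Usage sketch (not from the source): reloading what the script above saves;
# "my-image-captioner" is a placeholder for the --output_dir argument.
from transformers import AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel

model = FlaxVisionEncoderDecoderModel.from_pretrained("my-image-captioner")
image_processor = AutoImageProcessor.from_pretrained("my-image-captioner")
tokenizer = AutoTokenizer.from_pretrained("my-image-captioner")
# With a real image in hand, generation would look like:
# pixel_values = image_processor(images=img, return_tensors="np").pixel_values
# caption_ids = model.generate(pixel_values).sequences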
699
0
import math import os import sys def UpperCamelCase__ ( A__ ) -> str: snake_case__ : List[Any] = '' try: with open(__UpperCAmelCase , 'rb' ) as binary_file: snake_case__ : List[Any] = binary_file.read() for dat in data: snake_case__ : Any = F"""{dat:08b}""" result += curr_byte return result except OSError: print('File not accessible' ) sys.exit() def UpperCamelCase__ ( A__ , A__ , A__ , A__ ) -> None: lexicon.pop(__UpperCAmelCase ) snake_case__ : Tuple = last_match_id if math.loga(__UpperCAmelCase ).is_integer(): for curr_key in lexicon: snake_case__ : Optional[int] = '0' + lexicon[curr_key] snake_case__ : str = bin(__UpperCAmelCase )[2:] def UpperCamelCase__ ( A__ ) -> str: snake_case__ : Any = {'0': '0', '1': '1'} snake_case__ , snake_case__ : Tuple = '', '' snake_case__ : Any = len(__UpperCAmelCase ) for i in range(len(__UpperCAmelCase ) ): curr_string += data_bits[i] if curr_string not in lexicon: continue snake_case__ : List[str] = lexicon[curr_string] result += last_match_id add_key_to_lexicon(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) index += 1 snake_case__ : Dict = '' while curr_string != "" and curr_string not in lexicon: curr_string += "0" if curr_string != "": snake_case__ : Dict = lexicon[curr_string] result += last_match_id return result def UpperCamelCase__ ( A__ , A__ ) -> str: snake_case__ : Union[str, Any] = os.path.getsize(__UpperCAmelCase ) snake_case__ : List[Any] = bin(__UpperCAmelCase )[2:] snake_case__ : List[str] = len(__UpperCAmelCase ) return "0" * (length_length - 1) + file_length_binary + compressed def UpperCamelCase__ ( A__ , A__ ) -> None: snake_case__ : int = 8 try: with open(__UpperCAmelCase , 'wb' ) as opened_file: snake_case__ : Union[str, Any] = [ to_write[i : i + byte_length] for i in range(0 , len(__UpperCAmelCase ) , __UpperCAmelCase ) ] if len(result_byte_array[-1] ) % byte_length == 0: result_byte_array.append('10000000' ) else: result_byte_array[-1] += "1" + "0" * ( byte_length - len(result_byte_array[-1] ) - 1 ) for elem in result_byte_array: opened_file.write(int(__UpperCAmelCase , 2 ).to_bytes(1 , byteorder='big' ) ) except OSError: print('File not accessible' ) sys.exit() def UpperCamelCase__ ( A__ , A__ ) -> None: snake_case__ : Dict = read_file_binary(__UpperCAmelCase ) snake_case__ : Optional[int] = compress_data(__UpperCAmelCase ) snake_case__ : Tuple = add_file_length(__UpperCAmelCase , __UpperCAmelCase ) write_file_binary(__UpperCAmelCase , __UpperCAmelCase ) if __name__ == "__main__": compress(sys.argv[1], sys.argv[2])
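# Worked example (not from the source): the padding rule used by
# write_file_binary above, isolated into a self-contained helper. The bit
# string is chunked into bytes, and the end of the data is marked with a
# single '1' bit followed by zero-fill so the decoder can find the boundary.
def pad_to_bytes(bits: str, byte_length: int = 8) -> list:
    chunks = [bits[i : i + byte_length] for i in range(0, len(bits), byte_length)]
    if len(chunks[-1]) % byte_length == 0:
        chunks.append("10000000")  # data ended on a byte boundary: append a marker byte
    else:
        chunks[-1] += "1" + "0" * (byte_length - len(chunks[-1]) - 1)
    return chunks


print(pad_to_bytes("1010110"))   # ['10101101']  7 data bits + marker bit
print(pad_to_bytes("10101100"))  # ['10101100', '10000000']  full byte, marker byte appended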
713
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional

import datasets
import numpy as np
import tensorflow as tf

from transformers import (
    AutoConfig,
    AutoTokenizer,
    EvalPrediction,
    HfArgumentParser,
    PreTrainedTokenizer,
    TFAutoModelForSequenceClassification,
    TFTrainer,
    TFTrainingArguments,
)
from transformers.utils import logging as hf_logging


hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()


def get_tfds(
    train_file: str,
    eval_file: str,
    test_file: str,
    tokenizer: PreTrainedTokenizer,
    label_column_id: int,
    max_seq_length: Optional[int] = None,
):
    files = {}

    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]

    ds = datasets.load_dataset("csv", data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}

    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding="max_length"
                ),
                batched=True,
            )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]),
                    truncation=True,
                    max_length=max_seq_length,
                    padding="max_length",
                ),
                batched=True,
            )

    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )

    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))

    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )

    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))

    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TEST in transformed_ds
        else None
    )

    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))

    return train_ds, val_ds, test_ds, label2id


logger = logging.getLogger(__name__)


@dataclass
class DataTrainingArguments:
    label_column_id: int = field(metadata={"help": "Which column contains the label"})
    train_file: Optional[str] = field(default=None, metadata={"help": "The path of the training file"})
    dev_file: Optional[str] = field(default=None, metadata={"help": "The path of the development file"})
    test_file: Optional[str] = field(default=None, metadata={"help": "The path of the test file"})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )


@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s -   %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(
        f"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, "
        f"16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.

    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file,
        eval_file=data_args.dev_file,
        test_file=data_args.test_file,
        tokenizer=tokenizer,
        label_column_id=data_args.label_column_id,
        max_seq_length=data_args.max_seq_length,
    )

    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=len(label2id),
        label2id=label2id,
        id2label={id: label for label, id in label2id.items()},
        finetuning_task="text-classification",
        cache_dir=model_args.cache_dir,
    )

    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path,
            from_pt=bool(".bin" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")

        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key, value in result.items():
                logger.info(f"  {key} =  {value}")
                writer.write(f"{key} = {value}\n")

        results.update(result)

    return results


if __name__ == "__main__":
    main()
699
0
from __future__ import annotations

graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breath_first_search(self) -> None:
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        if target_vertex == self.source_vertex:
            return self.source_vertex
        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            raise ValueError(msg)
        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"


if __name__ == "__main__":
    g = Graph(graph, "G")
    g.breath_first_search()
    print(g.shortest_path("D"))
    print(g.shortest_path("G"))
    print(g.shortest_path("Foo"))
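# Illustrative sketch (not part of the original file): the queue above uses
# list.pop(0), which is O(n) per dequeue. A collections.deque gives O(1)
# popleft() with the identical traversal order.
from collections import deque


def breadth_first_search_deque(graph: dict[str, list[str]], source: str) -> dict[str, str | None]:
    parent: dict[str, str | None] = {source: None}
    queue = deque([source])
    while queue:
        vertex = queue.popleft()  # O(1) instead of list.pop(0)'s O(n)
        for adjacent in graph[vertex]:
            if adjacent not in parent:
                parent[adjacent] = vertex
                queue.append(adjacent)
    return parent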
714
from typing import List

import datasets
from datasets.tasks import AudioClassification

from ..folder_based_builder import folder_based_builder


logger = datasets.utils.logging.get_logger(__name__)


class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
    drop_labels: bool = None
    drop_metadata: bool = None


class AudioFolder(folder_based_builder.FolderBasedBuilder):
    BASE_FEATURE = datasets.Audio()
    BASE_COLUMN_NAME = "audio"
    BUILDER_CONFIG_CLASS = AudioFolderConfig
    EXTENSIONS: List[str]  # definition at the bottom of the script
    CLASSIFICATION_TASK = AudioClassification(audio_column="audio", label_column="label")


AUDIO_EXTENSIONS = [
    ".aiff", ".au", ".avr", ".caf", ".flac", ".htk", ".svx", ".mat4", ".mat5",
    ".mpc2k", ".ogg", ".paf", ".pvf", ".raw", ".rf64", ".sd2", ".sds", ".ircam",
    ".voc", ".w64", ".wav", ".nist", ".wavex", ".wve", ".xi", ".mp3", ".opus",
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
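# Illustrative usage sketch (the data_dir path is an assumption): this builder
# backs datasets.load_dataset("audiofolder", ...); labels are inferred from the
# sub-directory names (e.g. data/train/<label>/<file>.wav) unless drop_labels
# is set on the config.
from datasets import load_dataset

audio_ds = load_dataset("audiofolder", data_dir="data")  # hypothetical local layout
print(audio_ds["train"][0]["audio"], audio_ds["train"][0]["label"])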
699
0
import absl  # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk  # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy  # noqa: F401 # Here to have a nice missing dependency error message early on
import six  # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring

import datasets


_CITATION = "\\n@inproceedings{lin-2004-rouge,\n    title = \"{ROUGE}: A Package for Automatic Evaluation of Summaries\",\n    author = \"Lin, Chin-Yew\",\n    booktitle = \"Text Summarization Branches Out\",\n    month = jul,\n    year = \"2004\",\n    address = \"Barcelona, Spain\",\n    publisher = \"Association for Computational Linguistics\",\n    url = \"https://www.aclweb.org/anthology/W04-1013\",\n    pages = \"74--81\",\n}\n"

_DESCRIPTION = "\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metrics is a wrapper around Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n"

_KWARGS_DESCRIPTION = "\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n    predictions: list of predictions to score. Each prediction\n        should be a string with tokens separated by spaces.\n    references: list of reference for each prediction. Each\n        reference should be a string with tokens separated by spaces.\n    rouge_types: A list of rouge types to calculate.\n        Valid names:\n        `\"rouge{n}\"` (e.g. `\"rouge1\"`, `\"rouge2\"`) where: {n} is the n-gram based scoring,\n        `\"rougeL\"`: Longest common subsequence based scoring.\n        `\"rougeLSum\"`: rougeLsum splits text using `\"\n\"`.\n        See details in https://github.com/huggingface/datasets/issues/617\n    use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n    use_aggregator: Return aggregates if this is set to True\nReturns:\n    rouge1: rouge_1 (precision, recall, f1),\n    rouge2: rouge_2 (precision, recall, f1),\n    rougeL: rouge_l (precision, recall, f1),\n    rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n    >>> rouge = datasets.load_metric('rouge')\n    >>> predictions = [\"hello there\", \"general kenobi\"]\n    >>> references = [\"hello there\", \"general kenobi\"]\n    >>> results = rouge.compute(predictions=predictions, references=references)\n    >>> print(list(results.keys()))\n    ['rouge1', 'rouge2', 'rougeL', 'rougeLsum']\n    >>> print(results[\"rouge1\"])\n    AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n    >>> print(results[\"rouge1\"].mid.fmeasure)\n    1.0\n"


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Rouge(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/google-research/google-research/tree/master/rouge"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/ROUGE_(metric)",
                "https://github.com/google-research/google-research/tree/master/rouge",
            ],
        )

    def _compute(self, predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False):
        if rouge_types is None:
            rouge_types = ["rouge1", "rouge2", "rougeL", "rougeLsum"]

        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer)
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []

        for ref, pred in zip(references, predictions):
            score = scorer.score(ref, pred)
            if use_aggregator:
                aggregator.add_scores(score)
            else:
                scores.append(score)

        if use_aggregator:
            result = aggregator.aggregate()
        else:
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]

        return result
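# Illustrative sketch: with use_aggregator=False the metric above returns one
# rouge_score Score tuple per prediction instead of bootstrap aggregates.
import datasets

rouge = datasets.load_metric("rouge")
scores = rouge.compute(
    predictions=["hello there", "general kenobi"],
    references=["hello there", "general kenobi"],
    use_aggregator=False,
)
print(scores["rouge1"])  # a list with one Score(precision, recall, fmeasure) per pair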
715
import random
import unittest

import torch

from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device

from ..pipeline_params import (
    TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin


@skip_mps
class IFInpaintingPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
699
0
import inspect import unittest from transformers import ConvNextConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class __snake_case : def __init__( self , __UpperCamelCase , __UpperCamelCase=13 , __UpperCamelCase=32 , __UpperCamelCase=3 , __UpperCamelCase=4 , __UpperCamelCase=[10, 20, 30, 40] , __UpperCamelCase=[2, 2, 3, 2] , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=37 , __UpperCamelCase="gelu" , __UpperCamelCase=10 , __UpperCamelCase=0.0_2 , __UpperCamelCase=["stage2", "stage3", "stage4"] , __UpperCamelCase=[2, 3, 4] , __UpperCamelCase=None , ) -> Dict: '''simple docstring''' snake_case__ : int = parent snake_case__ : Optional[Any] = batch_size snake_case__ : Dict = image_size snake_case__ : Union[str, Any] = num_channels snake_case__ : Any = num_stages snake_case__ : List[str] = hidden_sizes snake_case__ : str = depths snake_case__ : List[Any] = is_training snake_case__ : Optional[int] = use_labels snake_case__ : Tuple = intermediate_size snake_case__ : Tuple = hidden_act snake_case__ : Union[str, Any] = num_labels snake_case__ : Tuple = initializer_range snake_case__ : List[Any] = out_features snake_case__ : Union[str, Any] = out_indices snake_case__ : int = scope def __a ( self ) -> str: '''simple docstring''' snake_case__ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) snake_case__ : Optional[Any] = None if self.use_labels: snake_case__ : int = ids_tensor([self.batch_size] , self.num_labels ) snake_case__ : List[Any] = self.get_config() return config, pixel_values, labels def __a ( self ) -> Tuple: '''simple docstring''' return ConvNextConfig( num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=_A , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , ) def __a ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Tuple: '''simple docstring''' snake_case__ : str = ConvNextModel(config=_A ) model.to(_A ) model.eval() snake_case__ : str = model(_A ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def __a ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Optional[Any]: '''simple docstring''' snake_case__ : int = ConvNextForImageClassification(_A ) model.to(_A ) model.eval() snake_case__ : int = model(_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __a ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> List[str]: '''simple docstring''' snake_case__ : List[Any] = ConvNextBackbone(config=_A 
) model.to(_A ) model.eval() snake_case__ : List[str] = model(_A ) # verify hidden states self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None snake_case__ : Tuple = None snake_case__ : Optional[Any] = ConvNextBackbone(config=_A ) model.to(_A ) model.eval() snake_case__ : List[Any] = model(_A ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def __a ( self ) -> Any: '''simple docstring''' snake_case__ : Tuple = self.prepare_config_and_inputs() snake_case__ : Any = config_and_inputs snake_case__ : str = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class __snake_case ( a__ ,a__ ,unittest.TestCase ): __lowerCamelCase = ( ( ConvNextModel, ConvNextForImageClassification, ConvNextBackbone, ) if is_torch_available() else () ) __lowerCamelCase = ( {"""feature-extraction""": ConvNextModel, """image-classification""": ConvNextForImageClassification} if is_torch_available() else {} ) __lowerCamelCase = True __lowerCamelCase = False __lowerCamelCase = False __lowerCamelCase = False __lowerCamelCase = False def __a ( self ) -> Any: '''simple docstring''' snake_case__ : str = ConvNextModelTester(self ) snake_case__ : Any = ConfigTester(self , config_class=_A , has_text_modality=_A , hidden_size=37 ) def __a ( self ) -> Tuple: '''simple docstring''' self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def __a ( self ) -> Tuple: '''simple docstring''' return @unittest.skip(reason='ConvNext does not use inputs_embeds' ) def __a ( self ) -> Union[str, Any]: '''simple docstring''' pass @unittest.skip(reason='ConvNext does not support input and output embeddings' ) def __a ( self ) -> Dict: '''simple docstring''' pass @unittest.skip(reason='ConvNext does not use feedforward chunking' ) def __a ( self ) -> Any: '''simple docstring''' pass def __a ( self ) -> List[Any]: '''simple docstring''' snake_case__ : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case__ : Union[str, Any] = model_class(_A ) snake_case__ : Tuple = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic snake_case__ : Optional[Any] = [*signature.parameters.keys()] snake_case__ : List[str] = ['pixel_values'] self.assertListEqual(arg_names[:1] , _A ) def __a ( self ) -> Dict: '''simple docstring''' snake_case__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_A ) def __a ( self ) -> Optional[Any]: '''simple docstring''' snake_case__ : Tuple = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*_A ) def __a ( self ) -> List[Any]: '''simple docstring''' def check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): snake_case__ : List[str] = model_class(_A ) model.to(_A ) model.eval() with torch.no_grad(): snake_case__ : Union[str, Any] = model(**self._prepare_for_class(_A , _A ) ) snake_case__ : Dict = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states snake_case__ : Optional[Any] = self.model_tester.num_stages self.assertEqual(len(_A ) , expected_num_stages + 1 ) # ConvNext's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) snake_case__ : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case__ : Any = True check_hidden_states_output(_A , _A , _A ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] snake_case__ : int = True check_hidden_states_output(_A , _A , _A ) def __a ( self ) -> List[Any]: '''simple docstring''' snake_case__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_A ) @slow def __a ( self ) -> List[Any]: '''simple docstring''' for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case__ : List[Any] = ConvNextModel.from_pretrained(_A ) self.assertIsNotNone(_A ) def UpperCamelCase__ ( ) -> int: snake_case__ : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class __snake_case ( unittest.TestCase ): @cached_property def __a ( self ) -> int: '''simple docstring''' return AutoImageProcessor.from_pretrained('facebook/convnext-tiny-224' ) if is_vision_available() else None @slow def __a ( self ) -> List[Any]: '''simple docstring''' snake_case__ : str = ConvNextForImageClassification.from_pretrained('facebook/convnext-tiny-224' ).to(_A ) snake_case__ : Union[str, Any] = self.default_image_processor snake_case__ : Optional[int] = prepare_img() snake_case__ : Union[str, Any] = image_processor(images=_A , return_tensors='pt' ).to(_A ) # forward pass with torch.no_grad(): snake_case__ : str = model(**_A ) # verify the logits snake_case__ : List[Any] = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , _A ) snake_case__ : List[Any] = torch.tensor([-0.0_2_6_0, -0.4_7_3_9, 0.1_9_1_1] ).to(_A ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _A , atol=1E-4 ) ) @require_torch class __snake_case ( unittest.TestCase ,a__ ): __lowerCamelCase = (ConvNextBackbone,) if is_torch_available() else () __lowerCamelCase = ConvNextConfig __lowerCamelCase = False def __a ( self ) -> Tuple: '''simple docstring''' snake_case__ : Optional[int] = ConvNextModelTester(self )
716
import unittest from transformers import BertGenerationTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin lowerCAmelCase__ : List[Any] = '''▁''' lowerCAmelCase__ : int = get_tests_dir('''fixtures/test_sentencepiece.model''') @require_sentencepiece class __snake_case ( _lowerCamelCase ,unittest.TestCase ): __lowerCamelCase = BertGenerationTokenizer __lowerCamelCase = False __lowerCamelCase = True def __a ( self ) -> Optional[int]: '''simple docstring''' super().setUp() snake_case__ : str = BertGenerationTokenizer(__UpperCamelCase , keep_accents=__UpperCamelCase ) tokenizer.save_pretrained(self.tmpdirname ) def __a ( self ) -> Optional[int]: '''simple docstring''' snake_case__ : List[str] = '<s>' snake_case__ : Dict = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCamelCase ) , __UpperCamelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCamelCase ) , __UpperCamelCase ) def __a ( self ) -> List[str]: '''simple docstring''' snake_case__ : Tuple = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '<unk>' ) self.assertEqual(vocab_keys[1] , '<s>' ) self.assertEqual(vocab_keys[-1] , '<pad>' ) self.assertEqual(len(__UpperCamelCase ) , 1002 ) def __a ( self ) -> int: '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 1000 ) def __a ( self ) -> Tuple: '''simple docstring''' snake_case__ : Optional[Any] = BertGenerationTokenizer(__UpperCamelCase , keep_accents=__UpperCamelCase ) snake_case__ : int = tokenizer.tokenize('This is a test' ) self.assertListEqual(__UpperCamelCase , ['▁This', '▁is', '▁a', '▁t', 'est'] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(__UpperCamelCase ) , [285, 46, 10, 170, 382] , ) snake_case__ : Any = tokenizer.tokenize('I was born in 92000, and this is falsé.' ) self.assertListEqual( __UpperCamelCase , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.', ] , ) snake_case__ : Optional[Any] = tokenizer.convert_tokens_to_ids(__UpperCamelCase ) self.assertListEqual( __UpperCamelCase , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , ) snake_case__ : int = tokenizer.convert_ids_to_tokens(__UpperCamelCase ) self.assertListEqual( __UpperCamelCase , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.', ] , ) @cached_property def __a ( self ) -> Dict: '''simple docstring''' return BertGenerationTokenizer.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' ) @slow def __a ( self ) -> Any: '''simple docstring''' snake_case__ : int = 'Hello World!' snake_case__ : Union[str, Any] = [18536, 2260, 101] self.assertListEqual(__UpperCamelCase , self.big_tokenizer.encode(__UpperCamelCase ) ) @slow def __a ( self ) -> Optional[int]: '''simple docstring''' snake_case__ : str = ( 'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . 
Also we will' ' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth' ) snake_case__ : List[Any] = [ 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172, ] self.assertListEqual(__UpperCamelCase , self.big_tokenizer.encode(__UpperCamelCase ) ) @require_torch @slow def __a ( self ) -> List[str]: '''simple docstring''' import torch from transformers import BertGenerationConfig, BertGenerationEncoder # Build sequence snake_case__ : Optional[int] = list(self.big_tokenizer.get_vocab().keys() )[:10] snake_case__ : Optional[int] = ' '.join(__UpperCamelCase ) snake_case__ : int = self.big_tokenizer.encode_plus(__UpperCamelCase , return_tensors='pt' , return_token_type_ids=__UpperCamelCase ) snake_case__ : Tuple = self.big_tokenizer.batch_encode_plus( [sequence + ' ' + sequence] , return_tensors='pt' , return_token_type_ids=__UpperCamelCase ) snake_case__ : Dict = BertGenerationConfig() snake_case__ : List[str] = BertGenerationEncoder(__UpperCamelCase ) assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size with torch.no_grad(): model(**__UpperCamelCase ) model(**__UpperCamelCase ) @slow def __a ( self ) -> Dict: '''simple docstring''' snake_case__ : Optional[int] = {'input_ids': [[39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114], [448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__UpperCamelCase , model_name='google/bert_for_seq_generation_L-24_bbc_encoder' , revision='c817d1fd1be2ffa69431227a1fe320544943d4db' , )
699
0
import torch

from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel


class MCLIPConfig(XLMRobertaConfig):
    model_type = "M-CLIP"

    def __init__(self, transformerDimSize=1024, imageDimSize=768, **kwargs):
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)


class MultilingualCLIP(PreTrainedModel):
    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims
        )

    def forward(self, input_ids, attention_mask):
        embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0]
        # mean-pool the token embeddings, weighted by the attention mask
        embs = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        return self.LinearTransformation(embs), embs
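# Illustrative usage sketch (the checkpoint id is an assumption, not taken from
# this file): encode a batch of sentences into the projected CLIP space.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("M-CLIP/XLM-Roberta-Large-Vit-B-32")  # hypothetical id
model = MultilingualCLIP.from_pretrained("M-CLIP/XLM-Roberta-Large-Vit-B-32")  # hypothetical id
batch = tok(["une photo d'un chat"], return_tensors="pt", padding=True)
projected, pooled = model(batch["input_ids"], batch["attention_mask"])
print(projected.shape)  # (batch, config.numDims)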
717
import random import torch from huggingface_hub import HfApi from diffusers import UNetaDModel lowerCAmelCase__ : List[str] = HfApi() lowerCAmelCase__ : str = {} # fmt: off lowerCAmelCase__ : int = torch.tensor([ -0.75_15, -1.68_83, 0.24_20, 0.03_00, 0.63_47, 1.34_33, -1.17_43, -3.74_67, 1.23_42, -2.24_85, 0.46_36, 0.80_76, -0.79_91, 0.39_69, 0.84_98, 0.91_89, -1.88_87, -3.35_22, 0.76_39, 0.20_40, 0.62_71, -2.71_48, -1.63_16, 3.08_39, 0.31_86, 0.27_21, -0.97_59, -1.24_61, 2.62_57, 1.35_57 ]) lowerCAmelCase__ : Dict = torch.tensor([ -2.36_39, -2.53_44, 0.00_54, -0.66_74, 1.59_90, 1.01_58, 0.31_24, -2.14_36, 1.87_95, -2.54_29, -0.15_66, -0.39_73, 1.24_90, 2.64_47, 1.22_83, -0.52_08, -2.81_54, -3.51_19, 2.38_38, 1.20_33, 1.72_01, -2.12_56, -1.45_76, 2.79_48, 2.42_04, -0.97_52, -1.25_46, 0.80_27, 3.27_58, 3.13_65 ]) lowerCAmelCase__ : Dict = torch.tensor([ -0.65_31, -0.68_91, -0.31_72, -0.53_75, -0.91_40, -0.53_67, -0.11_75, -0.78_69, -0.38_08, -0.45_13, -0.20_98, -0.00_83, 0.31_83, 0.51_40, 0.22_47, -0.13_04, -0.13_02, -0.28_02, -0.20_84, -0.20_25, -0.49_67, -0.48_73, -0.08_61, 0.69_25, 0.02_50, 0.12_90, -0.15_43, 0.63_16, 1.04_60, 1.49_43 ]) lowerCAmelCase__ : List[str] = torch.tensor([ 0.09_11, 0.11_07, 0.01_82, 0.04_35, -0.08_05, -0.06_08, 0.03_81, 0.21_72, -0.02_80, 0.13_27, -0.02_99, -0.02_55, -0.00_50, -0.11_70, -0.10_46, 0.03_09, 0.13_67, 0.17_28, -0.05_33, -0.07_48, -0.05_34, 0.16_24, 0.03_84, -0.18_05, -0.07_07, 0.06_42, 0.02_20, -0.01_34, -0.13_33, -0.15_05 ]) lowerCAmelCase__ : Union[str, Any] = torch.tensor([ 0.13_21, 0.13_37, 0.04_40, 0.06_22, -0.05_91, -0.03_70, 0.05_03, 0.21_33, -0.01_77, 0.14_15, -0.01_16, -0.01_12, 0.00_44, -0.09_80, -0.07_89, 0.03_95, 0.15_02, 0.17_85, -0.04_88, -0.05_14, -0.04_04, 0.15_39, 0.04_54, -0.15_59, -0.06_65, 0.06_59, 0.03_83, -0.00_05, -0.12_66, -0.13_86 ]) lowerCAmelCase__ : List[Any] = torch.tensor([ 0.11_54, 0.12_18, 0.03_07, 0.05_26, -0.07_11, -0.05_41, 0.03_66, 0.20_78, -0.02_67, 0.13_17, -0.02_26, -0.01_93, -0.00_14, -0.10_55, -0.09_02, 0.03_30, 0.13_91, 0.17_09, -0.05_62, -0.06_93, -0.05_60, 0.14_82, 0.03_81, -0.16_83, -0.06_81, 0.06_61, 0.03_31, -0.00_46, -0.12_68, -0.14_31 ]) lowerCAmelCase__ : Optional[Any] = torch.tensor([ 0.11_92, 0.12_40, 0.04_14, 0.06_06, -0.05_57, -0.04_12, 0.04_30, 0.20_42, -0.02_00, 0.13_85, -0.01_15, -0.01_32, 0.00_17, -0.09_65, -0.08_02, 0.03_98, 0.14_33, 0.17_47, -0.04_58, -0.05_33, -0.04_07, 0.15_45, 0.04_19, -0.15_74, -0.06_45, 0.06_26, 0.03_41, -0.00_10, -0.11_99, -0.13_90 ]) lowerCAmelCase__ : List[str] = torch.tensor([ 0.10_75, 0.10_74, 0.02_05, 0.04_31, -0.07_74, -0.06_07, 0.02_98, 0.20_42, -0.03_20, 0.12_67, -0.02_81, -0.02_50, -0.00_64, -0.10_91, -0.09_46, 0.02_90, 0.13_28, 0.16_50, -0.05_80, -0.07_38, -0.05_86, 0.14_40, 0.03_37, -0.17_46, -0.07_12, 0.06_05, 0.02_50, -0.00_99, -0.13_16, -0.14_73 ]) lowerCAmelCase__ : List[str] = torch.tensor([ -1.45_72, -2.04_81, -0.04_14, -0.60_05, 1.41_36, 0.58_48, 0.40_28, -2.73_30, 1.22_12, -2.12_28, 0.21_55, 0.40_39, 0.76_62, 2.05_35, 0.74_77, -0.32_43, -2.17_58, -2.76_48, 1.69_47, 0.70_26, 1.23_38, -1.60_78, -0.86_82, 2.28_10, 1.85_74, -0.57_18, -0.55_86, -0.01_86, 2.34_15, 2.12_51]) lowerCAmelCase__ : List[Any] = torch.tensor([ -1.36_90, -1.97_20, -0.40_90, -0.69_66, 1.46_60, 0.99_38, -0.13_85, -2.73_24, 0.77_36, -1.89_17, 0.29_23, 0.42_93, 0.16_93, 1.41_12, 1.18_87, -0.31_81, -2.21_60, -2.63_81, 1.31_70, 0.81_63, 0.92_40, -1.65_44, -0.60_99, 2.52_59, 1.64_30, -0.90_90, -0.93_92, -0.01_26, 2.42_68, 2.32_66 ]) lowerCAmelCase__ : Tuple = torch.tensor([ -1.35_25, 
-1.96_28, -0.39_56, -0.68_60, 1.46_64, 1.00_14, -0.12_59, -2.72_12, 0.77_72, -1.88_11, 0.29_96, 0.43_88, 0.17_04, 1.40_29, 1.17_01, -0.30_27, -2.20_53, -2.62_87, 1.33_50, 0.81_31, 0.92_74, -1.62_92, -0.60_98, 2.51_31, 1.65_05, -0.89_58, -0.92_98, -0.01_51, 2.42_57, 2.33_55 ]) lowerCAmelCase__ : List[str] = torch.tensor([ -2.05_85, -2.78_97, -0.28_50, -0.89_40, 1.90_52, 0.57_02, 0.63_45, -3.89_59, 1.59_32, -3.23_19, 0.19_74, 0.02_87, 1.75_66, 2.65_43, 0.83_87, -0.53_51, -3.27_36, -4.33_75, 2.90_29, 1.63_90, 1.46_40, -2.17_01, -1.90_13, 2.93_41, 3.49_81, -0.62_55, -1.16_44, -0.15_91, 3.70_97, 3.20_66 ]) lowerCAmelCase__ : Dict = torch.tensor([ -2.31_39, -2.55_94, -0.01_97, -0.67_85, 1.70_01, 1.16_06, 0.30_75, -2.17_40, 1.80_71, -2.56_30, -0.09_26, -0.38_11, 1.21_16, 2.62_46, 1.27_31, -0.53_98, -2.81_53, -3.61_40, 2.38_93, 1.32_62, 1.62_58, -2.18_56, -1.32_67, 2.83_95, 2.37_79, -1.06_23, -1.24_68, 0.89_59, 3.33_67, 3.22_43 ]) lowerCAmelCase__ : Dict = torch.tensor([ -2.06_28, -2.76_67, -0.20_89, -0.82_63, 2.05_39, 0.59_92, 0.64_95, -3.83_36, 1.60_25, -3.28_17, 0.17_21, -0.06_33, 1.75_16, 2.70_39, 0.81_00, -0.59_08, -3.21_13, -4.43_43, 2.92_57, 1.36_32, 1.55_62, -2.14_89, -1.98_94, 3.05_60, 3.33_96, -0.73_28, -1.04_17, 0.03_83, 3.70_93, 3.23_43 ]) lowerCAmelCase__ : Any = torch.tensor([ -1.45_74, -2.05_69, -0.04_73, -0.61_17, 1.40_18, 0.57_69, 0.41_29, -2.73_44, 1.22_41, -2.13_97, 0.20_00, 0.39_37, 0.76_16, 2.04_53, 0.73_24, -0.33_91, -2.17_46, -2.77_44, 1.69_63, 0.69_21, 1.21_87, -1.61_72, -0.88_77, 2.24_39, 1.84_71, -0.58_39, -0.56_05, -0.04_64, 2.32_50, 2.12_19 ]) # fmt: on lowerCAmelCase__ : Any = api.list_models(filter='''diffusers''') for mod in models: if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256": lowerCAmelCase__ : List[str] = '''/home/patrick/google_checkpoints/''' + mod.modelId.split('''/''')[-1] print(F'''Started running {mod.modelId}!!!''') if mod.modelId.startswith('''CompVis'''): lowerCAmelCase__ : int = UNetaDModel.from_pretrained(local_checkpoint, subfolder='''unet''') else: lowerCAmelCase__ : str = UNetaDModel.from_pretrained(local_checkpoint) torch.manual_seed(0) random.seed(0) lowerCAmelCase__ : Any = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size) lowerCAmelCase__ : List[str] = torch.tensor([10] * noise.shape[0]) with torch.no_grad(): lowerCAmelCase__ : int = model(noise, time_step).sample assert torch.allclose( logits[0, 0, 0, :30], results['''_'''.join('''_'''.join(mod.modelId.split('''/''')).split('''-'''))], atol=1E-3 ) print(F'''{mod.modelId} has passed successfully!!!''')
699
0
import numpy as np
from nltk.translate import meteor_score

import datasets
from datasets.config import importlib_metadata, version


NLTK_VERSION = version.parse(importlib_metadata.version("nltk"))
if NLTK_VERSION >= version.Version("3.6.4"):
    from nltk import word_tokenize


_CITATION = "\\n@inproceedings{banarjee2005,\n    title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n    author = {Banerjee, Satanjeev and Lavie, Alon},\n    booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n    month = jun,\n    year = {2005},\n    address = {Ann Arbor, Michigan},\n    publisher = {Association for Computational Linguistics},\n    url = {https://www.aclweb.org/anthology/W05-0909},\n    pages = {65--72},\n}\n"

_DESCRIPTION = "\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n"

_KWARGS_DESCRIPTION = "\nComputes METEOR score of translated segments against one or more references.\nArgs:\n    predictions: list of predictions to score. Each prediction\n        should be a string with tokens separated by spaces.\n    references: list of reference for each prediction. Each\n        reference should be a string with tokens separated by spaces.\n    alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n    beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n    gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n    'meteor': meteor score.\nExamples:\n\n    >>> meteor = datasets.load_metric('meteor')\n    >>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]\n    >>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]\n    >>> results = meteor.compute(predictions=predictions, references=references)\n    >>> print(round(results[\"meteor\"], 4))\n    0.6944\n"


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Meteor(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"],
            reference_urls=[
                "https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score",
                "https://en.wikipedia.org/wiki/METEOR",
            ],
        )

    def _download_and_prepare(self, dl_manager):
        import nltk

        nltk.download("wordnet")
        if NLTK_VERSION >= version.Version("3.6.5"):
            nltk.download("punkt")
        if NLTK_VERSION >= version.Version("3.6.6"):
            nltk.download("omw-1.4")

    def _compute(self, predictions, references, alpha=0.9, beta=3, gamma=0.5):
        if NLTK_VERSION >= version.Version("3.6.5"):
            scores = [
                meteor_score.single_meteor_score(
                    word_tokenize(ref), word_tokenize(pred), alpha=alpha, beta=beta, gamma=gamma
                )
                for ref, pred in zip(references, predictions)
            ]
        else:
            scores = [
                meteor_score.single_meteor_score(ref, pred, alpha=alpha, beta=beta, gamma=gamma)
                for ref, pred in zip(references, predictions)
            ]

        return {"meteor": np.mean(scores)}
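# Illustrative sketch of what a single pair reduces to under the hood on the
# NLTK >= 3.6.5 code path (assumes the wordnet/punkt data has been downloaded):
# tokenize first, then score one hypothesis against one reference.
from nltk import word_tokenize
from nltk.translate.meteor_score import single_meteor_score

score = single_meteor_score(
    word_tokenize("It is a guide to action that ensures that the military will forever heed Party commands"),
    word_tokenize("It is a guide to action which ensures that the military always obeys the commands of the party"),
)
print(round(score, 4))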
718
import warnings

from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor


logger = logging.get_logger(__name__)


class PerceiverFeatureExtractor(PerceiverImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PerceiverImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
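# Illustrative sketch: instantiating the shim emits the FutureWarning while
# otherwise behaving exactly like PerceiverImageProcessor (vision extras
# installed is assumed).
import warnings as _warnings

with _warnings.catch_warnings(record=True) as caught:
    _warnings.simplefilter("always")
    extractor = PerceiverFeatureExtractor()
assert any(issubclass(w.category, FutureWarning) for w in caught)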
699
0
def solution() -> int:
    """Returns the number of Sundays that fell on the first of the month during
    the twentieth century (1 Jan 1901 to 31 Dec 2000)."""
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6
    month = 1
    year = 1901
    sundays = 0

    while year < 2001:
        day += 7

        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]

        if month > 12:
            year += 1
            month = 1

        if year < 2001 and day == 1:
            sundays += 1
    return sundays


if __name__ == "__main__":
    print(solution())
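# Illustrative cross-check (not in the original): the standard library counts
# the same thing directly; Project Euler 19's answer is 171.
import datetime

count = sum(
    1
    for year in range(1901, 2001)
    for month in range(1, 13)
    if datetime.date(year, month, 1).weekday() == 6  # Monday=0 ... Sunday=6
)
print(count)  # 171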
719
import io import itertools import json from dataclasses import dataclass from typing import Optional import pyarrow as pa import pyarrow.json as paj import datasets from datasets.table import table_cast from datasets.utils.file_utils import readline lowerCAmelCase__ : List[Any] = datasets.utils.logging.get_logger(__name__) @dataclass class __snake_case ( datasets.BuilderConfig ): __lowerCamelCase = None __lowerCamelCase = "utf-8" __lowerCamelCase = None __lowerCamelCase = None __lowerCamelCase = True # deprecated __lowerCamelCase = None # deprecated __lowerCamelCase = 10 << 20 # 10MB __lowerCamelCase = None class __snake_case ( datasets.ArrowBasedBuilder ): __lowerCamelCase = JsonConfig def __a ( self ) -> Optional[Any]: '''simple docstring''' if self.config.block_size is not None: logger.warning('The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead' ) snake_case__ : str = self.config.block_size if self.config.use_threads is not True: logger.warning( 'The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.' ) if self.config.newlines_in_values is not None: raise ValueError('The JSON loader parameter `newlines_in_values` is no longer supported' ) return datasets.DatasetInfo(features=self.config.features ) def __a ( self , __UpperCamelCase ) -> Dict: '''simple docstring''' if not self.config.data_files: raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" ) snake_case__ : int = dl_manager.download_and_extract(self.config.data_files ) if isinstance(__UpperCamelCase , (str, list, tuple) ): snake_case__ : Any = data_files if isinstance(__UpperCamelCase , __UpperCamelCase ): snake_case__ : Optional[Any] = [files] snake_case__ : List[str] = [dl_manager.iter_files(__UpperCamelCase ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'files': files} )] snake_case__ : List[Any] = [] for split_name, files in data_files.items(): if isinstance(__UpperCamelCase , __UpperCamelCase ): snake_case__ : List[Any] = [files] snake_case__ : Any = [dl_manager.iter_files(__UpperCamelCase ) for file in files] splits.append(datasets.SplitGenerator(name=__UpperCamelCase , gen_kwargs={'files': files} ) ) return splits def __a ( self , __UpperCamelCase ) -> pa.Table: '''simple docstring''' if self.config.features is not None: # adding missing columns for column_name in set(self.config.features ) - set(pa_table.column_names ): snake_case__ : List[Any] = self.config.features.arrow_schema.field(__UpperCamelCase ).type snake_case__ : List[str] = pa_table.append_column(__UpperCamelCase , pa.array([None] * len(__UpperCamelCase ) , type=__UpperCamelCase ) ) # more expensive cast to support nested structures with keys in a different order # allows str <-> int/float or str to Audio for example snake_case__ : List[str] = table_cast(__UpperCamelCase , self.config.features.arrow_schema ) return pa_table def __a ( self , __UpperCamelCase ) -> int: '''simple docstring''' for file_idx, file in enumerate(itertools.chain.from_iterable(__UpperCamelCase ) ): # If the file is one json object and if we need to look at the list of items in one specific field if self.config.field is not None: with open(__UpperCamelCase , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f: snake_case__ : Union[str, Any] = json.load(__UpperCamelCase ) # We keep only the field we are interested in snake_case__ : Tuple = dataset[self.config.field] # We accept two format: a list 
of dicts or a dict of lists if isinstance(__UpperCamelCase , (list, tuple) ): snake_case__ : List[Any] = set().union(*[row.keys() for row in dataset] ) snake_case__ : List[Any] = {col: [row.get(__UpperCamelCase ) for row in dataset] for col in keys} else: snake_case__ : List[Any] = dataset snake_case__ : Dict = pa.Table.from_pydict(__UpperCamelCase ) yield file_idx, self._cast_table(__UpperCamelCase ) # If the file has one json object per line else: with open(__UpperCamelCase , 'rb' ) as f: snake_case__ : Optional[int] = 0 # Use block_size equal to the chunk size divided by 32 to leverage multithreading # Set a default minimum value of 16kB if the chunk size is really small snake_case__ : Tuple = max(self.config.chunksize // 32 , 16 << 10 ) snake_case__ : Optional[Any] = ( self.config.encoding_errors if self.config.encoding_errors is not None else 'strict' ) while True: snake_case__ : Optional[int] = f.read(self.config.chunksize ) if not batch: break # Finish current line try: batch += f.readline() except (AttributeError, io.UnsupportedOperation): batch += readline(__UpperCamelCase ) # PyArrow only accepts utf-8 encoded bytes if self.config.encoding != "utf-8": snake_case__ : int = batch.decode(self.config.encoding , errors=__UpperCamelCase ).encode('utf-8' ) try: while True: try: snake_case__ : List[str] = paj.read_json( io.BytesIO(__UpperCamelCase ) , read_options=paj.ReadOptions(block_size=__UpperCamelCase ) ) break except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e: if ( isinstance(__UpperCamelCase , pa.ArrowInvalid ) and "straddling" not in str(__UpperCamelCase ) or block_size > len(__UpperCamelCase ) ): raise else: # Increase the block size in case it was too small. # The block size will be reset for the next file. logger.debug( F"""Batch of {len(__UpperCamelCase )} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.""" ) block_size *= 2 except pa.ArrowInvalid as e: try: with open( __UpperCamelCase , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f: snake_case__ : Tuple = json.load(__UpperCamelCase ) except json.JSONDecodeError: logger.error(F"""Failed to read file '{file}' with error {type(__UpperCamelCase )}: {e}""" ) raise e # If possible, parse the file as a list of json objects and exit the loop if isinstance(__UpperCamelCase , __UpperCamelCase ): # list is the only sequence type supported in JSON try: snake_case__ : str = set().union(*[row.keys() for row in dataset] ) snake_case__ : Union[str, Any] = {col: [row.get(__UpperCamelCase ) for row in dataset] for col in keys} snake_case__ : List[str] = pa.Table.from_pydict(__UpperCamelCase ) except (pa.ArrowInvalid, AttributeError) as e: logger.error(F"""Failed to read file '{file}' with error {type(__UpperCamelCase )}: {e}""" ) raise ValueError(F"""Not able to read records in the JSON file at {file}.""" ) from None yield file_idx, self._cast_table(__UpperCamelCase ) break else: logger.error(F"""Failed to read file '{file}' with error {type(__UpperCamelCase )}: {e}""" ) raise ValueError( F"""Not able to read records in the JSON file at {file}. """ F"""You should probably indicate the field of the JSON file containing your records. """ F"""This JSON file contain the following fields: {str(list(dataset.keys() ) )}. """ F"""Select the correct one and provide it as `field='XXX'` to the dataset loading method. 
""" ) from None # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(__UpperCamelCase ) batch_idx += 1
699
0
import re
import subprocess
import sys


fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = subprocess.check_output(f"git diff --name-only {fork_point_sha}".split()).decode("utf-8").split()

joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")

relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
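# Illustrative invocation sketch (script name and directories are assumptions):
# print the .py files under the given top-level directories that changed since
# the fork point from main, e.g.
#
#   python get_modified_files.py utils src tests examples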
720
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase__ : Any = {'''configuration_xglm''': ['''XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XGLMConfig''']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ : str = ['''XGLMTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ : Dict = ['''XGLMTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ : Optional[int] = [ '''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''', '''XGLMForCausalLM''', '''XGLMModel''', '''XGLMPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ : Dict = [ '''FlaxXGLMForCausalLM''', '''FlaxXGLMModel''', '''FlaxXGLMPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ : Dict = [ '''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFXGLMForCausalLM''', '''TFXGLMModel''', '''TFXGLMPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xglm import XGLMTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xglm_fast import XGLMTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, TFXGLMPreTrainedModel, ) else: import sys lowerCAmelCase__ : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
699
0
import warnings
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "nvidia/segformer-b0-finetuned-ade-512-512": (
        "https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
    ),
    # See all SegFormer models at https://huggingface.co/models?filter=segformer
}


class SegformerConfig(PretrainedConfig):
    model_type = "segformer"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=256,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                "Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"
                " removed, as the behaviour will default to that of reshape_last_stage = True.",
                FutureWarning,
            )

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get("reshape_last_stage", True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class SegformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
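# Illustrative sketch: instantiate the config with defaults and inspect a few
# fields; no model weights are involved.
config = SegformerConfig()
print(config.model_type)          # "segformer"
print(config.hidden_sizes)        # [32, 64, 160, 256]
print(config.reshape_last_stage)  # True by default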
721
from __future__ import annotations import random # Maximum size of the population. Bigger could be faster but is more memory expensive. lowerCAmelCase__ : Dict = 2_00 # Number of elements selected in every generation of evolution. The selection takes # place from best to worst of that generation and must be smaller than N_POPULATION. lowerCAmelCase__ : List[str] = 50 # Probability that an element of a generation can mutate, changing one of its genes. # This will guarantee that all genes will be used during evolution. lowerCAmelCase__ : List[str] = 0.4 # Just a seed to improve randomness required by the algorithm. random.seed(random.randint(0, 10_00)) def UpperCamelCase__ ( A__ , A__ ) -> tuple[str, float]: snake_case__ : Tuple = len([g for position, g in enumerate(A__ ) if g == main_target[position]] ) return (item, float(A__ )) def UpperCamelCase__ ( A__ , A__ ) -> tuple[str, str]: snake_case__ : str = random.randint(0 , len(A__ ) - 1 ) snake_case__ : int = parent_a[:random_slice] + parent_a[random_slice:] snake_case__ : Any = parent_a[:random_slice] + parent_a[random_slice:] return (child_a, child_a) def UpperCamelCase__ ( A__ , A__ ) -> str: snake_case__ : List[Any] = list(A__ ) if random.uniform(0 , 1 ) < MUTATION_PROBABILITY: snake_case__ : Optional[Any] = random.choice(A__ ) return "".join(A__ ) def UpperCamelCase__ ( A__ , A__ , A__ , ) -> list[str]: snake_case__ : Tuple = [] # Generate more children proportionally to the fitness score. snake_case__ : Optional[Any] = int(parent_a[1] * 100 ) + 1 snake_case__ : str = 10 if child_n >= 10 else child_n for _ in range(A__ ): snake_case__ : Any = population_score[random.randint(0 , A__ )][0] snake_case__ , snake_case__ : int = crossover(parent_a[0] , A__ ) # Append new string to the population list. pop.append(mutate(A__ , A__ ) ) pop.append(mutate(A__ , A__ ) ) return pop def UpperCamelCase__ ( A__ , A__ , A__ = True ) -> tuple[int, int, str]: # Verify if N_POPULATION is bigger than N_SELECTED if N_POPULATION < N_SELECTED: snake_case__ : Union[str, Any] = F"""{N_POPULATION} must be bigger than {N_SELECTED}""" raise ValueError(A__ ) # Verify that the target contains no genes besides the ones inside genes variable. snake_case__ : Tuple = sorted({c for c in target if c not in genes} ) if not_in_genes_list: snake_case__ : int = F"""{not_in_genes_list} is not in genes list, evolution cannot converge""" raise ValueError(A__ ) # Generate random starting population. snake_case__ : Union[str, Any] = [] for _ in range(A__ ): population.append(''.join([random.choice(A__ ) for i in range(len(A__ ) )] ) ) # Just some logs to know what the algorithms is doing. snake_case__ , snake_case__ : str = 0, 0 # This loop will end when we find a perfect match for our target. while True: generation += 1 total_population += len(A__ ) # Random population created. Now it's time to evaluate. # Adding a bit of concurrency can make everything faster, # # import concurrent.futures # population_score: list[tuple[str, float]] = [] # with concurrent.futures.ThreadPoolExecutor( # max_workers=NUM_WORKERS) as executor: # futures = {executor.submit(evaluate, item) for item in population} # concurrent.futures.wait(futures) # population_score = [item.result() for item in futures] # # but with a simple algorithm like this, it will probably be slower. # We just need to call evaluate for every item inside the population. snake_case__ : List[Any] = [evaluate(A__ , A__ ) for item in population] # Check if there is a matching evolution. 
snake_case__ : int = sorted(A__ , key=lambda A__ : x[1] , reverse=A__ ) if population_score[0][0] == target: return (generation, total_population, population_score[0][0]) # Print the best result every 10 generation. # Just to know that the algorithm is working. if debug and generation % 10 == 0: print( F"""\nGeneration: {generation}""" F"""\nTotal Population:{total_population}""" F"""\nBest score: {population_score[0][1]}""" F"""\nBest string: {population_score[0][0]}""" ) # Flush the old population, keeping some of the best evolutions. # Keeping this avoid regression of evolution. snake_case__ : Optional[int] = population[: int(N_POPULATION / 3 )] population.clear() population.extend(A__ ) # Normalize population score to be between 0 and 1. snake_case__ : str = [ (item, score / len(A__ )) for item, score in population_score ] # This is selection for i in range(A__ ): population.extend(select(population_score[int(A__ )] , A__ , A__ ) ) # Check if the population has already reached the maximum value and if so, # break the cycle. If this check is disabled, the algorithm will take # forever to compute large strings, but will also calculate small strings in # a far fewer generations. if len(A__ ) > N_POPULATION: break if __name__ == "__main__": lowerCAmelCase__ : str = ( '''This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!''' ) lowerCAmelCase__ : Optional[Any] = list( ''' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm''' '''nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\''' ) lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__ : List[str] = basic(target_str, genes_list) print( F'''\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}''' )
699
0
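The selection logic in the genetic-algorithm row above hands out children in proportion to each parent's fitness score. A compact standalone sketch of that fitness-proportional idea (the toy fitness function and strings are made up for illustration):

import random


def fitness(candidate: str, target: str) -> float:
    # Fraction of positions where the candidate matches the target string.
    return sum(a == b for a, b in zip(candidate, target)) / len(target)


def proportional_children(scored: list, total: int) -> list:
    # Each parent receives a share of `total` children proportional to its score.
    weights = [score for _, score in scored]
    return random.choices([item for item, _ in scored], weights=weights, k=total)


if __name__ == "__main__":
    target = "banana"
    pool = ["bancna", "bxxxxx", "banana"]
    scored = [(c, fitness(c, target)) for c in pool]
    print(proportional_children(scored, total=5))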
def depth_first_search(grid: list[list[int]], row: int, col: int, visit: set) -> int:
    # Count the paths from (row, col) to the bottom-right cell, stepping
    # up/down/left/right, never revisiting a cell, and never entering a 1-cell.
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col))

    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)

    visit.remove((row, col))
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
700
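A quick usage sketch for the path-counting search above, using the corrected signature (the grids are illustrative; 1-cells are walls):

grid = [[0, 0], [0, 0]]
print(depth_first_search(grid, 0, 0, set()))  # 2: right-then-down and down-then-right

walled = [[0, 1], [0, 0]]
print(depth_first_search(walled, 0, 0, set()))  # 1: only down-then-right remains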
from __future__ import annotations

from collections.abc import Iterator
from typing import Generic, TypeVar

T = TypeVar("T")


class Node(Generic[T]):
    def __init__(self, data: T) -> None:
        self.data = data
        self.next: Node[T] | None = None

    def __str__(self) -> str:
        return f"{self.data}"


class Stack(Generic[T]):
    # LIFO stack backed by a singly linked list of nodes.
    def __init__(self) -> None:
        self.top: Node[T] | None = None

    def __iter__(self) -> Iterator[T]:
        # Walk from the top of the stack downwards.
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__(self) -> str:
        return "->".join([str(item) for item in self])

    def __len__(self) -> int:
        return len(tuple(iter(self)))

    def is_empty(self) -> bool:
        return self.top is None

    def push(self, item: T) -> None:
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop(self) -> T:
        if self.is_empty():
            raise IndexError("pop from empty stack")
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data

    def peek(self) -> T:
        if self.is_empty():
            raise IndexError("peek from empty stack")
        assert self.top is not None
        return self.top.data

    def clear(self) -> None:
        self.top = None


if __name__ == "__main__":
    from doctest import testmod

    testmod()
699
0
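A brief usage sketch of the stack above (the pushed values are arbitrary):

stack = Stack()
for value in (1, 2, 3):
    stack.push(value)
print(stack)         # 3->2->1, top first
print(stack.pop())   # 3
print(stack.peek())  # 2
print(len(stack))    # 2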
import numpy as np


def sigmoid(vector: np.ndarray) -> np.ndarray:
    # Classic logistic function, applied element-wise.
    return 1 / (1 + np.exp(-vector))


def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    # SiLU (a.k.a. swish): x * sigmoid(x), applied element-wise.
    return vector * sigmoid(vector)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
701
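A small numeric check of the two functions above (outputs rounded for readability):

import numpy as np

v = np.array([-1.0, 0.0, 1.0])
print(sigmoid(v))              # approx [0.2689, 0.5, 0.7311]
print(sigmoid_linear_unit(v))  # approx [-0.2689, 0.0, 0.7311]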
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCAmelCase__ : Dict = logging.get_logger(__name__) lowerCAmelCase__ : int = { '''sail/poolformer_s12''': '''https://huggingface.co/sail/poolformer_s12/resolve/main/config.json''', # See all PoolFormer models at https://huggingface.co/models?filter=poolformer } class __snake_case ( _lowerCamelCase ): __lowerCamelCase = """poolformer""" def __init__( self , __UpperCamelCase=3 , __UpperCamelCase=16 , __UpperCamelCase=16 , __UpperCamelCase=3 , __UpperCamelCase=4.0 , __UpperCamelCase=[2, 2, 6, 2] , __UpperCamelCase=[64, 128, 320, 512] , __UpperCamelCase=[7, 3, 3, 3] , __UpperCamelCase=[4, 2, 2, 2] , __UpperCamelCase=[2, 1, 1, 1] , __UpperCamelCase=4 , __UpperCamelCase=0.0 , __UpperCamelCase="gelu" , __UpperCamelCase=True , __UpperCamelCase=1E-5 , __UpperCamelCase=0.0_2 , **__UpperCamelCase , ) -> Any: '''simple docstring''' snake_case__ : List[str] = num_channels snake_case__ : Dict = patch_size snake_case__ : Optional[int] = stride snake_case__ : str = padding snake_case__ : List[str] = pool_size snake_case__ : List[Any] = hidden_sizes snake_case__ : List[Any] = mlp_ratio snake_case__ : Union[str, Any] = depths snake_case__ : Dict = patch_sizes snake_case__ : Dict = strides snake_case__ : Dict = num_encoder_blocks snake_case__ : Union[str, Any] = drop_path_rate snake_case__ : List[str] = hidden_act snake_case__ : Optional[Any] = use_layer_scale snake_case__ : int = layer_scale_init_value snake_case__ : Dict = initializer_range super().__init__(**__UpperCamelCase ) class __snake_case ( _lowerCamelCase ): __lowerCamelCase = version.parse("""1.11""" ) @property def __a ( self ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ] ) @property def __a ( self ) -> float: '''simple docstring''' return 2E-3
699
0
from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices lowerCAmelCase__ : int = logging.get_logger(__name__) lowerCAmelCase__ : int = { '''shi-labs/nat-mini-in1k-224''': '''https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json''', # See all Nat models at https://huggingface.co/models?filter=nat } class __snake_case ( _lowerCamelCase ,_lowerCamelCase ): __lowerCamelCase = """nat""" __lowerCamelCase = { """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers""", } def __init__( self , __UpperCamelCase=4 , __UpperCamelCase=3 , __UpperCamelCase=64 , __UpperCamelCase=[3, 4, 6, 5] , __UpperCamelCase=[2, 4, 8, 16] , __UpperCamelCase=7 , __UpperCamelCase=3.0 , __UpperCamelCase=True , __UpperCamelCase=0.0 , __UpperCamelCase=0.0 , __UpperCamelCase=0.1 , __UpperCamelCase="gelu" , __UpperCamelCase=0.0_2 , __UpperCamelCase=1E-5 , __UpperCamelCase=0.0 , __UpperCamelCase=None , __UpperCamelCase=None , **__UpperCamelCase , ) -> List[str]: '''simple docstring''' super().__init__(**__UpperCamelCase ) snake_case__ : Optional[Any] = patch_size snake_case__ : Dict = num_channels snake_case__ : Optional[int] = embed_dim snake_case__ : Any = depths snake_case__ : int = len(__UpperCamelCase ) snake_case__ : int = num_heads snake_case__ : List[str] = kernel_size snake_case__ : Dict = mlp_ratio snake_case__ : Any = qkv_bias snake_case__ : Any = hidden_dropout_prob snake_case__ : Any = attention_probs_dropout_prob snake_case__ : List[Any] = drop_path_rate snake_case__ : Union[str, Any] = hidden_act snake_case__ : List[Any] = layer_norm_eps snake_case__ : List[str] = initializer_range # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model snake_case__ : List[str] = int(embed_dim * 2 ** (len(__UpperCamelCase ) - 1) ) snake_case__ : Optional[Any] = layer_scale_init_value snake_case__ : Tuple = ['stem'] + [F"""stage{idx}""" for idx in range(1 , len(__UpperCamelCase ) + 1 )] snake_case__ , snake_case__ : Any = get_aligned_output_features_output_indices( out_features=__UpperCamelCase , out_indices=__UpperCamelCase , stage_names=self.stage_names )
702
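The hidden_size computed at the end of the row above doubles the embedding dimension once per extra stage, hence embed_dim * 2 ** (num_stages - 1); with the defaults shown:

embed_dim, depths = 64, [3, 4, 6, 5]
hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
print(hidden_size)  # 512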
import numpy as np import qiskit def UpperCamelCase__ ( A__ = 8 , A__ = None ) -> str: snake_case__ : Optional[int] = np.random.default_rng(seed=A__ ) # Roughly 25% of the qubits will contribute to the key. # So we take more than we need. snake_case__ : Tuple = 6 * key_len # Measurement basis for Alice's qubits. snake_case__ : Tuple = rng.integers(2 , size=A__ ) # The set of states Alice will prepare. snake_case__ : List[str] = rng.integers(2 , size=A__ ) # Measurement basis for Bob's qubits. snake_case__ : List[Any] = rng.integers(2 , size=A__ ) # Quantum Circuit to simulate BB84 snake_case__ : Any = qiskit.QuantumCircuit(A__ , name='BB84' ) # Alice prepares her qubits according to rules above. for index, _ in enumerate(A__ ): if alice_state[index] == 1: bbaa_circ.x(A__ ) if alice_basis[index] == 1: bbaa_circ.h(A__ ) bbaa_circ.barrier() # Bob measures the received qubits according to rules above. for index, _ in enumerate(A__ ): if bob_basis[index] == 1: bbaa_circ.h(A__ ) bbaa_circ.barrier() bbaa_circ.measure_all() # Simulate the quantum circuit. snake_case__ : List[str] = qiskit.Aer.get_backend('aer_simulator' ) # We only need to run one shot because the key is unique. # Multiple shots will produce the same key. snake_case__ : Optional[Any] = qiskit.execute(A__ , A__ , shots=1 , seed_simulator=A__ ) # Returns the result of measurement. snake_case__ : Union[str, Any] = job.result().get_counts(A__ ).most_frequent() # Extracting the generated key from the simulation results. # Only keep measurement results where Alice and Bob chose the same basis. snake_case__ : Optional[Any] = ''.join( [ result_bit for alice_basis_bit, bob_basis_bit, result_bit in zip( A__ , A__ , A__ ) if alice_basis_bit == bob_basis_bit ] ) # Get final key. Pad with 0 if too short, otherwise truncate. snake_case__ : Tuple = gen_key[:key_len] if len(A__ ) >= key_len else gen_key.ljust(A__ , '0' ) return key if __name__ == "__main__": print(F'''The generated key is : {bbaa(8, seed=0)}''') from doctest import testmod testmod()
699
0
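The key-sifting step of BB84 in the row above, keeping only the positions where Alice's and Bob's measurement bases agree, can be shown without a quantum simulator. A minimal NumPy sketch with made-up basis and measurement arrays:

import numpy as np

rng = np.random.default_rng(seed=0)
n = 24
alice_basis = rng.integers(2, size=n)
bob_basis = rng.integers(2, size=n)
measured_bits = rng.integers(2, size=n)  # stand-in for the circuit's measurement results

# Sifting: a measurement is only usable when both parties chose the same basis.
key = "".join(
    str(bit)
    for a, b, bit in zip(alice_basis, bob_basis, measured_bits)
    if a == b
)
print(key)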
'''simple docstring''' import faiss # noqa: F401 # Here to have a nice missing dependency error message early on import numpy # noqa: F401 # Here to have a nice missing dependency error message early on import requests # noqa: F401 # Here to have a nice missing dependency error message early on import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on from mauve import compute_mauve # From: mauve-text import datasets lowerCAmelCase__ : Optional[int] = """\ @inproceedings{pillutla-etal:mauve:neurips2021, title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers}, author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid}, booktitle = {NeurIPS}, year = {2021} } """ lowerCAmelCase__ : str = """\ MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure. MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences. For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021). This metrics is a wrapper around the official implementation of MAUVE: https://github.com/krishnap25/mauve """ lowerCAmelCase__ : str = """ Calculates MAUVE scores between two lists of generated text and reference text. Args: predictions: list of generated text to score. Each predictions should be a string with tokens separated by spaces. references: list of reference for each prediction. Each reference should be a string with tokens separated by spaces. Optional Args: num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1 kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9 kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5 kmeans_max_iter: maximum number of k-means iterations. Default 500 featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl']. device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU max_text_length: maximum number of tokens to consider. Default 1024 divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25 mauve_scaling_factor: \"c\" from the paper. Default 5. verbose: If True (default), print running time updates seed: random seed to initialize k-means cluster assignments. Returns: mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer, frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer, divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve, p_hist: a discrete distribution, which is a quantized version of the text distribution p_text, q_hist: same as above, but with q_text. 
Examples: >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest >>> import datasets >>> mauve = datasets.load_metric('mauve') >>> predictions = [\"hello there\", \"general kenobi\"] >>> references = [\"hello there\", \"general kenobi\"] >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP >>> print(out.mauve) # doctest: +SKIP 1.0 """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class __snake_case ( datasets.Metric ): def __a ( self ) -> int: '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage='https://github.com/krishnap25/mauve' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Value('string' , id='sequence' ), 'references': datasets.Value('string' , id='sequence' ), } ) , codebase_urls=['https://github.com/krishnap25/mauve'] , reference_urls=[ 'https://arxiv.org/abs/2102.01454', 'https://github.com/krishnap25/mauve', ] , ) def __a ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase="auto" , __UpperCamelCase=-1 , __UpperCamelCase=0.9 , __UpperCamelCase=5 , __UpperCamelCase=500 , __UpperCamelCase="gpt2-large" , __UpperCamelCase=-1 , __UpperCamelCase=1024 , __UpperCamelCase=25 , __UpperCamelCase=5 , __UpperCamelCase=True , __UpperCamelCase=25 , ) -> str: '''simple docstring''' snake_case__ : List[Any] = compute_mauve( p_text=__A , q_text=__A , p_features=__A , q_features=__A , p_tokens=__A , q_tokens=__A , num_buckets=__A , pca_max_data=__A , kmeans_explained_var=__A , kmeans_num_redo=__A , kmeans_max_iter=__A , featurize_model_name=__A , device_id=__A , max_text_length=__A , divergence_curve_discretization_size=__A , mauve_scaling_factor=__A , verbose=__A , seed=__A , ) return out
703
def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    # Modular exponentiation by repeated squaring.
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value


def solution(base: int = 1777, height: int = 1855, digits: int = 8) -> int:
    # Last `digits` digits of the hyperexponentiation (tetration) of base by height.
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
699
0
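The helper above is exponentiation by squaring under a modulus; Python's built-in three-argument pow computes the same thing, which makes a handy sanity check (the operands below are arbitrary):

base, exponent, modulus = 1777, 1855, 10**8
assert _modexpt(base, exponent, modulus) == pow(base, exponent, modulus)
print(pow(base, exponent, modulus))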
import logging import os from dataclasses import dataclass, field from functools import partial from pathlib import Path from tempfile import TemporaryDirectory from typing import List, Optional import faiss import torch from datasets import Features, Sequence, Value, load_dataset from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser lowerCAmelCase__ : str = logging.getLogger(__name__) torch.set_grad_enabled(False) lowerCAmelCase__ : Optional[Any] = "cuda" if torch.cuda.is_available() else "cpu" def UpperCamelCase__ ( A__ , A__=100 , A__=" " ) -> Optional[int]: '''simple docstring''' snake_case__ : Tuple = text.split(_lowerCamelCase ) return [character.join(text[i : i + n] ).strip() for i in range(0 , len(_lowerCamelCase ) , _lowerCamelCase )] def UpperCamelCase__ ( A__ ) -> Union[str, Any]: '''simple docstring''' snake_case__ : int = [], [] for title, text in zip(documents['title'] , documents['text'] ): if text is not None: for passage in split_text(_lowerCamelCase ): titles.append(title if title is not None else '' ) texts.append(_lowerCamelCase ) return {"title": titles, "text": texts} def UpperCamelCase__ ( A__ , A__ , A__ ) -> Optional[Any]: '''simple docstring''' snake_case__ : Tuple = ctx_tokenizer( documents['title'] , documents['text'] , truncation=_lowerCamelCase , padding='longest' , return_tensors='pt' )["input_ids"] snake_case__ : Dict = ctx_encoder(input_ids.to(device=_lowerCamelCase ) , return_dict=_lowerCamelCase ).pooler_output return {"embeddings": embeddings.detach().cpu().numpy()} def UpperCamelCase__ ( A__ , A__ , A__ , ) -> str: '''simple docstring''' logger.info('Step 1 - Create the dataset' ) ###################################### # The dataset needed for RAG must have three columns: # - title (string): title of the document # - text (string): text of a passage of the document # - embeddings (array of dimension d): DPR representation of the passage # Let's say you have documents in tab-separated csv files with columns "title" and "text" assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file" # You can load a Dataset object this way snake_case__ : Dict = load_dataset( 'csv' , data_files=[rag_example_args.csv_path] , split='train' , delimiter='\t' , column_names=['title', 'text'] ) # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files # Then split the documents into passages of 100 words snake_case__ : Tuple = dataset.map(_lowerCamelCase , batched=_lowerCamelCase , num_proc=processing_args.num_proc ) # And compute the embeddings snake_case__ : List[Any] = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=_lowerCamelCase ) snake_case__ : Any = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ) snake_case__ : str = Features( {'text': Value('string' ), 'title': Value('string' ), 'embeddings': Sequence(Value('float32' ) )} ) # optional, save as float32 instead of float64 to save space snake_case__ : List[str] = dataset.map( partial(_lowerCamelCase , ctx_encoder=_lowerCamelCase , ctx_tokenizer=_lowerCamelCase ) , batched=_lowerCamelCase , batch_size=processing_args.batch_size , features=_lowerCamelCase , ) # And finally save your dataset snake_case__ : Optional[int] = os.path.join(rag_example_args.output_dir , 'my_knowledge_dataset' ) dataset.save_to_disk(_lowerCamelCase ) # from datasets import load_from_disk # dataset = 
load_from_disk(passages_path) # to reload the dataset ###################################### logger.info('Step 2 - Index the dataset' ) ###################################### # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search snake_case__ : Tuple = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT ) dataset.add_faiss_index('embeddings' , custom_index=_lowerCamelCase ) # And save the index snake_case__ : Dict = os.path.join(rag_example_args.output_dir , 'my_knowledge_dataset_hnsw_index.faiss' ) dataset.get_index('embeddings' ).save(_lowerCamelCase ) # dataset.load_faiss_index("embeddings", index_path) # to reload the index @dataclass class __snake_case : __lowerCamelCase = field( default=str(Path(lowercase__ ).parent / """test_run""" / """dummy-kb""" / """my_knowledge_dataset.csv""" ) ,metadata={"""help""": """Path to a tab-separated csv file with columns 'title' and 'text'"""} ,) __lowerCamelCase = field( default=lowercase__ ,metadata={"""help""": """Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."""} ,) __lowerCamelCase = field( default="""facebook/rag-sequence-nq""" ,metadata={"""help""": """The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"""} ,) __lowerCamelCase = field( default="""facebook/dpr-ctx_encoder-multiset-base""" ,metadata={ """help""": ( """The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or""" """ 'facebook/dpr-ctx_encoder-multiset-base'""" ) } ,) __lowerCamelCase = field( default=str(Path(lowercase__ ).parent / """test_run""" / """dummy-kb""" ) ,metadata={"""help""": """Path to a directory where the dataset passages and the index will be saved"""} ,) @dataclass class __snake_case : __lowerCamelCase = field( default=lowercase__ ,metadata={ """help""": """The number of processes to use to split the documents into passages. Default is single process.""" } ,) __lowerCamelCase = field( default=16 ,metadata={ """help""": """The batch size to use when computing the passages embeddings using the DPR context encoder.""" } ,) @dataclass class __snake_case : __lowerCamelCase = field( default=768 ,metadata={"""help""": """The dimension of the embeddings to pass to the HNSW Faiss index."""} ,) __lowerCamelCase = field( default=128 ,metadata={ """help""": ( """The number of bi-directional links created for every new element during the HNSW index construction.""" ) } ,) if __name__ == "__main__": logging.basicConfig(level=logging.WARNING) logger.setLevel(logging.INFO) lowerCAmelCase__ : str = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments)) lowerCAmelCase__ : List[Any] = parser.parse_args_into_dataclasses() with TemporaryDirectory() as tmp_dir: lowerCAmelCase__ : str = rag_example_args.output_dir or tmp_dir main(rag_example_args, processing_args, index_hnsw_args)
704
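Step 2 in the row above reduces to adding fixed-size vectors to a FAISS HNSW index and querying it. A standalone sketch with random vectors in place of DPR embeddings (assumes faiss and numpy are installed; the dimensions are arbitrary):

import faiss
import numpy as np

d, m = 64, 32  # embedding dimension and HNSW connectivity
index = faiss.IndexHNSWFlat(d, m, faiss.METRIC_INNER_PRODUCT)

xb = np.random.rand(1000, d).astype("float32")  # stand-in for passage embeddings
index.add(xb)

xq = np.random.rand(1, d).astype("float32")  # stand-in for a question embedding
scores, ids = index.search(xq, 5)
print(ids[0], scores[0])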
# tests directory-specific settings - this file is run automatically # by pytest before any tests are run import sys import warnings from os.path import abspath, dirname, join # allow having multiple repository checkouts and not needing to remember to rerun # 'pip install -e .[dev]' when switching between checkouts and running tests. lowerCAmelCase__ : Tuple = abspath(join(dirname(dirname(__file__)), '''src''')) sys.path.insert(1, git_repo_path) # silence FutureWarning warnings in tests since often we can't act on them until # they become normal warnings - i.e. the tests still need to test the current functionality warnings.simplefilter(action='''ignore''', category=FutureWarning) def UpperCamelCase__ ( A__ ) -> Optional[Any]: from diffusers.utils.testing_utils import pytest_addoption_shared pytest_addoption_shared(A__ ) def UpperCamelCase__ ( A__ ) -> Optional[Any]: from diffusers.utils.testing_utils import pytest_terminal_summary_main snake_case__ : Union[str, Any] = terminalreporter.config.getoption('--make-reports' ) if make_reports: pytest_terminal_summary_main(A__ , id=A__ )
699
0
from __future__ import annotations

from dataclasses import dataclass


@dataclass
class TreeNode:
    data: float
    left: TreeNode | None = None
    right: TreeNode | None = None


def is_binary_search_tree(tree: TreeNode | None) -> bool:
    # Validation: every node must be a TreeNode carrying float-convertible data.
    def is_valid_tree(node: TreeNode | None) -> bool:
        if node is None:
            return True
        if not isinstance(node, TreeNode):
            return False
        try:
            float(node.data)
        except (TypeError, ValueError):
            return False
        return is_valid_tree(node.left) and is_valid_tree(node.right)

    if not is_valid_tree(tree):
        raise ValueError(
            "Each node should be type of TreeNode and data should be float."
        )

    def is_binary_search_tree_recursive_check(
        node: TreeNode | None, left_bound: float, right_bound: float
    ) -> bool:
        # Every node's value must lie strictly inside the bounds inherited
        # from its ancestors.
        if node is None:
            return True
        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left, left_bound, node.data)
            and is_binary_search_tree_recursive_check(node.right, node.data, right_bound)
        )

    return is_binary_search_tree_recursive_check(tree, -float("inf"), float("inf"))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
705
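A quick usage sketch for the validator above, building one valid and one invalid tree (the values are arbitrary):

valid = TreeNode(2.0, TreeNode(1.0), TreeNode(3.0))
invalid = TreeNode(2.0, TreeNode(3.0), TreeNode(1.0))

print(is_binary_search_tree(valid))    # True
print(is_binary_search_tree(invalid))  # False: left child exceeds its upper bound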
def hexagonal_numbers(length: int) -> list[int]:
    # The n-th hexagonal number is n * (2n - 1); the sequence here starts at n = 0.
    if length <= 0 or not isinstance(length, int):
        raise ValueError("Length must be a positive integer.")
    return [n * (2 * n - 1) for n in range(length)]


if __name__ == "__main__":
    print(hexagonal_numbers(length=5))
    print(hexagonal_numbers(length=10))
699
0
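A tiny cross-check of the closed form n(2n - 1) above: consecutive hexagonal numbers differ by an arithmetic progression with step 4, matching the figurate-number definition.

values = hexagonal_numbers(5)
print(values)  # [0, 1, 6, 15, 28]
print([b - a for a, b in zip(values, values[1:])])  # [1, 5, 9, 13]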
from typing import Any


class Node:
    def __init__(self, data: Any) -> None:
        self.data = data
        self.next: Node | None = None


class LinkedList:
    def __init__(self) -> None:
        self.head: Node | None = None

    def print_list(self) -> None:
        temp = self.head
        while temp is not None:
            print(temp.data, end=" ")
            temp = temp.next
        print()

    def push(self, new_data: Any) -> None:
        # Insert at the head of the list.
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    def swap_nodes(self, node_data_1: Any, node_data_2: Any) -> None:
        # Swap the payloads of the first nodes holding each value; the
        # links themselves are left untouched.
        if node_data_1 == node_data_2:
            return
        node_1 = self.head
        while node_1 is not None and node_1.data != node_data_1:
            node_1 = node_1.next
        node_2 = self.head
        while node_2 is not None and node_2.data != node_data_2:
            node_2 = node_2.next
        if node_1 is None or node_2 is None:
            return
        node_1.data, node_2.data = node_2.data, node_1.data


if __name__ == "__main__":
    ll = LinkedList()
    for i in range(5, 0, -1):
        ll.push(i)
    ll.print_list()
    ll.swap_nodes(1, 4)
    print("After swapping")
    ll.print_list()
706
import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( ConditionalDetrConfig, ConditionalDetrForObjectDetection, ConditionalDetrForSegmentation, ConditionalDetrImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase__ : Dict = logging.get_logger(__name__) # here we list all keys to be renamed (original name on the left, our name on the right) lowerCAmelCase__ : Optional[Any] = [] for i in range(6): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', F'''encoder.layers.{i}.self_attn.out_proj.weight''') ) rename_keys.append( (F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight''')) rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias''')) rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight''')) rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias''')) rename_keys.append( (F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias''')) rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight''')) rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias''')) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( (F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''decoder.layers.{i}.self_attn.out_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''') ) rename_keys.append( ( F'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''', F'''decoder.layers.{i}.encoder_attn.out_proj.weight''', ) ) rename_keys.append( ( F'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''', F'''decoder.layers.{i}.encoder_attn.out_proj.bias''', ) ) rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight''')) rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias''')) rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight''')) rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''') ) 
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight''')) rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias''')) # q, k, v projections in self/cross-attention in decoder for conditional DETR rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', F'''decoder.layers.{i}.sa_qcontent_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', F'''decoder.layers.{i}.sa_kcontent_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', F'''decoder.layers.{i}.sa_qpos_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', F'''decoder.layers.{i}.sa_kpos_proj.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.weight''', F'''decoder.layers.{i}.sa_v_proj.weight''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', F'''decoder.layers.{i}.ca_qcontent_proj.weight''') ) # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight")) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', F'''decoder.layers.{i}.ca_kcontent_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', F'''decoder.layers.{i}.ca_kpos_proj.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.weight''', F'''decoder.layers.{i}.ca_v_proj.weight''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', F'''decoder.layers.{i}.ca_qpos_sine_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', F'''decoder.layers.{i}.sa_qcontent_proj.bias''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', F'''decoder.layers.{i}.sa_kcontent_proj.bias''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', F'''decoder.layers.{i}.sa_qpos_proj.bias''')) rename_keys.append((F'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', F'''decoder.layers.{i}.sa_kpos_proj.bias''')) rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.bias''', F'''decoder.layers.{i}.sa_v_proj.bias''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', F'''decoder.layers.{i}.ca_qcontent_proj.bias''') ) # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias")) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', F'''decoder.layers.{i}.ca_kcontent_proj.bias''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', F'''decoder.layers.{i}.ca_kpos_proj.bias''')) rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.bias''', F'''decoder.layers.{i}.ca_v_proj.bias''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', F'''decoder.layers.{i}.ca_qpos_sine_proj.bias''') ) # convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads # for conditional DETR, also convert reference point head and query scale MLP rename_keys.extend( [ ('''input_proj.weight''', '''input_projection.weight'''), ('''input_proj.bias''', '''input_projection.bias'''), ('''query_embed.weight''', '''query_position_embeddings.weight'''), 
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''), ('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''), ('''class_embed.weight''', '''class_labels_classifier.weight'''), ('''class_embed.bias''', '''class_labels_classifier.bias'''), ('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''), ('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''), ('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''), ('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''), ('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''), ('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''), ('''transformer.decoder.ref_point_head.layers.0.weight''', '''decoder.ref_point_head.layers.0.weight'''), ('''transformer.decoder.ref_point_head.layers.0.bias''', '''decoder.ref_point_head.layers.0.bias'''), ('''transformer.decoder.ref_point_head.layers.1.weight''', '''decoder.ref_point_head.layers.1.weight'''), ('''transformer.decoder.ref_point_head.layers.1.bias''', '''decoder.ref_point_head.layers.1.bias'''), ('''transformer.decoder.query_scale.layers.0.weight''', '''decoder.query_scale.layers.0.weight'''), ('''transformer.decoder.query_scale.layers.0.bias''', '''decoder.query_scale.layers.0.bias'''), ('''transformer.decoder.query_scale.layers.1.weight''', '''decoder.query_scale.layers.1.weight'''), ('''transformer.decoder.query_scale.layers.1.bias''', '''decoder.query_scale.layers.1.bias'''), ('''transformer.decoder.layers.0.ca_qpos_proj.weight''', '''decoder.layers.0.ca_qpos_proj.weight'''), ('''transformer.decoder.layers.0.ca_qpos_proj.bias''', '''decoder.layers.0.ca_qpos_proj.bias'''), ] ) def UpperCamelCase__ ( A__ , A__ , A__ ) -> List[str]: snake_case__ : int = state_dict.pop(A__ ) snake_case__ : Union[str, Any] = val def UpperCamelCase__ ( A__ ) -> int: snake_case__ : List[Any] = OrderedDict() for key, value in state_dict.items(): if "backbone.0.body" in key: snake_case__ : Any = key.replace('backbone.0.body' , 'backbone.conv_encoder.model' ) snake_case__ : Optional[int] = value else: snake_case__ : Optional[int] = value return new_state_dict def UpperCamelCase__ ( A__ , A__=False ) -> Optional[int]: snake_case__ : Optional[int] = '' if is_panoptic: snake_case__ : Tuple = 'conditional_detr.' 
# first: transformer encoder for i in range(6 ): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) snake_case__ : int = state_dict.pop(F"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" ) snake_case__ : str = state_dict.pop(F"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" ) # next, add query, keys and values (in that order) to the state dict snake_case__ : Union[str, Any] = in_proj_weight[:256, :] snake_case__ : Union[str, Any] = in_proj_bias[:256] snake_case__ : Union[str, Any] = in_proj_weight[256:512, :] snake_case__ : Optional[Any] = in_proj_bias[256:512] snake_case__ : List[str] = in_proj_weight[-256:, :] snake_case__ : Tuple = in_proj_bias[-256:] def UpperCamelCase__ ( ) -> Tuple: snake_case__ : int = 'http://images.cocodataset.org/val2017/000000039769.jpg' snake_case__ : str = Image.open(requests.get(A__ , stream=A__ ).raw ) return im @torch.no_grad() def UpperCamelCase__ ( A__ , A__ ) -> str: snake_case__ : List[Any] = ConditionalDetrConfig() # set backbone and dilation attributes if "resnet101" in model_name: snake_case__ : Any = 'resnet101' if "dc5" in model_name: snake_case__ : Any = True snake_case__ : int = 'panoptic' in model_name if is_panoptic: snake_case__ : str = 250 else: snake_case__ : Union[str, Any] = 91 snake_case__ : Optional[int] = 'huggingface/label-files' snake_case__ : Optional[Any] = 'coco-detection-id2label.json' snake_case__ : str = json.load(open(hf_hub_download(A__ , A__ , repo_type='dataset' ) , 'r' ) ) snake_case__ : List[Any] = {int(A__ ): v for k, v in idalabel.items()} snake_case__ : Any = idalabel snake_case__ : int = {v: k for k, v in idalabel.items()} # load image processor snake_case__ : List[Any] = 'coco_panoptic' if is_panoptic else 'coco_detection' snake_case__ : List[Any] = ConditionalDetrImageProcessor(format=A__ ) # prepare image snake_case__ : List[str] = prepare_img() snake_case__ : Any = image_processor(images=A__ , return_tensors='pt' ) snake_case__ : Dict = encoding['pixel_values'] logger.info(F"""Converting model {model_name}...""" ) # load original model from torch hub snake_case__ : Any = torch.hub.load('DeppMeng/ConditionalDETR' , A__ , pretrained=A__ ).eval() snake_case__ : Tuple = conditional_detr.state_dict() # rename keys for src, dest in rename_keys: if is_panoptic: snake_case__ : List[Any] = 'conditional_detr.' + src rename_key(A__ , A__ , A__ ) snake_case__ : Dict = rename_backbone_keys(A__ ) # query, key and value matrices need special treatment read_in_q_k_v(A__ , is_panoptic=A__ ) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them snake_case__ : Optional[int] = 'conditional_detr.model.' if is_panoptic else 'model.' 
for key in state_dict.copy().keys(): if is_panoptic: if ( key.startswith('conditional_detr' ) and not key.startswith('class_labels_classifier' ) and not key.startswith('bbox_predictor' ) ): snake_case__ : List[Any] = state_dict.pop(A__ ) snake_case__ : Optional[int] = val elif "class_labels_classifier" in key or "bbox_predictor" in key: snake_case__ : str = state_dict.pop(A__ ) snake_case__ : List[Any] = val elif key.startswith('bbox_attention' ) or key.startswith('mask_head' ): continue else: snake_case__ : Union[str, Any] = state_dict.pop(A__ ) snake_case__ : Dict = val else: if not key.startswith('class_labels_classifier' ) and not key.startswith('bbox_predictor' ): snake_case__ : List[Any] = state_dict.pop(A__ ) snake_case__ : Optional[int] = val # finally, create HuggingFace model and load state dict snake_case__ : Union[str, Any] = ConditionalDetrForSegmentation(A__ ) if is_panoptic else ConditionalDetrForObjectDetection(A__ ) model.load_state_dict(A__ ) model.eval() model.push_to_hub(repo_id=A__ , organization='DepuMeng' , commit_message='Add model' ) # verify our conversion snake_case__ : Tuple = conditional_detr(A__ ) snake_case__ : str = model(A__ ) assert torch.allclose(outputs.logits , original_outputs['pred_logits'] , atol=1e-4 ) assert torch.allclose(outputs.pred_boxes , original_outputs['pred_boxes'] , atol=1e-4 ) if is_panoptic: assert torch.allclose(outputs.pred_masks , original_outputs['pred_masks'] , atol=1e-4 ) # Save model and image processor logger.info(F"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" ) Path(A__ ).mkdir(exist_ok=A__ ) model.save_pretrained(A__ ) image_processor.save_pretrained(A__ ) if __name__ == "__main__": lowerCAmelCase__ : Any = argparse.ArgumentParser() parser.add_argument( '''--model_name''', default='''conditional_detr_resnet50''', type=str, help='''Name of the CONDITIONAL_DETR model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.''' ) lowerCAmelCase__ : int = parser.parse_args() convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
699
0
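The read_in_q_k_v step above splits PyTorch's fused attention input projection into separate query/key/value weights by slicing thirds along the first dimension. The same slicing in isolation, with random tensors standing in for checkpoint weights:

import torch

hidden = 256
in_proj_weight = torch.randn(3 * hidden, hidden)  # stacked q, k, v, as in nn.MultiheadAttention
in_proj_bias = torch.randn(3 * hidden)

q_w, k_w, v_w = in_proj_weight[:hidden], in_proj_weight[hidden : 2 * hidden], in_proj_weight[-hidden:]
q_b, k_b, v_b = in_proj_bias[:hidden], in_proj_bias[hidden : 2 * hidden], in_proj_bias[-hidden:]
print(q_w.shape, k_w.shape, v_w.shape)  # three (256, 256) matrices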
import math
from collections.abc import Iterator
from itertools import takewhile


def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, and all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator() -> Iterator[int]:
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(n: int = 2_000_000) -> int:
    # Project Euler 10: sum of all primes below n.
    return sum(takewhile(lambda x: x < n, prime_generator()))


if __name__ == "__main__":
    print(f"{solution() = }")
707
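The 6k +/- 1 trick above works because every prime greater than 3 sits next to a multiple of 6. A short check of the generator, plus the classic sum-below-10 sanity value:

from itertools import islice, takewhile

print(list(islice(prime_generator(), 8)))  # [2, 3, 5, 7, 11, 13, 17, 19]
print(sum(takewhile(lambda x: x < 10, prime_generator())))  # 17 = 2 + 3 + 5 + 7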
from collections import namedtuple

from_to = namedtuple("from_to", "from_ to")

METRIC_CONVERSION = {
    "cubicmeter": from_to(1, 1),
    "litre": from_to(0.001, 1000),
    "kilolitre": from_to(1, 1),
    "gallon": from_to(0.00454, 264.172),
    "cubicyard": from_to(0.76455, 1.30795),
    "cubicfoot": from_to(0.028, 35.3147),
    "cup": from_to(0.000236588, 4226.75),
}


def volume_conversion(value: float, from_type: str, to_type: str) -> float:
    # Convert via cubic metres: value * from-factor (into m^3) * to-factor (out of m^3).
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'from_type' value: {from_type!r} Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'to_type' value: {to_type!r}. Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to


if __name__ == "__main__":
    import doctest

    doctest.testmod()
699
0
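Conversion above goes through cubic metres as a pivot unit; a couple of illustrative calls:

print(volume_conversion(4, "cubicmeter", "litre"))  # 4000.0
print(volume_conversion(1, "litre", "gallon"))      # 0.264172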
from __future__ import annotations


def two_pointer(nums: list[int], target: int) -> list[int]:
    # Classic two-pointer scan over a sorted list: move inward from both
    # ends until a pair summing to target is found.
    i = 0
    j = len(nums) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1
    return []


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{two_pointer([2, 7, 11, 15], 9) = }")
708
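The two-pointer scan above assumes nums is sorted in ascending order; on unsorted input it can miss valid pairs. A quick illustration:

print(two_pointer([2, 7, 11, 15], 9))  # [0, 1]: sorted input, pair found
print(two_pointer([15, 2, 11, 7], 9))  # []: same values unsorted, pair missed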
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging lowerCAmelCase__ : Tuple = logging.get_logger(__name__) lowerCAmelCase__ : Union[str, Any] = '''▁''' lowerCAmelCase__ : List[Any] = {'''vocab_file''': '''sentencepiece.bpe.model'''} lowerCAmelCase__ : Optional[Any] = { '''vocab_file''': { '''facebook/xglm-564M''': '''https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model''', } } lowerCAmelCase__ : str = { '''facebook/xglm-564M''': 20_48, } class __snake_case ( _lowerCamelCase ): __lowerCamelCase = VOCAB_FILES_NAMES __lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP __lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowerCamelCase = ["""input_ids""", """attention_mask"""] def __init__( self , __UpperCamelCase , __UpperCamelCase="<s>" , __UpperCamelCase="</s>" , __UpperCamelCase="</s>" , __UpperCamelCase="<s>" , __UpperCamelCase="<unk>" , __UpperCamelCase="<pad>" , __UpperCamelCase = None , **__UpperCamelCase , ) -> None: '''simple docstring''' snake_case__ : Any = {} if sp_model_kwargs is None else sp_model_kwargs # Compatibility with the original tokenizer snake_case__ : Tuple = 7 snake_case__ : Dict = [F"""<madeupword{i}>""" for i in range(self.num_madeup_words )] snake_case__ : Union[str, Any] = kwargs.get('additional_special_tokens' , [] ) kwargs["additional_special_tokens"] += [ word for word in madeup_words if word not in kwargs["additional_special_tokens"] ] super().__init__( bos_token=__UpperCamelCase , eos_token=__UpperCamelCase , unk_token=__UpperCamelCase , sep_token=__UpperCamelCase , cls_token=__UpperCamelCase , pad_token=__UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCamelCase , ) snake_case__ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(__UpperCamelCase ) ) snake_case__ : Optional[Any] = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab snake_case__ : Tuple = 1 # Mimic fairseq token-to-id alignment for the first 4 token snake_case__ : Tuple = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3} snake_case__ : List[Any] = len(self.sp_model ) snake_case__ : Optional[Any] = {F"""<madeupword{i}>""": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )} self.fairseq_tokens_to_ids.update(__UpperCamelCase ) snake_case__ : Union[str, Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self ) -> List[Any]: '''simple docstring''' snake_case__ : Union[str, Any] = self.__dict__.copy() snake_case__ : Optional[Any] = None snake_case__ : Tuple = self.sp_model.serialized_model_proto() return state def __setstate__( self , __UpperCamelCase ) -> Union[str, Any]: '''simple docstring''' snake_case__ : Union[str, Any] = d # for backward compatibility if not hasattr(self , 'sp_model_kwargs' ): snake_case__ : Any = {} snake_case__ : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def __a ( self , __UpperCamelCase , __UpperCamelCase = None ) -> List[int]: '''simple docstring''' if token_ids_a is None: return [self.sep_token_id] + token_ids_a snake_case__ : str = [self.sep_token_id] return sep + token_ids_a + sep + sep + token_ids_a def __a ( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = False ) -> List[int]: '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__UpperCamelCase , token_ids_a=__UpperCamelCase , already_has_special_tokens=__UpperCamelCase ) if token_ids_a is None: return [1] + ([0] * len(__UpperCamelCase )) return [1] + ([0] * len(__UpperCamelCase )) + [1, 1] + ([0] * len(__UpperCamelCase )) def __a ( self , __UpperCamelCase , __UpperCamelCase = None ) -> List[int]: '''simple docstring''' snake_case__ : int = [self.sep_token_id] if token_ids_a is None: return len(sep + token_ids_a ) * [0] return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0] @property def __a ( self ) -> Tuple: '''simple docstring''' return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words def __a ( self ) -> Union[str, Any]: '''simple docstring''' snake_case__ : int = {self.convert_ids_to_tokens(__UpperCamelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __a ( self , __UpperCamelCase ) -> List[str]: '''simple docstring''' return self.sp_model.encode(__UpperCamelCase , out_type=__UpperCamelCase ) def __a ( self , __UpperCamelCase ) -> Union[str, Any]: '''simple docstring''' if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] snake_case__ : Optional[Any] = self.sp_model.PieceToId(__UpperCamelCase ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def __a ( self , __UpperCamelCase ) -> Dict: '''simple docstring''' if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def __a ( self , __UpperCamelCase ) -> int: '''simple docstring''' snake_case__ : int = ''.join(__UpperCamelCase ).replace(__UpperCamelCase , ' ' ).strip() return out_string def __a ( self , __UpperCamelCase , __UpperCamelCase = None ) -> Tuple[str]: '''simple docstring''' if not os.path.isdir(__UpperCamelCase ): 
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return snake_case__ : List[str] = os.path.join( __UpperCamelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCamelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __UpperCamelCase ) elif not os.path.isfile(self.vocab_file ): with open(__UpperCamelCase , 'wb' ) as fi: snake_case__ : Any = self.sp_model.serialized_model_proto() fi.write(__UpperCamelCase ) return (out_vocab_file,)
699
0
import gc
import random
import unittest

import numpy as np
import torch
from transformers import XLMRobertaTokenizer

from diffusers import (
    AltDiffusionImg2ImgPipeline,
    AutoencoderKL,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
    RobertaSeriesConfig,
    RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu


enable_full_determinism()


class AltDiffusionImg2ImgPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'),
            up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'),
            cross_attention_dim=32,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'],
            up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'],
            latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=5006,
        )
        return RobertaSeriesModelWithTransformation(config)

    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract

    def test_stable_diffusion_img2img_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        # make sure here that pndm scheduler skips prk
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained('hf-internal-testing/tiny-xlm-roberta')
        tokenizer.model_max_length = 77

        init_image = self.dummy_image.to(device)
        init_image = init_image / 2 + 0.5

        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = alt_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type='np',
            image=init_image,
        )
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = alt_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type='np',
            image=init_image,
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 5e-3

    @unittest.skipIf(torch_device != 'cuda', 'This test requires a GPU')
    def test_stable_diffusion_img2img_fp16(self):
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained('hf-internal-testing/tiny-xlm-roberta')
        tokenizer.model_max_length = 77

        init_image = self.dummy_image.to(torch_device)

        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()

        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = alt_pipe(
            [prompt],
            generator=generator,
            num_inference_steps=2,
            output_type='np',
            image=init_image,
        ).images

        assert image.shape == (1, 32, 32, 3)

    @unittest.skipIf(torch_device != 'cuda', 'This test requires a GPU')
    def test_stable_diffusion_img2img_pipeline_multiple_of_8(self):
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/img2img/sketch-mountains-input.jpg'
        )
        # resize to resolution that is divisible by 8 but not 16 or 32
        init_image = init_image.resize((760, 504))

        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            generator=generator,
            output_type='np',
        )
        image = output.images[0]

        image_slice = image[255:258, 383:386, -1]

        assert image.shape == (504, 760, 3)
        expected_slice = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch_gpu
class AltDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_img2img_pipeline_default(self):
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/img2img/sketch-mountains-input.jpg'
        )
        init_image = init_image.resize((768, 512))

        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy'
        )

        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            generator=generator,
            output_type='np',
        )
        image = output.images[0]

        assert image.shape == (512, 768, 3)
        # img2img is flaky across GPUs even in fp32, so using MAE here
        assert np.abs(expected_image - image).max() < 1e-2
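# Illustrative usage sketch: a minimal img2img call mirroring the slow test above.
# The checkpoint id, prompt, resize, and sampler settings are the ones used in the
# test; the input image path/URL argument is an assumption of this sketch.
import torch
from diffusers import AltDiffusionImg2ImgPipeline
from diffusers.utils import load_image


def run_img2img_sketch(image_path_or_url):
    pipe = AltDiffusionImg2ImgPipeline.from_pretrained("BAAI/AltDiffusion", safety_checker=None)
    pipe.enable_attention_slicing()
    init_image = load_image(image_path_or_url).resize((768, 512))
    output = pipe(
        prompt="A fantasy landscape, trending on artstation",
        image=init_image,
        strength=0.75,
        guidance_scale=7.5,
        generator=torch.manual_seed(0),
    )
    return output.images[0]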
709
import collections
from typing import List, Optional, Union

from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-ctx_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-ctx_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-ctx_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-ctx_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-question_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-question_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-question_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-question_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-reader-single-nq-base": (
            "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-reader-multiset-base": (
            "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-reader-single-nq-base": (
            "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-reader-multiset-base": (
            "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}

CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-ctx_encoder-single-nq-base": 512,
    "facebook/dpr-ctx_encoder-multiset-base": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-question_encoder-single-nq-base": 512,
    "facebook/dpr-question_encoder-multiset-base": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-reader-single-nq-base": 512,
    "facebook/dpr-reader-multiset-base": 512,
}

CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}


class DPRContextEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer


class DPRQuestionEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer


DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])


CUSTOM_DPR_READER_DOCSTRING = r"""
    Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
    It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
    using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
    with the format:

        [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>

    Args:
        questions (`str` or `List[str]`):
            The questions to be encoded. You can specify one question for many passages. In this case, the question
            will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
            `titles` or `texts`.
        titles (`str` or `List[str]`):
            The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
        texts (`str` or `List[str]`):
            The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
        padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
            Activates and controls padding. Accepts the following values:

            - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
              sequence if provided).
            - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided.
            - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
              lengths).
        truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
            Activates and controls truncation. Accepts the following values:

            - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or
              to the maximum acceptable input length for the model if that argument is not provided. This will
              truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences
              (or a batch of pairs) is provided.
            - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided. This will only truncate the
              first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the
              maximum acceptable input length for the model if that argument is not provided. This will only truncate
              the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
              greater than the model maximum admissible input size).
        max_length (`int`, *optional*):
            Controls the maximum length to use by one of the truncation/padding parameters.

            If left unset or set to `None`, this will use the predefined model maximum length if a maximum length is
            required by one of the truncation/padding parameters. If the model has no specific maximum input length
            (like XLNet) truncation/padding to a maximum length will be deactivated.
        return_tensors (`str` or [`~utils.TensorType`], *optional*):
            If set, will return tensors instead of list of python integers. Acceptable values are:

            - `'tf'`: Return TensorFlow `tf.constant` objects.
            - `'pt'`: Return PyTorch `torch.Tensor` objects.
            - `'np'`: Return Numpy `np.ndarray` objects.
        return_attention_mask (`bool`, *optional*):
            Whether or not to return the attention mask. If not set, will return the attention mask according to the
            specific tokenizer's default, defined by the `return_outputs` attribute.

            [What are attention masks?](../glossary#attention-mask)

    Return:
        `Dict[str, List[List[int]]]`: A dictionary with the following keys:

        - `input_ids`: List of token ids to be fed to a model.
        - `attention_mask`: List of indices specifying which tokens should be attended to by the model.
    """


@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        assert len(titles) == len(
            texts
        ), f"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts."
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)

    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: "DPRReaderOutput",
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRReaderOutput] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]

    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ) -> List[DPRSpanPrediction]:
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, f"Wrong span indices: [{start_index}:{end_index}]"
            length = end_index - start_index + 1
            assert length <= max_answer_length, f"Span is too long: {length} > {max_answer_length}"
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals


@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizerFast(CustomDPRReaderTokenizerMixin, BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DPRReaderTokenizer
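# Illustrative usage sketch: driving the reader tokenizer defined above together
# with a DPRReader model. The checkpoint name comes from the pretrained maps above;
# the question/passage strings follow the standard transformers DPR example.
from transformers import DPRReader, DPRReaderTokenizerFast


def dpr_reader_sketch():
    tokenizer = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
    model = DPRReader.from_pretrained("facebook/dpr-reader-single-nq-base")
    # __call__ concatenates [CLS] question [SEP] title [SEP] text per passage
    encoded_inputs = tokenizer(
        questions=["What is love?"],
        titles=["Haddaway"],
        texts=["'What Is Love' is a song recorded by the artist Haddaway"],
        return_tensors="pt",
    )
    outputs = model(**encoded_inputs)
    # decode_best_spans consumes the encoded inputs plus (start_logits, end_logits,
    # relevance_logits) and returns DPRSpanPrediction tuples for the best answers
    predicted_spans = tokenizer.decode_best_spans(encoded_inputs, outputs, num_spans=1, max_answer_length=16)
    return predicted_spans[0].text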
699
0
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch

from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce


def create_tensor(state):
    return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device)


def test_gather(state):
    tensor = create_tensor(state)
    gathered_tensor = gather(tensor)
    assert gathered_tensor.tolist() == list(range(1, state.num_processes**2 + 1))


def test_gather_object(state):
    obj = [state.process_index]
    gathered_obj = gather_object(obj)
    assert len(gathered_obj) == state.num_processes, f"{gathered_obj}, {len(gathered_obj)} != {state.num_processes}"
    assert gathered_obj == list(range(state.num_processes)), f"{gathered_obj} != {list(range(state.num_processes))}"


def test_broadcast(state):
    tensor = create_tensor(state)
    broadcasted_tensor = broadcast(tensor)
    assert broadcasted_tensor.shape == torch.Size([state.num_processes])
    assert broadcasted_tensor.tolist() == list(range(1, state.num_processes + 1))


def test_pad_across_processes(state):
    # pad one extra element on the main process so that padding is actually exercised
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1).to(state.device)
    else:
        tensor = torch.arange(state.num_processes).to(state.device)
    padded_tensor = pad_across_processes(tensor)
    assert padded_tensor.shape == torch.Size([state.num_processes + 1])
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0, state.num_processes)) + [0]


def test_reduce_sum(state):
    # this check is written for exactly two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, 'sum')
    truth_tensor = torch.tensor([4.0, 6]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def test_reduce_mean(state):
    # this check is written for exactly two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, 'mean')
    truth_tensor = torch.tensor([2.0, 3]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def _mp_fn(index):
    # entry point used by spawned (e.g. TPU) workers
    main()


def main():
    state = PartialState()
    state.print(f"State: {state}")

    state.print('testing gather')
    test_gather(state)
    state.print('testing gather_object')
    test_gather_object(state)
    state.print('testing broadcast')
    test_broadcast(state)
    state.print('testing pad_across_processes')
    test_pad_across_processes(state)
    state.print('testing reduce_sum')
    test_reduce_sum(state)
    state.print('testing reduce_mean')
    test_reduce_mean(state)


if __name__ == "__main__":
    main()
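# Illustrative launch sketch: the checks above only exercise real communication
# when run under a distributed launcher, e.g. (script file name assumed):
#
#   accelerate launch --num_processes 2 test_ops.py
#
# On a single process the collectives degrade to no-ops, so a quick smoke test
# is also possible without any launcher:
import torch
from accelerate import PartialState
from accelerate.utils.operations import gather


def single_process_smoke_test():
    state = PartialState()
    tensor = torch.arange(state.num_processes).float().to(state.device) + 1.0
    # with one process, gather() simply returns the local tensor unchanged
    assert gather(tensor).tolist() == [1.0]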
710
import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    EulerAncestralDiscreteScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    StableDiffusionInstructPix2PixPipeline,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import (
    IMAGE_TO_IMAGE_IMAGE_PARAMS,
    TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


class StableDiffusionInstructPix2PixPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInstructPix2PixPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width", "cross_attention_kwargs"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=8,
            out_channels=4,
            down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'),
            up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'),
            cross_attention_dim=32,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'],
            up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')

        components = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert('RGB')
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'image': image,
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 6.0,
            'image_guidance_scale': 1,
            'output_type': 'numpy',
        }
        return inputs

    def test_stable_diffusion_pix2pix_default_case(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_negative_prompt(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = 'french fries'
        output = sd_pipe(**inputs, negative_prompt=negative_prompt)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_multiple_init_images(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs['prompt'] = [inputs['prompt']] * 2

        image = np.array(inputs['image']).astype(np.float32) / 255.0
        image = torch.from_numpy(image).unsqueeze(0).to(device)
        image = image / 2 + 0.5
        image = image.permute(0, 3, 1, 2)
        inputs['image'] = image.repeat(2, 1, 1, 1)

        image = sd_pipe(**inputs).images
        image_slice = image[-1, -3:, -3:, -1]

        assert image.shape == (2, 32, 32, 3)
        expected_slice = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_euler(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components['scheduler'] = EulerAncestralDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule='scaled_linear'
        )
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        slice = [round(x, 4) for x in image_slice.flatten().tolist()]
        print(','.join([str(x) for x in slice]))

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_latents_input(self):
        components = self.get_dummy_components()
        pipe = StableDiffusionInstructPix2PixPipeline(**components)
        pipe.image_processor = VaeImageProcessor(do_resize=False, do_normalize=False)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        out = pipe(**self.get_dummy_inputs_by_type(torch_device, input_image_type='pt'))[0]

        vae = components['vae']
        inputs = self.get_dummy_inputs_by_type(torch_device, input_image_type='pt')

        for image_param in self.image_latents_params:
            if image_param in inputs.keys():
                inputs[image_param] = vae.encode(inputs[image_param]).latent_dist.mode()

        out_latents_inputs = pipe(**inputs)[0]

        max_diff = np.abs(out - out_latents_inputs).max()
        self.assertLess(max_diff, 1e-4, 'passing latents as image input generate different result from passing image')


@slow
@require_torch_gpu
class StableDiffusionInstructPix2PixPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0):
        generator = torch.manual_seed(seed)
        image = load_image(
            'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg'
        )
        inputs = {
            'prompt': 'turn him into a cyborg',
            'image': image,
            'generator': generator,
            'num_inference_steps': 3,
            'guidance_scale': 7.5,
            'image_guidance_scale': 1.0,
            'output_type': 'numpy',
        }
        return inputs

    def test_stable_diffusion_pix2pix_default(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            'timbrooks/instruct-pix2pix', safety_checker=None
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555])
        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_k_lms(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            'timbrooks/instruct-pix2pix', safety_checker=None
        )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301])
        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_ddim(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            'timbrooks/instruct-pix2pix', safety_checker=None
        )
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753])
        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_intermediate_state(self):
        number_of_steps = 0

        def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2

        callback_fn.has_been_called = False

        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            'timbrooks/instruct-pix2pix', safety_checker=None, torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        pipe(**inputs, callback=callback_fn, callback_steps=1)
        assert callback_fn.has_been_called
        assert number_of_steps == 3

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            'timbrooks/instruct-pix2pix', safety_checker=None, torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        inputs = self.get_inputs()
        _ = pipe(**inputs)

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.2 GB is allocated
        assert mem_bytes < 2.2 * 10**9

    def test_stable_diffusion_pix2pix_pipeline_multiple_of_8(self):
        inputs = self.get_inputs()
        # resize to resolution that is divisible by 8 but not 16 or 32
        inputs['image'] = inputs['image'].resize((504, 504))

        model_id = 'timbrooks/instruct-pix2pix'
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        output = pipe(**inputs)
        image = output.images[0]

        image_slice = image[255:258, 383:386, -1]

        assert image.shape == (504, 504, 3)
        expected_slice = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
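# Illustrative usage sketch: a minimal InstructPix2Pix edit mirroring the slow
# tests above. The checkpoint id, prompt, image URL, and guidance values come
# from the tests; running in fp16 on a CUDA device is an assumption of this sketch.
import torch
from diffusers import StableDiffusionInstructPix2PixPipeline
from diffusers.utils import load_image


def edit_image_sketch():
    pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
        'timbrooks/instruct-pix2pix', safety_checker=None, torch_dtype=torch.float16
    ).to('cuda')
    image = load_image(
        'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg'
    )
    output = pipe(
        prompt='turn him into a cyborg',
        image=image,
        num_inference_steps=3,
        guidance_scale=7.5,
        image_guidance_scale=1.0,
    )
    return output.images[0]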
699
0