Dataset schema (one row per example):
- code: string, 81 to 54k characters
- code_codestyle: int64, 0 to 721
- style_context: string, 91 to 41.9k characters
- style_context_codestyle: int64, 0 to 699
- label: int64, 0 or 1
from math import cos, sin, sqrt, tau

from audio_filters.iir_filter import IIRFilter


def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a low-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a high-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a band-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create an all-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt


def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a peak (bell) biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a low-shelf biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)
    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a high-shelf biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)
    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
from __future__ import annotations

import numpy as np
from numpy import float64
from numpy.typing import NDArray


def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[float],
    iterations: int,
) -> list[float]:
    """Solve Ax = b iteratively via Jacobi iteration, starting from init_val."""
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape

    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)

    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)

    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)

    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)

    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")

    table: NDArray[float64] = np.concatenate((coefficient_matrix, constant_matrix), axis=1)

    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val

    return [float(i) for i in new_val]


def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    """Raise if the coefficient part of the augmented table is not strictly diagonally dominant."""
    rows, cols = table.shape

    is_diagonally_dominant = True

    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]

        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")

    return is_diagonally_dominant


# Test Cases
if __name__ == "__main__":
    import doctest

    doctest.testmod()
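A small usage sketch with a strictly diagonally dominant 3x3 system (values chosen for illustration); successive iterations approach the exact solution x ≈ (0.949, -1.119, -0.678):

coefficient = np.array([[4, 1, 1], [1, 5, 2], [1, 2, 4]])
constant = np.array([[2], [-6], [-4]])
init_val = [0.5, -0.5, -0.5]
print(jacobi_iteration_method(coefficient, constant, init_val, iterations=3))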
from __future__ import annotations

COULOMBS_CONSTANT = 8.988e9  # units = N * m^2 * C^-2


def coulombs_law(force: float, charge1: float, charge2: float, distance: float) -> dict[str, float]:
    """Solve Coulomb's law for whichever one of the four quantities is passed as 0."""
    charge_product = abs(charge1 * charge2)

    if (force, charge1, charge2, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if distance < 0:
        raise ValueError("Distance cannot be negative")
    if force == 0:
        force = COULOMBS_CONSTANT * charge_product / (distance**2)
        return {"force": force}
    elif charge1 == 0:
        charge1 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge2)
        return {"charge1": charge1}
    elif charge2 == 0:
        charge2 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge1)
        return {"charge2": charge2}
    elif distance == 0:
        distance = (COULOMBS_CONSTANT * charge_product / abs(force)) ** 0.5
        return {"distance": distance}
    raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
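A worked example: with charge1 = 3 C, charge2 = 5 C and distance = 2000 m, the zero argument is the force, so F = k·|q1·q2|/d² = 8.988e9 · 15 / 4e6 = 33705.0 N:

print(coulombs_law(force=0, charge1=3, charge2=5, distance=2000))  # {'force': 33705.0}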
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors


def mobius(n: int) -> int:
    """Return the value of the Mobius function at n."""
    factors = prime_factors(n)
    if is_square_free(factors):
        return -1 if len(factors) % 2 else 1
    return 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
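Quick sanity checks against the definition of the Mobius function:

print(mobius(10))  # 1: 10 = 2 * 5 has an even number of distinct prime factors
print(mobius(7))   # -1: a prime has one (odd) prime factor
print(mobius(8))   # 0: 8 is divisible by the square 4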
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class ImageTextProcessor(ProcessorMixin):
    # Wraps an auto image processor and an auto tokenizer into a single processor.
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        # Forward everything to the tokenizer's batch_decode
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        # Forward everything to the tokenizer's decode
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "attention_mask", "pixel_values"]
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)

_import_structure = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_reformer"] = ["ReformerTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_reformer_fast"] = ["ReformerTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_reformer"] = [
        "REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ReformerAttention",
        "ReformerForMaskedLM",
        "ReformerForQuestionAnswering",
        "ReformerForSequenceClassification",
        "ReformerLayer",
        "ReformerModel",
        "ReformerModelWithLMHead",
        "ReformerPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer import ReformerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer_fast import ReformerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_reformer import (
            REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            ReformerAttention,
            ReformerForMaskedLM,
            ReformerForQuestionAnswering,
            ReformerForSequenceClassification,
            ReformerLayer,
            ReformerModel,
            ReformerModelWithLMHead,
            ReformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import argparse

import pytorch_lightning as pl
import torch
from torch import nn

from transformers import LongformerForQuestionAnswering, LongformerModel


class LightningModel(pl.LightningModule):
    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    # implement only because lightning requires to do so
    def forward(self):
        pass


def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model: str, longformer_question_answering_ckpt_path: str, pytorch_dump_folder_path: str
):
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)

    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)

    print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--longformer_model",
        default=None,
        type=str,
        required=True,
        help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
    )
    parser.add_argument(
        "--longformer_question_answering_ckpt_path",
        default=None,
        type=str,
        required=True,
        help="Path the official PyTorch Lightning Checkpoint.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_longformer_qa_checkpoint_to_pytorch(
        args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
    )
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging

logger = logging.get_logger(__name__)

IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
    "kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
    "kssteven/ibert-roberta-large-mnli": (
        "https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
    ),
}


class IBertConfig(PretrainedConfig):
    model_type = "ibert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        quant_mode=False,
        force_dequant="none",
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant


class IBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig

if TYPE_CHECKING:
    from ... import PreTrainedTokenizerBase, TensorType

logger = logging.get_logger(__name__)


class VisionEncoderDecoderConfig(PretrainedConfig):
    model_type = "vision-encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                f"A configuration of type {self.model_type} cannot be instantiated because "
                f"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}"
            )

        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class VisionEncoderDecoderEncoderOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict({"last_hidden_state": {0: "batch", 1: "encoder_sequence"}})


class VisionEncoderDecoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict()
        common_inputs["input_ids"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["encoder_hidden_states"] = {0: "batch", 1: "encoder_sequence"}
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        import torch

        common_inputs = OrderedDict()

        dummy_input = super().generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        batch, encoder_sequence = dummy_input["input_ids"].shape
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs["input_ids"] = dummy_input.pop("input_ids")
        common_inputs["attention_mask"] = dummy_input.pop("attention_mask")
        common_inputs["encoder_hidden_states"] = torch.zeros(encoder_hidden_states_shape)

        return common_inputs


class VisionEncoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> None:
        pass

    def get_encoder_config(self, encoder_config: PretrainedConfig) -> OnnxConfig:
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config)

    def get_decoder_config(
        self, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, feature: str = "default"
    ) -> OnnxConfig:
        decoder_config.encoder_hidden_size = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config, feature)
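A minimal sketch of composing the config from two sub-configurations; the ViT/BERT pairing here is an arbitrary example choice:

from transformers import BertConfig, ViTConfig

config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(ViTConfig(), BertConfig())
assert config.model_type == "vision-encoder-decoder"
assert config.decoder.is_decoder and config.decoder.add_cross_attention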
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments


def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark.run()


if __name__ == "__main__":
    main()
def catalan(number: int) -> int:
    """Return the `number`-th Catalan number (1-indexed, so catalan(1) == 1)."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)

    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)

    current_number = 1

    for i in range(1, number):
        current_number *= 4 * i - 2
        current_number //= i + 1

    return current_number


if __name__ == "__main__":
    import doctest

    doctest.testmod()
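The loop applies the recurrence C(n) = C(n-1) * (4n - 2) / (n + 1), so the function returns the n-th Catalan number counted from C(1) = 1:

print(catalan(5))  # 14 (the sequence runs 1, 1, 2, 5, 14, ...)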
import gc
import random
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

import diffusers
from diffusers import (
    AutoencoderKL,
    EulerDiscreteScheduler,
    StableDiffusionLatentUpscalePipeline,
    StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin

enable_full_determinism()


def check_same_shape(tensor_list):
    shapes = [tensor.shape for tensor in tensor_list]
    return all(shape == shapes[0] for shape in shapes[1:])


class StableDiffusionLatentUpscalePipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionLatentUpscalePipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "height",
        "width",
        "cross_attention_kwargs",
        "negative_prompt_embeds",
        "prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"num_images_per_prompt"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    test_cpu_offload = True

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 4
        sizes = (16, 16)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    def get_dummy_components(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            act_fn="gelu",
            attention_head_dim=8,
            norm_num_groups=None,
            block_out_channels=[32, 32, 64, 64],
            time_cond_proj_dim=160,
            conv_in_kernel=1,
            conv_out_kernel=1,
            cross_attention_dim=32,
            down_block_types=(
                "KDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
            ),
            in_channels=8,
            mid_block_type=None,
            only_cross_attention=False,
            out_channels=5,
            resnet_time_scale_shift="scale_shift",
            time_embedding_type="fourier",
            timestep_post_act="gelu",
            up_block_types=("KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KUpBlock2D"),
        )
        vae = AutoencoderKL(
            block_out_channels=[32, 32, 64, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=[
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
            ],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        scheduler = EulerDiscreteScheduler(prediction_type="sample")
        text_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="quick_gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": model.eval(),
            "vae": vae.eval(),
            "scheduler": scheduler,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": self.dummy_image.cpu(),
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 256, 256, 3))
        expected_slice = np.array(
            [0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=7e-3)

    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=3e-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=7e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)

    def test_karras_schedulers_shape(self):
        skip_schedulers = [
            "DDIMScheduler",
            "DDPMScheduler",
            "PNDMScheduler",
            "HeunDiscreteScheduler",
            "EulerAncestralDiscreteScheduler",
            "KDPM2DiscreteScheduler",
            "KDPM2AncestralDiscreteScheduler",
            "DPMSolverSDEScheduler",
        ]
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)

        # make sure that PNDM does not need warm-up
        pipe.scheduler.register_to_config(skip_prk_steps=True)

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = 2

        outputs = []
        for scheduler_enum in KarrasDiffusionSchedulers:
            if scheduler_enum.name in skip_schedulers:
                # no sigma schedulers are not supported
                continue

            scheduler_cls = getattr(diffusers, scheduler_enum.name)
            pipe.scheduler = scheduler_cls.from_config(pipe.scheduler.config)
            output = pipe(**inputs)[0]
            outputs.append(output)

        assert check_same_shape(outputs)


@require_torch_gpu
@slow
class StableDiffusionLatentUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_latent_upscaler_fp16(self):
        generator = torch.manual_seed(33)

        pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16)
        pipe.to("cuda")

        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
        )
        upscaler.to("cuda")

        prompt = "a photo of an astronaut high resolution, unreal engine, ultra realistic"

        low_res_latents = pipe(prompt, generator=generator, output_type="latent").images

        image = upscaler(
            prompt=prompt,
            image=low_res_latents,
            num_inference_steps=20,
            guidance_scale=0,
            generator=generator,
            output_type="np",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy"
        )
        assert np.abs((expected_image - image).mean()) < 5e-2

    def test_latent_upscaler_fp16_image(self):
        generator = torch.manual_seed(33)

        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
        )
        upscaler.to("cuda")

        prompt = "the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas"

        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png"
        )

        image = upscaler(
            prompt=prompt,
            image=image,
            num_inference_steps=20,
            guidance_scale=0,
            generator=generator,
            output_type="np",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy"
        )
        assert np.abs((expected_image - image).max()) < 5e-2
import math


def solution(n: int = 100) -> int:
    """Return the difference between the square of the sum and the sum of the squares of 1..n."""
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    square_of_sum = int(math.pow(sum(range(1, n + 1)), 2))
    return square_of_sum - sum_of_squares


if __name__ == "__main__":
    print(f"{solution() = }")
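A worked check for n = 10: the sum of squares is 385, the square of the sum is 55² = 3025, and the difference is 2640; for the default n = 100 the function returns 25164150:

print(solution(10))  # 2640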
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING

import numpy as np
import pyarrow as pa

from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter

if TYPE_CHECKING:
    import torch


class TorchFormatter(TensorFormatter[Mapping, "torch.Tensor", Mapping]):
    def __init__(self, features=None, **torch_tensor_kwargs):
        super().__init__(features=features)
        self.torch_tensor_kwargs = torch_tensor_kwargs
        import torch  # noqa import torch at initialization

    def _consolidate(self, column):
        import torch

        if isinstance(column, list) and column:
            if all(
                isinstance(x, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column
            ):
                return torch.stack(column)
        return column

    def _tensorize(self, value):
        import torch

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}

        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            default_dtype = {"dtype": torch.int64}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs})

    def _recursive_tensorize(self, data_struct):
        import torch

        # support for torch, tf, jax etc.
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, torch.Tensor):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # torch tensors cannot be instantied from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "torch.Tensor":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union

import numpy as np
import torch

from .utils import deprecate, is_transformers_available


if is_transformers_available():
    import transformers


def set_seed(seed: int):
    """Seed random, numpy and torch for reproducible behavior."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # ^^ safe to call this function even if cuda is not available


class EMAModel:
    """Exponential Moving Average of model weights."""

    def __init__(
        self,
        parameters: Iterable[torch.nn.Parameter],
        decay: float = 0.9999,
        min_decay: float = 0.0,
        update_after_step: int = 0,
        use_ema_warmup: bool = False,
        inv_gamma: Union[float, int] = 1.0,
        power: Union[float, int] = 2 / 3,
        model_cls: Optional[Any] = None,
        model_config: Dict[str, Any] = None,
        **kwargs,
    ):
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage`",
                "1.0.0",
                deprecation_message,
                standard_warn=False,
            )
            parameters = parameters.parameters()

            # set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
            use_ema_warmup = True

        if kwargs.get("max_value", None) is not None:
            deprecation_message = "The `max_value` argument is deprecated. Please use `decay` instead."
            deprecate("max_value", "1.0.0", deprecation_message, standard_warn=False)
            decay = kwargs["max_value"]

        if kwargs.get("min_value", None) is not None:
            deprecation_message = "The `min_value` argument is deprecated. Please use `min_decay` instead."
            deprecate("min_value", "1.0.0", deprecation_message, standard_warn=False)
            min_decay = kwargs["min_value"]

        parameters = list(parameters)
        self.shadow_params = [p.clone().detach() for p in parameters]

        if kwargs.get("device", None) is not None:
            deprecation_message = "The `device` argument is deprecated. Please use `to` instead."
            deprecate("device", "1.0.0", deprecation_message, standard_warn=False)
            self.to(device=kwargs["device"])

        self.temp_stored_params = None

        self.decay = decay
        self.min_decay = min_decay
        self.update_after_step = update_after_step
        self.use_ema_warmup = use_ema_warmup
        self.inv_gamma = inv_gamma
        self.power = power
        self.optimization_step = 0
        self.cur_decay_value = None  # set in `step()`

        self.model_cls = model_cls
        self.model_config = model_config

    @classmethod
    def from_pretrained(cls, path, model_cls) -> "EMAModel":
        _, ema_kwargs = model_cls.load_config(path, return_unused_kwargs=True)
        model = model_cls.from_pretrained(path)

        ema_model = cls(model.parameters(), model_cls=model_cls, model_config=model.config)

        ema_model.load_state_dict(ema_kwargs)
        return ema_model

    def save_pretrained(self, path):
        if self.model_cls is None:
            raise ValueError("`save_pretrained` can only be used if `model_cls` was defined at __init__.")

        if self.model_config is None:
            raise ValueError("`save_pretrained` can only be used if `model_config` was defined at __init__.")

        model = self.model_cls.from_config(self.model_config)
        state_dict = self.state_dict()
        state_dict.pop("shadow_params", None)

        model.register_to_config(**state_dict)
        self.copy_to(model.parameters())
        model.save_pretrained(path)

    def get_decay(self, optimization_step: int) -> float:
        """Compute the decay factor for the exponential moving average."""
        step = max(0, optimization_step - self.update_after_step - 1)

        if step <= 0:
            return 0.0

        if self.use_ema_warmup:
            cur_decay_value = 1 - (1 + step / self.inv_gamma) ** -self.power
        else:
            cur_decay_value = (1 + step) / (10 + step)

        cur_decay_value = min(cur_decay_value, self.decay)
        # make sure decay is not smaller than min_decay
        cur_decay_value = max(cur_decay_value, self.min_decay)
        return cur_decay_value

    @torch.no_grad()
    def step(self, parameters: Iterable[torch.nn.Parameter]):
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage.step`",
                "1.0.0",
                deprecation_message,
                standard_warn=False,
            )
            parameters = parameters.parameters()

        parameters = list(parameters)

        self.optimization_step += 1

        # Compute the decay factor for the exponential moving average.
        decay = self.get_decay(self.optimization_step)
        self.cur_decay_value = decay
        one_minus_decay = 1 - decay

        context_manager = contextlib.nullcontext
        if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
            import deepspeed

        for s_param, param in zip(self.shadow_params, parameters):
            if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
                context_manager = deepspeed.zero.GatheredParameters(param, modifier_rank=None)

            with context_manager():
                if param.requires_grad:
                    s_param.sub_(one_minus_decay * (s_param - param))
                else:
                    s_param.copy_(param)

    def copy_to(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        """Copy the averaged parameters into the given collection of parameters."""
        parameters = list(parameters)
        for s_param, param in zip(self.shadow_params, parameters):
            param.data.copy_(s_param.to(param.device).data)

    def to(self, device=None, dtype=None) -> None:
        """Move the internal buffers to `device`."""
        self.shadow_params = [
            p.to(device=device, dtype=dtype) if p.is_floating_point() else p.to(device=device)
            for p in self.shadow_params
        ]

    def state_dict(self) -> dict:
        """Return the state of the EMA as a dict."""
        return {
            "decay": self.decay,
            "min_decay": self.min_decay,
            "optimization_step": self.optimization_step,
            "update_after_step": self.update_after_step,
            "use_ema_warmup": self.use_ema_warmup,
            "inv_gamma": self.inv_gamma,
            "power": self.power,
            "shadow_params": self.shadow_params,
        }

    def store(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        """Save the current parameters for restoring later."""
        self.temp_stored_params = [param.detach().cpu().clone() for param in parameters]

    def restore(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        """Restore the parameters stored with the `store` method."""
        if self.temp_stored_params is None:
            raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights to `restore()`")
        for c_param, param in zip(self.temp_stored_params, parameters):
            param.data.copy_(c_param.data)

        # Better memory-wise.
        self.temp_stored_params = None

    def load_state_dict(self, state_dict: dict) -> None:
        """Load the EMA state, validating each field."""
        # deepcopy, to be consistent with module API
        state_dict = copy.deepcopy(state_dict)

        self.decay = state_dict.get("decay", self.decay)
        if self.decay < 0.0 or self.decay > 1.0:
            raise ValueError("Decay must be between 0 and 1")

        self.min_decay = state_dict.get("min_decay", self.min_decay)
        if not isinstance(self.min_decay, float):
            raise ValueError("Invalid min_decay")

        self.optimization_step = state_dict.get("optimization_step", self.optimization_step)
        if not isinstance(self.optimization_step, int):
            raise ValueError("Invalid optimization_step")

        self.update_after_step = state_dict.get("update_after_step", self.update_after_step)
        if not isinstance(self.update_after_step, int):
            raise ValueError("Invalid update_after_step")

        self.use_ema_warmup = state_dict.get("use_ema_warmup", self.use_ema_warmup)
        if not isinstance(self.use_ema_warmup, bool):
            raise ValueError("Invalid use_ema_warmup")

        self.inv_gamma = state_dict.get("inv_gamma", self.inv_gamma)
        if not isinstance(self.inv_gamma, (float, int)):
            raise ValueError("Invalid inv_gamma")

        self.power = state_dict.get("power", self.power)
        if not isinstance(self.power, (float, int)):
            raise ValueError("Invalid power")

        shadow_params = state_dict.get("shadow_params", None)
        if shadow_params is not None:
            self.shadow_params = shadow_params
            if not isinstance(self.shadow_params, list):
                raise ValueError("shadow_params must be a list")
            if not all(isinstance(p, torch.Tensor) for p in self.shadow_params):
                raise ValueError("shadow_params must all be Tensors")
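A minimal training-loop sketch for the EMA helper above; the model and hyperparameters are arbitrary placeholders:

import torch
from torch import nn

model = nn.Linear(4, 2)
ema = EMAModel(model.parameters(), decay=0.999)
optimizer = torch.optim.SGD(model.parameters(), lr=1e-2)

for _ in range(10):
    loss = model(torch.randn(8, 4)).pow(2).mean()
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    ema.step(model.parameters())  # update the shadow weights

# Swap in the averaged weights for evaluation, then restore the live ones.
ema.store(model.parameters())
ema.copy_to(model.parameters())
ema.restore(model.parameters())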
def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
    """True if no already-colored neighbour uses `color`."""
    return not any(
        neighbour == 1 and colored_vertices[i] == color for i, neighbour in enumerate(neighbours)
    )


def util_color(graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int) -> bool:
    """Backtracking helper: try to color vertices from `index` onward."""
    # Base Case
    if index == len(graph):
        return True

    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph: list[list[int]], max_colors: int) -> list[int]:
    """Return a valid coloring with at most `max_colors` colors, or [] if none exists."""
    colored_vertices = [-1] * len(graph)

    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices

    return []
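A quick demonstration on a triangle graph (adjacency matrix), which is 3-colorable but not 2-colorable:

triangle = [
    [0, 1, 1],
    [1, 0, 1],
    [1, 1, 0],
]
print(color(triangle, 3))  # [0, 1, 2]
print(color(triangle, 2))  # [] -- no valid 2-coloring exists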
import logging
import os
from logging import (
    CRITICAL,  # NOQA
    DEBUG,  # NOQA
    ERROR,  # NOQA
    FATAL,  # NOQA
    INFO,  # NOQA
    NOTSET,  # NOQA
    WARN,  # NOQA
    WARNING,  # NOQA
)
from typing import Optional

from tqdm import auto as tqdm_lib

log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}

_default_log_level = logging.WARNING


def _get_default_logging_level() -> int:
    """Read DATASETS_VERBOSITY from the environment, falling back to the default level."""
    env_level_str = os.getenv("DATASETS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option DATASETS_VERBOSITY={env_level_str}, "
                f"has to be one of: { ', '.join(log_levels.keys()) }"
            )
    return _default_log_level


def _get_library_name() -> str:
    return __name__.split(".")[0]


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())


def _configure_library_root_logger() -> None:
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(_get_default_logging_level())


def _reset_library_root_logger() -> None:
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(logging.NOTSET)


def get_logger(name: Optional[str] = None) -> logging.Logger:
    if name is None:
        name = _get_library_name()
    return logging.getLogger(name)


def get_verbosity() -> int:
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    _get_library_root_logger().setLevel(verbosity)


def set_verbosity_info():
    return set_verbosity(INFO)


def set_verbosity_warning():
    return set_verbosity(WARNING)


def set_verbosity_debug():
    return set_verbosity(DEBUG)


def set_verbosity_error():
    return set_verbosity(ERROR)


def disable_propagation() -> None:
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    _get_library_root_logger().propagate = True


# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()


class EmptyTqdm:
    """Dummy tqdm which doesn't do anything."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        """Return an empty function for any attribute access."""

        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return


_tqdm_active = True


class _tqdm_cls:
    def __call__(self, *args, disable=False, **kwargs):
        if _tqdm_active and not disable:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()


tqdm = _tqdm_cls()


def is_progress_bar_enabled() -> bool:
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bar():
    global _tqdm_active
    _tqdm_active = True


def disable_progress_bar():
    global _tqdm_active
    _tqdm_active = False
def set_bit(number: int, position: int) -> int:
    """Set the bit at `position` of `number` to 1."""
    return number | (1 << position)


def clear_bit(number: int, position: int) -> int:
    """Set the bit at `position` of `number` to 0."""
    return number & ~(1 << position)


def flip_bit(number: int, position: int) -> int:
    """Toggle the bit at `position` of `number`."""
    return number ^ (1 << position)


def is_bit_set(number: int, position: int) -> bool:
    """Return True if the bit at `position` of `number` is 1."""
    return ((number >> position) & 1) == 1


def get_bit(number: int, position: int) -> int:
    """Return the bit (0 or 1) at `position` of `number`."""
    return int((number & (1 << position)) != 0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
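A few worked examples on n = 0b1010 (decimal 10):

n = 0b1010
print(bin(set_bit(n, 0)))    # 0b1011
print(bin(clear_bit(n, 1)))  # 0b1000
print(bin(flip_bit(n, 2)))   # 0b1110
print(is_bit_set(n, 3))      # True
print(get_bit(n, 0))         # 0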
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging

logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model",
        "xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model",
        "xlm-roberta-large-finetuned-conll02-dutch": (
            "https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model"
        ),
        "xlm-roberta-large-finetuned-conll02-spanish": (
            "https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model"
        ),
        "xlm-roberta-large-finetuned-conll03-english": (
            "https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model"
        ),
        "xlm-roberta-large-finetuned-conll03-german": (
            "https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlm-roberta-base": 512,
    "xlm-roberta-large": 512,
    "xlm-roberta-large-finetuned-conll02-dutch": 512,
    "xlm-roberta-large-finetuned-conll02-spanish": 512,
    "xlm-roberta-large-finetuned-conll03-english": 512,
    "xlm-roberta-large-finetuned-conll03-german": 512,
}


class XLMRobertaTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + self.fairseq_offset
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset + 1  # Add the <mask> token

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)

        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) in a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
'''simple docstring''' from collections import Counter import numpy as np from sklearn import datasets from sklearn.model_selection import train_test_split _a : str = datasets.load_iris() _a : List[Any] = np.array(data["data"]) _a : Optional[Any] = np.array(data["target"]) _a : Dict = data["target_names"] _a , _a , _a , _a : Any = train_test_split(X, y) def _lowercase ( lowerCamelCase__ , lowerCamelCase__ ) -> Tuple: """simple docstring""" return np.linalg.norm(np.array(lowerCamelCase__ ) - np.array(lowerCamelCase__ ) ) def _lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=5 ) -> int: """simple docstring""" __UpperCAmelCase : List[Any] = zip(lowerCamelCase__ , lowerCamelCase__ ) # List of distances of all points from the point to be classified __UpperCAmelCase : int = [] for data_point in data: __UpperCAmelCase : Optional[Any] = euclidean_distance(data_point[0] , lowerCamelCase__ ) distances.append((distance, data_point[1]) ) # Choosing 'k' points with the least distances. __UpperCAmelCase : Union[str, Any] = [i[1] for i in sorted(lowerCamelCase__ )[:k]] # Most commonly occurring class among them # is the class into which the point is classified __UpperCAmelCase : Dict = Counter(lowerCamelCase__ ).most_common(1 )[0][0] return classes[result] if __name__ == "__main__": print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
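A hedged spot check of the k-NN vote above on a toy one-dimensional dataset. It assumes the function keeps the name classifier used in its own __main__ demo, and passes k positionally since the parameter names in this sample are obfuscated.

# Toy check: three points near 0 labelled 0, two near 5 labelled 1.
toy_X = [[0.0], [0.1], [0.2], [5.0], [5.1]]
toy_y = [0, 0, 0, 1, 1]
toy_classes = ["blue", "red"]

# With k=3 the nearest neighbours of [0.05] are the three 0-labelled
# points, so the majority vote returns classes[0].
print(classifier(toy_X, toy_y, toy_classes, [0.05], 3))  # blue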
'''simple docstring''' import argparse import tensorflow as tf import torch from transformers import BertConfig, BertForMaskedLM from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertPooler, BertSelfAttention, BertSelfOutput, ) from transformers.utils import logging logging.set_verbosity_info() def _lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Optional[Any]: """simple docstring""" def get_masked_lm_array(lowerCamelCase__ ): __UpperCAmelCase : str = f"""masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE""" __UpperCAmelCase : Union[str, Any] = tf.train.load_variable(lowerCamelCase__ , lowerCamelCase__ ) if "kernel" in name: __UpperCAmelCase : Any = array.transpose() return torch.from_numpy(lowerCamelCase__ ) def get_encoder_array(lowerCamelCase__ ): __UpperCAmelCase : Any = f"""encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE""" __UpperCAmelCase : Tuple = tf.train.load_variable(lowerCamelCase__ , lowerCamelCase__ ) if "kernel" in name: __UpperCAmelCase : str = array.transpose() return torch.from_numpy(lowerCamelCase__ ) def get_encoder_layer_array(lowerCamelCase__ , lowerCamelCase__ ): __UpperCAmelCase : List[Any] = f"""encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE""" __UpperCAmelCase : Union[str, Any] = tf.train.load_variable(lowerCamelCase__ , lowerCamelCase__ ) if "kernel" in name: __UpperCAmelCase : str = array.transpose() return torch.from_numpy(lowerCamelCase__ ) def get_encoder_attention_layer_array(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): __UpperCAmelCase : str = f"""encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE""" __UpperCAmelCase : Optional[Any] = tf.train.load_variable(lowerCamelCase__ , lowerCamelCase__ ) __UpperCAmelCase : Tuple = array.reshape(lowerCamelCase__ ) if "kernel" in name: __UpperCAmelCase : int = array.transpose() return torch.from_numpy(lowerCamelCase__ ) print(f"""Loading model based on config from {config_path}...""" ) __UpperCAmelCase : Union[str, Any] = BertConfig.from_json_file(lowerCamelCase__ ) __UpperCAmelCase : Optional[Any] = BertForMaskedLM(lowerCamelCase__ ) # Layers for layer_index in range(0 , config.num_hidden_layers ): __UpperCAmelCase : BertLayer = model.bert.encoder.layer[layer_index] # Self-attention __UpperCAmelCase : BertSelfAttention = layer.attention.self __UpperCAmelCase : Union[str, Any] = get_encoder_attention_layer_array( lowerCamelCase__ , "_query_dense/kernel" , self_attn.query.weight.data.shape ) __UpperCAmelCase : Dict = get_encoder_attention_layer_array( lowerCamelCase__ , "_query_dense/bias" , self_attn.query.bias.data.shape ) __UpperCAmelCase : Tuple = get_encoder_attention_layer_array( lowerCamelCase__ , "_key_dense/kernel" , self_attn.key.weight.data.shape ) __UpperCAmelCase : Union[str, Any] = get_encoder_attention_layer_array( lowerCamelCase__ , "_key_dense/bias" , self_attn.key.bias.data.shape ) __UpperCAmelCase : List[str] = get_encoder_attention_layer_array( lowerCamelCase__ , "_value_dense/kernel" , self_attn.value.weight.data.shape ) __UpperCAmelCase : Union[str, Any] = get_encoder_attention_layer_array( lowerCamelCase__ , "_value_dense/bias" , self_attn.value.bias.data.shape ) # Self-attention Output __UpperCAmelCase : BertSelfOutput = layer.attention.output __UpperCAmelCase : Any = get_encoder_attention_layer_array( lowerCamelCase__ , "_output_dense/kernel" , self_output.dense.weight.data.shape ) __UpperCAmelCase : str = get_encoder_attention_layer_array( lowerCamelCase__ , 
"_output_dense/bias" , self_output.dense.bias.data.shape ) __UpperCAmelCase : List[str] = get_encoder_layer_array(lowerCamelCase__ , "_attention_layer_norm/gamma" ) __UpperCAmelCase : Tuple = get_encoder_layer_array(lowerCamelCase__ , "_attention_layer_norm/beta" ) # Intermediate __UpperCAmelCase : BertIntermediate = layer.intermediate __UpperCAmelCase : Union[str, Any] = get_encoder_layer_array(lowerCamelCase__ , "_intermediate_dense/kernel" ) __UpperCAmelCase : Any = get_encoder_layer_array(lowerCamelCase__ , "_intermediate_dense/bias" ) # Output __UpperCAmelCase : BertOutput = layer.output __UpperCAmelCase : Union[str, Any] = get_encoder_layer_array(lowerCamelCase__ , "_output_dense/kernel" ) __UpperCAmelCase : Optional[int] = get_encoder_layer_array(lowerCamelCase__ , "_output_dense/bias" ) __UpperCAmelCase : List[str] = get_encoder_layer_array(lowerCamelCase__ , "_output_layer_norm/gamma" ) __UpperCAmelCase : List[str] = get_encoder_layer_array(lowerCamelCase__ , "_output_layer_norm/beta" ) # Embeddings __UpperCAmelCase : int = get_encoder_array("_position_embedding_layer/embeddings" ) __UpperCAmelCase : Optional[Any] = get_encoder_array("_type_embedding_layer/embeddings" ) __UpperCAmelCase : Any = get_encoder_array("_embedding_norm_layer/gamma" ) __UpperCAmelCase : List[str] = get_encoder_array("_embedding_norm_layer/beta" ) # LM Head __UpperCAmelCase : List[Any] = model.cls.predictions.transform __UpperCAmelCase : List[Any] = get_masked_lm_array("dense/kernel" ) __UpperCAmelCase : Optional[Any] = get_masked_lm_array("dense/bias" ) __UpperCAmelCase : Optional[int] = get_masked_lm_array("layer_norm/gamma" ) __UpperCAmelCase : int = get_masked_lm_array("layer_norm/beta" ) __UpperCAmelCase : List[str] = get_masked_lm_array("embedding_table" ) # Pooling __UpperCAmelCase : Union[str, Any] = BertPooler(config=lowerCamelCase__ ) __UpperCAmelCase : BertPooler = get_encoder_array("_pooler_layer/kernel" ) __UpperCAmelCase : BertPooler = get_encoder_array("_pooler_layer/bias" ) # Export final model model.save_pretrained(lowerCamelCase__ ) # Integration test - should load without any errors ;) __UpperCAmelCase : Optional[int] = BertForMaskedLM.from_pretrained(lowerCamelCase__ ) print(new_model.eval() ) print("Model conversion was done sucessfully!" ) if __name__ == "__main__": _a : int = argparse.ArgumentParser() parser.add_argument( "--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow Token Dropping checkpoint path." ) parser.add_argument( "--bert_config_file", type=str, required=True, help="The config json file corresponding to the BERT model. This specifies the model architecture.", ) parser.add_argument( "--pytorch_dump_path", type=str, required=True, help="Path to the output PyTorch model.", ) _a : Optional[Any] = parser.parse_args() convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
'''simple docstring''' class __A : def __init__( self , UpperCamelCase_ ): __UpperCAmelCase : Any = set_counts __UpperCAmelCase : int = max(UpperCamelCase_ ) __UpperCAmelCase : List[str] = len(UpperCamelCase_ ) __UpperCAmelCase : Any = [1] * num_sets __UpperCAmelCase : Any = list(range(UpperCamelCase_ ) ) def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ ): __UpperCAmelCase : Optional[int] = self.get_parent(UpperCamelCase_ ) __UpperCAmelCase : List[Any] = self.get_parent(UpperCamelCase_ ) if src_parent == dst_parent: return False if self.ranks[dst_parent] >= self.ranks[src_parent]: self.set_counts[dst_parent] += self.set_counts[src_parent] __UpperCAmelCase : Optional[Any] = 0 __UpperCAmelCase : List[Any] = dst_parent if self.ranks[dst_parent] == self.ranks[src_parent]: self.ranks[dst_parent] += 1 __UpperCAmelCase : Union[str, Any] = self.set_counts[dst_parent] else: self.set_counts[src_parent] += self.set_counts[dst_parent] __UpperCAmelCase : Union[str, Any] = 0 __UpperCAmelCase : Dict = src_parent __UpperCAmelCase : Dict = self.set_counts[src_parent] __UpperCAmelCase : Dict = max(self.max_set , UpperCamelCase_ ) return True def _snake_case ( self , UpperCamelCase_ ): if self.parents[disj_set] == disj_set: return disj_set __UpperCAmelCase : str = self.get_parent(self.parents[disj_set] ) return self.parents[disj_set]
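A hedged usage sketch of the union-by-rank structure above. The class and method names are obfuscated in this sample, so DisjointSet, merge, and get_parent below are assumed stand-ins for __A and its two _snake_case methods.

sets = DisjointSet([1, 1, 1])   # three singleton sets, each of count 1
sets.merge(0, 1)                # equal ranks: root 1's rank is bumped, 0 points at 1
sets.merge(1, 2)                # rank(1) > rank(2): 2's count folds into 1
print(sets.max_set)             # 3 -- every element now shares one set
print(sets.get_parent(0) == sets.get_parent(2))  # True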
'''simple docstring''' import torch from diffusers import DDIMParallelScheduler from .test_schedulers import SchedulerCommonTest class __A (__magic_name__ ): snake_case :int = (DDIMParallelScheduler,) snake_case :Tuple = (("eta", 0.0), ("num_inference_steps", 50)) def _snake_case ( self , **UpperCamelCase_ ): __UpperCAmelCase : Dict = { "num_train_timesteps": 10_00, "beta_start": 0.0_0_0_1, "beta_end": 0.0_2, "beta_schedule": "linear", "clip_sample": True, } config.update(**UpperCamelCase_ ) return config def _snake_case ( self , **UpperCamelCase_ ): __UpperCAmelCase : Union[str, Any] = self.scheduler_classes[0] __UpperCAmelCase : Optional[Any] = self.get_scheduler_config(**UpperCamelCase_ ) __UpperCAmelCase : int = scheduler_class(**UpperCamelCase_ ) __UpperCAmelCase , __UpperCAmelCase : Any = 10, 0.0 __UpperCAmelCase : List[Any] = self.dummy_model() __UpperCAmelCase : List[Any] = self.dummy_sample_deter scheduler.set_timesteps(UpperCamelCase_ ) for t in scheduler.timesteps: __UpperCAmelCase : Tuple = model(UpperCamelCase_ , UpperCamelCase_ ) __UpperCAmelCase : Optional[Any] = scheduler.step(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ).prev_sample return sample def _snake_case ( self ): for timesteps in [1_00, 5_00, 10_00]: self.check_over_configs(num_train_timesteps=UpperCamelCase_ ) def _snake_case ( self ): for steps_offset in [0, 1]: self.check_over_configs(steps_offset=UpperCamelCase_ ) __UpperCAmelCase : List[str] = self.scheduler_classes[0] __UpperCAmelCase : Optional[Any] = self.get_scheduler_config(steps_offset=1 ) __UpperCAmelCase : List[Any] = scheduler_class(**UpperCamelCase_ ) scheduler.set_timesteps(5 ) assert torch.equal(scheduler.timesteps , torch.LongTensor([8_01, 6_01, 4_01, 2_01, 1] ) ) def _snake_case ( self ): for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ): self.check_over_configs(beta_start=UpperCamelCase_ , beta_end=UpperCamelCase_ ) def _snake_case ( self ): for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=UpperCamelCase_ ) def _snake_case ( self ): for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=UpperCamelCase_ ) def _snake_case ( self ): for clip_sample in [True, False]: self.check_over_configs(clip_sample=UpperCamelCase_ ) def _snake_case ( self ): for timestep_spacing in ["trailing", "leading"]: self.check_over_configs(timestep_spacing=UpperCamelCase_ ) def _snake_case ( self ): for rescale_betas_zero_snr in [True, False]: self.check_over_configs(rescale_betas_zero_snr=UpperCamelCase_ ) def _snake_case ( self ): self.check_over_configs(thresholding=UpperCamelCase_ ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs( thresholding=UpperCamelCase_ , prediction_type=UpperCamelCase_ , sample_max_value=UpperCamelCase_ , ) def _snake_case ( self ): for t in [1, 10, 49]: self.check_over_forward(time_step=UpperCamelCase_ ) def _snake_case ( self ): for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 5_00] ): self.check_over_forward(time_step=UpperCamelCase_ , num_inference_steps=UpperCamelCase_ ) def _snake_case ( self ): for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0] ): self.check_over_forward(time_step=UpperCamelCase_ , eta=UpperCamelCase_ ) def _snake_case ( self ): __UpperCAmelCase : List[Any] = self.scheduler_classes[0] __UpperCAmelCase : Dict = self.get_scheduler_config() __UpperCAmelCase : Optional[Any] = scheduler_class(**UpperCamelCase_ ) 
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(4_20 , 4_00 ) - 0.1_4_7_7_1 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(9_80 , 9_60 ) - 0.3_2_4_6_0 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(4_87 , 4_86 ) - 0.0_0_9_7_9 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(9_99 , 9_98 ) - 0.0_2 ) ) < 1E-5 def _snake_case ( self ): __UpperCAmelCase : Union[str, Any] = self.scheduler_classes[0] __UpperCAmelCase : Optional[int] = self.get_scheduler_config() __UpperCAmelCase : List[str] = scheduler_class(**UpperCamelCase_ ) __UpperCAmelCase , __UpperCAmelCase : int = 10, 0.0 scheduler.set_timesteps(UpperCamelCase_ ) __UpperCAmelCase : Any = self.dummy_model() __UpperCAmelCase : str = self.dummy_sample_deter __UpperCAmelCase : Optional[Any] = self.dummy_sample_deter + 0.1 __UpperCAmelCase : List[str] = self.dummy_sample_deter - 0.1 __UpperCAmelCase : str = samplea.shape[0] __UpperCAmelCase : Tuple = torch.stack([samplea, samplea, samplea] , dim=0 ) __UpperCAmelCase : List[Any] = torch.arange(UpperCamelCase_ )[0:3, None].repeat(1 , UpperCamelCase_ ) __UpperCAmelCase : List[str] = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) ) __UpperCAmelCase : Tuple = scheduler.batch_step_no_noise(UpperCamelCase_ , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , UpperCamelCase_ ) __UpperCAmelCase : int = torch.sum(torch.abs(UpperCamelCase_ ) ) __UpperCAmelCase : Union[str, Any] = torch.mean(torch.abs(UpperCamelCase_ ) ) assert abs(result_sum.item() - 1_1_4_7.7_9_0_4 ) < 1E-2 assert abs(result_mean.item() - 0.4_9_8_2 ) < 1E-3 def _snake_case ( self ): __UpperCAmelCase : str = self.full_loop() __UpperCAmelCase : Optional[Any] = torch.sum(torch.abs(UpperCamelCase_ ) ) __UpperCAmelCase : List[Any] = torch.mean(torch.abs(UpperCamelCase_ ) ) assert abs(result_sum.item() - 1_7_2.0_0_6_7 ) < 1E-2 assert abs(result_mean.item() - 0.2_2_3_9_6_7 ) < 1E-3 def _snake_case ( self ): __UpperCAmelCase : List[Any] = self.full_loop(prediction_type="v_prediction" ) __UpperCAmelCase : Any = torch.sum(torch.abs(UpperCamelCase_ ) ) __UpperCAmelCase : List[Any] = torch.mean(torch.abs(UpperCamelCase_ ) ) assert abs(result_sum.item() - 5_2.5_3_0_2 ) < 1E-2 assert abs(result_mean.item() - 0.0_6_8_4 ) < 1E-3 def _snake_case ( self ): # We specify different beta, so that the first alpha is 0.99 __UpperCAmelCase : List[Any] = self.full_loop(set_alpha_to_one=UpperCamelCase_ , beta_start=0.0_1 ) __UpperCAmelCase : List[str] = torch.sum(torch.abs(UpperCamelCase_ ) ) __UpperCAmelCase : int = torch.mean(torch.abs(UpperCamelCase_ ) ) assert abs(result_sum.item() - 1_4_9.8_2_9_5 ) < 1E-2 assert abs(result_mean.item() - 0.1_9_5_1 ) < 1E-3 def _snake_case ( self ): # We specify different beta, so that the first alpha is 0.99 __UpperCAmelCase : Optional[int] = self.full_loop(set_alpha_to_one=UpperCamelCase_ , beta_start=0.0_1 ) __UpperCAmelCase : Tuple = torch.sum(torch.abs(UpperCamelCase_ ) ) __UpperCAmelCase : Dict = torch.mean(torch.abs(UpperCamelCase_ ) ) assert abs(result_sum.item() - 1_4_9.0_7_8_4 ) < 1E-2 assert abs(result_mean.item() - 0.1_9_4_1 ) < 1E-3
'''simple docstring''' def _lowercase ( lowerCamelCase__ , lowerCamelCase__ ) -> List[str]: """simple docstring""" __UpperCAmelCase : Dict = (boundary[1] - boundary[0]) / steps __UpperCAmelCase : Tuple = boundary[0] __UpperCAmelCase : List[str] = boundary[1] __UpperCAmelCase : List[Any] = make_points(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) __UpperCAmelCase : int = 0.0 y += (h / 2.0) * f(lowerCamelCase__ ) for i in x_i: # print(i) y += h * f(lowerCamelCase__ ) y += (h / 2.0) * f(lowerCamelCase__ ) return y def _lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> List[Any]: """simple docstring""" __UpperCAmelCase : Optional[Any] = a + h while x < (b - h): yield x __UpperCAmelCase : List[str] = x + h def _lowercase ( lowerCamelCase__ ) -> Optional[Any]: # enter your function here """simple docstring""" __UpperCAmelCase : str = (x - 0) * (x - 0) return y def _lowercase ( ) -> int: """simple docstring""" __UpperCAmelCase : Tuple = 0.0 # Lower bound of integration __UpperCAmelCase : Union[str, Any] = 1.0 # Upper bound of integration __UpperCAmelCase : Union[str, Any] = 10.0 # define number of steps or resolution __UpperCAmelCase : Dict = [a, b] # define boundary of integration __UpperCAmelCase : Optional[int] = method_a(lowerCamelCase__ , lowerCamelCase__ ) print(f"""y = {y}""" ) if __name__ == "__main__": main()
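A quick cross-check of the composite trapezoidal rule above against NumPy. The exact integral of x**2 over [0, 1] is 1/3; with 10 steps (h = 0.1) the rule overshoots by (b - a) * h**2 / 12 * f'' = 0.01 / 12 * 2, about 0.00167, so main() above and the NumPy call below both report 0.335.

import numpy as np

xs = np.linspace(0.0, 1.0, 11)   # 10 equal steps on [0, 1]
print(np.trapz(xs**2, xs))       # 0.335 vs. the exact value 1/3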
'''simple docstring''' from __future__ import annotations def _lowercase ( lowerCamelCase__ ) -> float: """simple docstring""" if not nums: raise ValueError("List is empty" ) return sum(lowerCamelCase__ ) / len(lowerCamelCase__ ) if __name__ == "__main__": import doctest doctest.testmod()
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, is_vision_available, ) _a : str = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a : str = ["ViTFeatureExtractor"] _a : Dict = ["ViTImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a : int = [ "VIT_PRETRAINED_MODEL_ARCHIVE_LIST", "ViTForImageClassification", "ViTForMaskedImageModeling", "ViTModel", "ViTPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a : List[str] = [ "TFViTForImageClassification", "TFViTModel", "TFViTPreTrainedModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a : Dict = [ "FlaxViTForImageClassification", "FlaxViTModel", "FlaxViTPreTrainedModel", ] if TYPE_CHECKING: from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_vit import ViTFeatureExtractor from .image_processing_vit import ViTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vit import ( VIT_PRETRAINED_MODEL_ARCHIVE_LIST, ViTForImageClassification, ViTForMaskedImageModeling, ViTModel, ViTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel else: import sys _a : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
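With the lazy structure above registered, the heavy framework-specific submodules are imported on first attribute access instead of at `import transformers` time. A short sketch of the resulting user-facing behaviour:

from transformers import ViTConfig, ViTModel  # both resolved lazily

model = ViTModel(ViTConfig())      # randomly initialised, ViT-Base-sized by default
print(model.config.hidden_size)    # 768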
'''simple docstring''' # This code is adapted from OpenAI's release # https://github.com/openai/human-eval/blob/master/human_eval/execution.py import contextlib import faulthandler import io import multiprocessing import os import platform import signal import tempfile def _lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> int: """simple docstring""" __UpperCAmelCase : List[Any] = multiprocessing.Manager() __UpperCAmelCase : str = manager.list() __UpperCAmelCase : List[str] = multiprocessing.Process(target=lowerCamelCase__ , args=(check_program, result, timeout) ) p.start() p.join(timeout=timeout + 1 ) if p.is_alive(): p.kill() if not result: result.append("timed out" ) return { "task_id": task_id, "passed": result[0] == "passed", "result": result[0], "completion_id": completion_id, } def _lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Dict: """simple docstring""" with create_tempdir(): # These system calls are needed when cleaning up tempdir. import os import shutil __UpperCAmelCase : List[Any] = shutil.rmtree __UpperCAmelCase : List[str] = os.rmdir __UpperCAmelCase : Union[str, Any] = os.chdir # Disable functionalities that can make destructive changes to the test. reliability_guard() # Run program. try: __UpperCAmelCase : Optional[Any] = {} with swallow_io(): with time_limit(lowerCamelCase__ ): exec(lowerCamelCase__ , lowerCamelCase__ ) result.append("passed" ) except TimeoutException: result.append("timed out" ) except BaseException as e: result.append(f"""failed: {e}""" ) # Needed for cleaning up. __UpperCAmelCase : Optional[Any] = rmtree __UpperCAmelCase : List[str] = rmdir __UpperCAmelCase : int = chdir @contextlib.contextmanager def _lowercase ( lowerCamelCase__ ) -> Optional[int]: """simple docstring""" def signal_handler(lowerCamelCase__ , lowerCamelCase__ ): raise TimeoutException("Timed out!" 
) signal.setitimer(signal.ITIMER_REAL , lowerCamelCase__ ) signal.signal(signal.SIGALRM , lowerCamelCase__ ) try: yield finally: signal.setitimer(signal.ITIMER_REAL , 0 ) @contextlib.contextmanager def _lowercase ( ) -> Optional[Any]: """simple docstring""" __UpperCAmelCase : List[str] = WriteOnlyStringIO() with contextlib.redirect_stdout(lowerCamelCase__ ): with contextlib.redirect_stderr(lowerCamelCase__ ): with redirect_stdin(lowerCamelCase__ ): yield @contextlib.contextmanager def _lowercase ( ) -> Optional[int]: """simple docstring""" with tempfile.TemporaryDirectory() as dirname: with chdir(lowerCamelCase__ ): yield dirname class __A (__magic_name__ ): pass class __A (io.StringIO ): def _snake_case ( self , *UpperCamelCase_ , **UpperCamelCase_ ): raise OSError def _snake_case ( self , *UpperCamelCase_ , **UpperCamelCase_ ): raise OSError def _snake_case ( self , *UpperCamelCase_ , **UpperCamelCase_ ): raise OSError def _snake_case ( self , *UpperCamelCase_ , **UpperCamelCase_ ): return False class __A (contextlib._RedirectStream ): # type: ignore snake_case :List[str] = "stdin" @contextlib.contextmanager def _lowercase ( lowerCamelCase__ ) -> Optional[Any]: """simple docstring""" if root == ".": yield return __UpperCAmelCase : str = os.getcwd() os.chdir(lowerCamelCase__ ) try: yield except BaseException as exc: raise exc finally: os.chdir(lowerCamelCase__ ) def _lowercase ( lowerCamelCase__=None ) -> Any: """simple docstring""" if maximum_memory_bytes is not None: import resource resource.setrlimit(resource.RLIMIT_AS , (maximum_memory_bytes, maximum_memory_bytes) ) resource.setrlimit(resource.RLIMIT_DATA , (maximum_memory_bytes, maximum_memory_bytes) ) if not platform.uname().system == "Darwin": resource.setrlimit(resource.RLIMIT_STACK , (maximum_memory_bytes, maximum_memory_bytes) ) faulthandler.disable() import builtins __UpperCAmelCase : Optional[int] = None __UpperCAmelCase : int = None import os __UpperCAmelCase : Union[str, Any] = "1" __UpperCAmelCase : Optional[int] = None __UpperCAmelCase : List[Any] = None __UpperCAmelCase : Optional[Any] = None __UpperCAmelCase : List[Any] = None __UpperCAmelCase : str = None __UpperCAmelCase : List[Any] = None __UpperCAmelCase : Tuple = None __UpperCAmelCase : Tuple = None __UpperCAmelCase : Union[str, Any] = None __UpperCAmelCase : Any = None __UpperCAmelCase : Tuple = None __UpperCAmelCase : int = None __UpperCAmelCase : Optional[int] = None __UpperCAmelCase : Optional[Any] = None __UpperCAmelCase : Tuple = None __UpperCAmelCase : int = None __UpperCAmelCase : Optional[int] = None __UpperCAmelCase : Optional[int] = None __UpperCAmelCase : Tuple = None __UpperCAmelCase : Optional[Any] = None __UpperCAmelCase : Any = None __UpperCAmelCase : int = None __UpperCAmelCase : Optional[int] = None __UpperCAmelCase : int = None __UpperCAmelCase : str = None __UpperCAmelCase : Dict = None __UpperCAmelCase : Optional[Any] = None import shutil __UpperCAmelCase : Optional[int] = None __UpperCAmelCase : Any = None __UpperCAmelCase : int = None import subprocess __UpperCAmelCase : Optional[Any] = None # type: ignore __UpperCAmelCase : List[str] = None import sys __UpperCAmelCase : int = None __UpperCAmelCase : Dict = None __UpperCAmelCase : List[str] = None __UpperCAmelCase : Optional[Any] = None __UpperCAmelCase : Any = None
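A hedged usage sketch of the sandbox above. In OpenAI's upstream release the first helper is named check_correctness(check_program, timeout, task_id, completion_id); the name and argument order below assume the same mapping holds for the obfuscated definitions here.

program = "def add(a, b):\n    return a + b\nassert add(2, 3) == 5\n"
result = check_correctness(program, 3.0, "demo/0", 0)
print(result["passed"], result["result"])  # True passed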
'''simple docstring''' from string import ascii_uppercase _a : List[str] = {str(ord(c) - 55): c for c in ascii_uppercase} def _lowercase ( lowerCamelCase__ , lowerCamelCase__ ) -> str: """simple docstring""" if isinstance(lowerCamelCase__ , lowerCamelCase__ ): raise TypeError("int() can't convert non-string with explicit base" ) if num < 0: raise ValueError("parameter must be positive int" ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ): raise TypeError("'str' object cannot be interpreted as an integer" ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ): raise TypeError("'float' object cannot be interpreted as an integer" ) if base in (0, 1): raise ValueError("base must be >= 2" ) if base > 36: raise ValueError("base must be <= 36" ) __UpperCAmelCase : Union[str, Any] = "" __UpperCAmelCase : List[str] = 0 __UpperCAmelCase : Any = 0 while div != 1: __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = divmod(lowerCamelCase__ , lowerCamelCase__ ) if base >= 11 and 9 < mod < 36: __UpperCAmelCase : Optional[Any] = ALPHABET_VALUES[str(lowerCamelCase__ )] else: __UpperCAmelCase : Union[str, Any] = str(lowerCamelCase__ ) new_value += actual_value __UpperCAmelCase : Union[str, Any] = num // base __UpperCAmelCase : Union[str, Any] = div if div == 0: return str(new_value[::-1] ) elif div == 1: new_value += str(lowerCamelCase__ ) return str(new_value[::-1] ) return new_value[::-1] if __name__ == "__main__": import doctest doctest.testmod() for base in range(2, 37): for num in range(1000): assert int(decimal_to_any(num, base), base) == num, ( num, base, decimal_to_any(num, base), int(decimal_to_any(num, base), base), )
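The __main__ block above already pins the converter down with a full round-trip test (int(decimal_to_any(num, base), base) == num for every num below 1000 and base 2-36), so a few spot checks under that same name are safe to state:

print(decimal_to_any(255, 16))  # FF
print(decimal_to_any(10, 2))    # 1010
print(decimal_to_any(35, 36))   # Z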
'''simple docstring''' import time import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers.generation import ( MaxLengthCriteria, MaxNewTokensCriteria, MaxTimeCriteria, StoppingCriteriaList, validate_stopping_criteria, ) @require_torch class __A (unittest.TestCase ): def _snake_case ( self , UpperCamelCase_ ): __UpperCAmelCase : List[str] = 3 __UpperCAmelCase : Tuple = 2_50 __UpperCAmelCase : str = ids_tensor((batch_size, length) , UpperCamelCase_ ) __UpperCAmelCase : Any = torch.ones((batch_size, length) , device=UpperCamelCase_ , dtype=torch.float ) / length return input_ids, scores def _snake_case ( self ): __UpperCAmelCase , __UpperCAmelCase : Tuple = self._get_tensors(5 ) __UpperCAmelCase : Tuple = StoppingCriteriaList( [ MaxLengthCriteria(max_length=10 ), MaxTimeCriteria(max_time=0.1 ), ] ) self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) __UpperCAmelCase , __UpperCAmelCase : int = self._get_tensors(9 ) self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) __UpperCAmelCase , __UpperCAmelCase : Optional[int] = self._get_tensors(10 ) self.assertTrue(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) def _snake_case ( self ): __UpperCAmelCase : int = MaxLengthCriteria(max_length=10 ) __UpperCAmelCase , __UpperCAmelCase : Tuple = self._get_tensors(5 ) self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) __UpperCAmelCase , __UpperCAmelCase : Dict = self._get_tensors(9 ) self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) __UpperCAmelCase , __UpperCAmelCase : Optional[int] = self._get_tensors(10 ) self.assertTrue(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) def _snake_case ( self ): __UpperCAmelCase : Optional[Any] = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 ) __UpperCAmelCase , __UpperCAmelCase : List[str] = self._get_tensors(5 ) self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) __UpperCAmelCase , __UpperCAmelCase : Dict = self._get_tensors(9 ) self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) __UpperCAmelCase , __UpperCAmelCase : Optional[Any] = self._get_tensors(10 ) self.assertTrue(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) __UpperCAmelCase : Union[str, Any] = StoppingCriteriaList([criteria] ) self.assertEqual(criteria_list.max_length , 10 ) def _snake_case ( self ): __UpperCAmelCase , __UpperCAmelCase : Optional[Any] = self._get_tensors(5 ) __UpperCAmelCase : str = MaxTimeCriteria(max_time=0.1 ) self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) __UpperCAmelCase : str = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 ) self.assertTrue(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) def _snake_case ( self ): validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 10 ) with self.assertWarns(UpperCamelCase_ ): validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 11 ) __UpperCAmelCase : Optional[int] = validate_stopping_criteria(StoppingCriteriaList() , 11 ) self.assertEqual(len(UpperCamelCase_ ) , 1 )
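A hedged sketch of wiring the criteria exercised by these tests into actual generation; gpt2 is just a convenient small checkpoint, not something this test file itself uses.

from transformers import AutoModelForCausalLM, AutoTokenizer, MaxLengthCriteria, StoppingCriteriaList

tok = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")
inputs = tok("Hello", return_tensors="pt")
# Generation halts once the running sequence reaches 10 tokens.
out = model.generate(**inputs, stopping_criteria=StoppingCriteriaList([MaxLengthCriteria(max_length=10)]))
print(tok.decode(out[0]))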
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging _a : str = logging.get_logger(__name__) class __A (__magic_name__ ): snake_case :Optional[Any] = ["pixel_values"] def __init__( self , UpperCamelCase_ = True , UpperCamelCase_ = None , UpperCamelCase_ = PILImageResampling.BILINEAR , UpperCamelCase_ = True , UpperCamelCase_ = None , UpperCamelCase_ = True , UpperCamelCase_ = 1 / 2_55 , UpperCamelCase_ = True , UpperCamelCase_ = None , UpperCamelCase_ = None , **UpperCamelCase_ , ): super().__init__(**UpperCamelCase_ ) __UpperCAmelCase : List[str] = size if size is not None else {"shortest_edge": 2_56} __UpperCAmelCase : int = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ ) __UpperCAmelCase : Any = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24} __UpperCAmelCase : str = get_size_dict(UpperCamelCase_ ) __UpperCAmelCase : Union[str, Any] = do_resize __UpperCAmelCase : Dict = size __UpperCAmelCase : Dict = resample __UpperCAmelCase : str = do_center_crop __UpperCAmelCase : Union[str, Any] = crop_size __UpperCAmelCase : List[str] = do_rescale __UpperCAmelCase : List[Any] = rescale_factor __UpperCAmelCase : List[Any] = do_normalize __UpperCAmelCase : str = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN __UpperCAmelCase : int = image_std if image_std is not None else IMAGENET_STANDARD_STD def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = PILImageResampling.BICUBIC , UpperCamelCase_ = None , **UpperCamelCase_ , ): __UpperCAmelCase : List[str] = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ ) if "shortest_edge" not in size: raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. 
Got {size.keys()}""" ) __UpperCAmelCase : Optional[int] = get_resize_output_image_size(UpperCamelCase_ , size=size["shortest_edge"] , default_to_square=UpperCamelCase_ ) return resize(UpperCamelCase_ , size=UpperCamelCase_ , resample=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ ) def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = None , **UpperCamelCase_ , ): __UpperCAmelCase : List[str] = get_size_dict(UpperCamelCase_ ) return center_crop(UpperCamelCase_ , size=(size["height"], size["width"]) , data_format=UpperCamelCase_ , **UpperCamelCase_ ) def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = None , **UpperCamelCase_ ): return rescale(UpperCamelCase_ , scale=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ ) def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = None , **UpperCamelCase_ , ): return normalize(UpperCamelCase_ , mean=UpperCamelCase_ , std=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ ) def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = ChannelDimension.FIRST , **UpperCamelCase_ , ): __UpperCAmelCase : List[Any] = do_resize if do_resize is not None else self.do_resize __UpperCAmelCase : Union[str, Any] = size if size is not None else self.size __UpperCAmelCase : Optional[int] = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ ) __UpperCAmelCase : Tuple = resample if resample is not None else self.resample __UpperCAmelCase : List[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop __UpperCAmelCase : Dict = crop_size if crop_size is not None else self.crop_size __UpperCAmelCase : List[Any] = get_size_dict(UpperCamelCase_ ) __UpperCAmelCase : int = do_rescale if do_rescale is not None else self.do_rescale __UpperCAmelCase : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor __UpperCAmelCase : Dict = do_normalize if do_normalize is not None else self.do_normalize __UpperCAmelCase : List[Any] = image_mean if image_mean is not None else self.image_mean __UpperCAmelCase : Union[str, Any] = image_std if image_std is not None else self.image_std __UpperCAmelCase : Optional[Any] = make_list_of_images(UpperCamelCase_ ) if not valid_images(UpperCamelCase_ ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None: raise ValueError("Size must be specified if do_resize is True." ) if do_center_crop and crop_size is None: raise ValueError("Crop size must be specified if do_center_crop is True." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True." ) # All transformations expect numpy arrays. 
__UpperCAmelCase : List[Any] = [to_numpy_array(UpperCamelCase_ ) for image in images] if do_resize: __UpperCAmelCase : Tuple = [self.resize(image=UpperCamelCase_ , size=UpperCamelCase_ , resample=UpperCamelCase_ ) for image in images] if do_center_crop: __UpperCAmelCase : Optional[int] = [self.center_crop(image=UpperCamelCase_ , size=UpperCamelCase_ ) for image in images] if do_rescale: __UpperCAmelCase : Optional[Any] = [self.rescale(image=UpperCamelCase_ , scale=UpperCamelCase_ ) for image in images] if do_normalize: __UpperCAmelCase : Optional[Any] = [self.normalize(image=UpperCamelCase_ , mean=UpperCamelCase_ , std=UpperCamelCase_ ) for image in images] __UpperCAmelCase : List[Any] = [to_channel_dimension_format(UpperCamelCase_ , UpperCamelCase_ ) for image in images] __UpperCAmelCase : Optional[Any] = {"pixel_values": images} return BatchFeature(data=UpperCamelCase_ , tensor_type=UpperCamelCase_ )
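A minimal hedged check of the preprocessing pipeline above. The class name is obfuscated in this sample, so MyImageProcessor is a stand-in, and the return_tensors keyword is assumed to match the upstream BaseImageProcessor signature. With the defaults, the short side is resized to 256, a centre 224x224 crop is taken, pixels are rescaled to [0, 1], and the result is normalised with the ImageNet mean/std.

import numpy as np

processor = MyImageProcessor()  # hypothetical name for the class above
image = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)
batch = processor.preprocess(image, return_tensors="np")
print(batch["pixel_values"].shape)  # (1, 3, 224, 224)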
'''simple docstring''' import json import re from typing import TYPE_CHECKING, List, Optional, Tuple, Union import numpy as np from ...utils import is_tf_available, is_torch_available, logging if TYPE_CHECKING: if is_torch_available(): import torch if is_tf_available(): import tensorflow as tf from tokenizers import pre_tokenizers from ...tokenization_utils_base import BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from .tokenization_codegen import CodeGenTokenizer _a : Union[str, Any] = logging.get_logger(__name__) _a : Any = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"} _a : Tuple = { "vocab_file": { "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json", }, "merges_file": { "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt", }, "tokenizer_file": { "Salesforce/codegen-350M-mono": ( "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json" ), }, } _a : Dict = { "Salesforce/codegen-350M-mono": 2048, } class __A (__magic_name__ ): snake_case :Optional[Any] = VOCAB_FILES_NAMES snake_case :str = PRETRAINED_VOCAB_FILES_MAP snake_case :Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case :Tuple = ["input_ids", "attention_mask"] snake_case :Dict = CodeGenTokenizer def __init__( self , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_="<|endoftext|>" , UpperCamelCase_="<|endoftext|>" , UpperCamelCase_="<|endoftext|>" , UpperCamelCase_=False , **UpperCamelCase_ , ): super().__init__( UpperCamelCase_ , UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , unk_token=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , **UpperCamelCase_ , ) if kwargs.pop("add_bos_token" , UpperCamelCase_ ): __UpperCAmelCase : int = kwargs.pop("name_or_path" , "" ) raise ValueError( "Currently GPT2's fast tokenizer does NOT support adding a BOS token. " "Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n" f"""`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n""" f"""`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n""" "This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005." " so that the fast tokenizer works correctly." ) __UpperCAmelCase : Any = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("add_prefix_space" , UpperCamelCase_ ) != add_prefix_space: __UpperCAmelCase : str = getattr(UpperCamelCase_ , pre_tok_state.pop("type" ) ) __UpperCAmelCase : Optional[int] = add_prefix_space __UpperCAmelCase : Tuple = pre_tok_class(**UpperCamelCase_ ) __UpperCAmelCase : Tuple = add_prefix_space def _snake_case ( self , *UpperCamelCase_ , **UpperCamelCase_ ): __UpperCAmelCase : Optional[Any] = kwargs.get("is_split_into_words" , UpperCamelCase_ ) assert self.add_prefix_space or not is_split_into_words, ( f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ "to use it with pretokenized inputs."
) return super()._batch_encode_plus(*UpperCamelCase_ , **UpperCamelCase_ ) def _snake_case ( self , *UpperCamelCase_ , **UpperCamelCase_ ): __UpperCAmelCase : Any = kwargs.get("is_split_into_words" , UpperCamelCase_ ) assert self.add_prefix_space or not is_split_into_words, ( f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ "to use it with pretokenized inputs." ) return super()._encode_plus(*UpperCamelCase_ , **UpperCamelCase_ ) def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ): __UpperCAmelCase : int = self._tokenizer.model.save(UpperCamelCase_ , name=UpperCamelCase_ ) return tuple(UpperCamelCase_ ) def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = False , UpperCamelCase_ = None , UpperCamelCase_ = None , **UpperCamelCase_ , ): __UpperCAmelCase : str = super().decode( token_ids=UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ , clean_up_tokenization_spaces=UpperCamelCase_ , **UpperCamelCase_ , ) if truncate_before_pattern is not None and len(UpperCamelCase_ ) > 0: __UpperCAmelCase : Union[str, Any] = self.truncate(UpperCamelCase_ , UpperCamelCase_ ) return decoded_text def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ ): def find_re(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ): __UpperCAmelCase : Dict = pattern.search(UpperCamelCase_ , UpperCamelCase_ ) return m.start() if m else -1 __UpperCAmelCase : List[str] = [re.compile(UpperCamelCase_ , re.MULTILINE ) for pattern in truncate_before_pattern] __UpperCAmelCase : Optional[Any] = list(re.finditer("^print" , UpperCamelCase_ , re.MULTILINE ) ) if len(UpperCamelCase_ ) > 1: __UpperCAmelCase : List[Any] = completion[: prints[1].start()] __UpperCAmelCase : Tuple = list(re.finditer("^def" , UpperCamelCase_ , re.MULTILINE ) ) if len(UpperCamelCase_ ) > 1: __UpperCAmelCase : Union[str, Any] = completion[: defs[1].start()] __UpperCAmelCase : Dict = 0 __UpperCAmelCase : Dict = [ pos for pos in [find_re(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) for terminal in terminals] if pos != -1 ] if len(UpperCamelCase_ ) > 0: return completion[: min(UpperCamelCase_ )] else: return completion
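A hedged sketch of the decode-time truncation implemented above, using the checkpoint id already present in this sample's pretrained maps; truncate_before_pattern is assumed to keep its upstream keyword name.

from transformers import CodeGenTokenizerFast

tok = CodeGenTokenizerFast.from_pretrained("Salesforce/codegen-350M-mono")
ids = tok("def f():\n    return 1\n\n\n\nprint(f())")["input_ids"]
# Everything from the first triple newline onwards is cut away.
print(tok.decode(ids, truncate_before_pattern=["\n\n\n"]))  # def f():\n    return 1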
'''simple docstring''' import argparse import os import torch from transformers import ( XLNetConfig, XLNetForQuestionAnswering, XLNetForSequenceClassification, XLNetLMHeadModel, load_tf_weights_in_xlnet, ) from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging _a : List[str] = { "cola": 2, "mnli": 3, "mrpc": 2, "sst-2": 2, "sts-b": 1, "qqp": 2, "qnli": 2, "rte": 2, "wnli": 2, } logging.set_verbosity_info() def _lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None ) -> Optional[int]: """simple docstring""" __UpperCAmelCase : Union[str, Any] = XLNetConfig.from_json_file(lowerCamelCase__ ) __UpperCAmelCase : Dict = finetuning_task.lower() if finetuning_task is not None else "" if finetuning_task in GLUE_TASKS_NUM_LABELS: print(f"""Building PyTorch XLNetForSequenceClassification model from configuration: {config}""" ) __UpperCAmelCase : int = finetuning_task __UpperCAmelCase : int = GLUE_TASKS_NUM_LABELS[finetuning_task] __UpperCAmelCase : List[str] = XLNetForSequenceClassification(lowerCamelCase__ ) elif "squad" in finetuning_task: __UpperCAmelCase : Optional[Any] = finetuning_task __UpperCAmelCase : Dict = XLNetForQuestionAnswering(lowerCamelCase__ ) else: __UpperCAmelCase : Tuple = XLNetLMHeadModel(lowerCamelCase__ ) # Load weights from tf checkpoint load_tf_weights_in_xlnet(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) # Save pytorch-model __UpperCAmelCase : int = os.path.join(lowerCamelCase__ , lowerCamelCase__ ) __UpperCAmelCase : List[str] = os.path.join(lowerCamelCase__ , lowerCamelCase__ ) print(f"""Save PyTorch model to {os.path.abspath(lowerCamelCase__ )}""" ) torch.save(model.state_dict() , lowerCamelCase__ ) print(f"""Save configuration file to {os.path.abspath(lowerCamelCase__ )}""" ) with open(lowerCamelCase__ , "w" , encoding="utf-8" ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": _a : Any = argparse.ArgumentParser() # Required parameters parser.add_argument( "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path." ) parser.add_argument( "--xlnet_config_file", default=None, type=str, required=True, help=( "The config json file corresponding to the pre-trained XLNet model. \n" "This specifies the model architecture." ), ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the folder to store the PyTorch model or dataset/vocab.", ) parser.add_argument( "--finetuning_task", default=None, type=str, help="Name of a task on which the XLNet TensorFlow model was fine-tuned", ) _a : List[str] = parser.parse_args() print(args) convert_xlnet_checkpoint_to_pytorch( args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task )
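A hedged direct call of the converter above, bypassing argparse. The paths are placeholders for a real pre-trained XLNet TF checkpoint and its config, and the positional order mirrors the convert_xlnet_checkpoint_to_pytorch call at the bottom of the sample.

convert_xlnet_checkpoint_to_pytorch(
    "./xlnet_cased_L-24_H-1024_A-16/xlnet_model.ckpt",   # placeholder path
    "./xlnet_cased_L-24_H-1024_A-16/xlnet_config.json",  # placeholder path
    "./xlnet-large-cased-pt",
    "sts-b",  # optional GLUE task; selects XLNetForSequenceClassification
)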
'''simple docstring''' import json import os from functools import lru_cache from typing import List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging _a : Optional[Any] = logging.get_logger(__name__) _a : int = {"vocab_file": "vocab.json", "merges_file": "merges.txt"} # See all BART models at https://huggingface.co/models?filter=bart _a : Tuple = { "vocab_file": { "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json", "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json", "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json", "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json", "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json", "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json", }, "merges_file": { "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt", "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt", "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt", "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt", "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt", "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt", }, } _a : List[Any] = { "facebook/bart-base": 1024, "facebook/bart-large": 1024, "facebook/bart-large-mnli": 1024, "facebook/bart-large-cnn": 1024, "facebook/bart-large-xsum": 1024, "yjernite/bart_eli5": 1024, } @lru_cache() def _lowercase ( ) -> List[Any]: """simple docstring""" __UpperCAmelCase : Dict = ( list(range(ord("!" 
) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) ) ) __UpperCAmelCase : Optional[Any] = bs[:] __UpperCAmelCase : Optional[int] = 0 for b in range(2**8 ): if b not in bs: bs.append(lowerCamelCase__ ) cs.append(2**8 + n ) n += 1 __UpperCAmelCase : Dict = [chr(lowerCamelCase__ ) for n in cs] return dict(zip(lowerCamelCase__ , lowerCamelCase__ ) ) def _lowercase ( lowerCamelCase__ ) -> str: """simple docstring""" __UpperCAmelCase : Dict = set() __UpperCAmelCase : Union[str, Any] = word[0] for char in word[1:]: pairs.add((prev_char, char) ) __UpperCAmelCase : Optional[Any] = char return pairs class __A (__magic_name__ ): snake_case :Optional[int] = VOCAB_FILES_NAMES snake_case :List[Any] = PRETRAINED_VOCAB_FILES_MAP snake_case :Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case :Optional[int] = ["input_ids", "attention_mask"] def __init__( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_="replace" , UpperCamelCase_="<s>" , UpperCamelCase_="</s>" , UpperCamelCase_="</s>" , UpperCamelCase_="<s>" , UpperCamelCase_="<unk>" , UpperCamelCase_="<pad>" , UpperCamelCase_="<mask>" , UpperCamelCase_=False , **UpperCamelCase_ , ): __UpperCAmelCase : str = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else bos_token __UpperCAmelCase : List[str] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else eos_token __UpperCAmelCase : Optional[int] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else sep_token __UpperCAmelCase : int = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else cls_token __UpperCAmelCase : Optional[int] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else unk_token __UpperCAmelCase : Dict = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it __UpperCAmelCase : Union[str, Any] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else mask_token super().__init__( errors=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , **UpperCamelCase_ , ) with open(UpperCamelCase_ , encoding="utf-8" ) as vocab_handle: __UpperCAmelCase : int = json.load(UpperCamelCase_ ) __UpperCAmelCase : Any = {v: k for k, v in self.encoder.items()} __UpperCAmelCase : Any = errors # how to handle errors in decoding __UpperCAmelCase : str = bytes_to_unicode() __UpperCAmelCase : List[str] = {v: k for k, v in self.byte_encoder.items()} with open(UpperCamelCase_ , encoding="utf-8" ) as merges_handle: __UpperCAmelCase : str = merges_handle.read().split("\n" )[1:-1] __UpperCAmelCase : List[str] = [tuple(merge.split() ) for merge in bpe_merges] __UpperCAmelCase : Union[str, Any] = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_ ) ) ) ) __UpperCAmelCase : Optional[int] = {} __UpperCAmelCase : Optional[int] = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions __UpperCAmelCase : Dict = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" ) @property def _snake_case ( self ): return len(self.encoder ) def _snake_case ( self ): return dict(self.encoder , **self.added_tokens_encoder ) def _snake_case ( self , UpperCamelCase_ ): if token in self.cache: return self.cache[token] __UpperCAmelCase : List[str] = tuple(UpperCamelCase_ ) __UpperCAmelCase : str = get_pairs(UpperCamelCase_ ) if not pairs: return token while True: __UpperCAmelCase : str = min(UpperCamelCase_ , key=lambda UpperCamelCase_ : self.bpe_ranks.get(UpperCamelCase_ , float("inf" ) ) ) if bigram not in self.bpe_ranks: break __UpperCAmelCase , __UpperCAmelCase : List[Any] = bigram __UpperCAmelCase : Any = [] __UpperCAmelCase : List[str] = 0 while i < len(UpperCamelCase_ ): try: __UpperCAmelCase : Union[str, Any] = word.index(UpperCamelCase_ , UpperCamelCase_ ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) __UpperCAmelCase : str = j if word[i] == first and i < len(UpperCamelCase_ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 __UpperCAmelCase : Dict = tuple(UpperCamelCase_ ) __UpperCAmelCase : str = new_word if len(UpperCamelCase_ ) == 1: break else: __UpperCAmelCase : int = get_pairs(UpperCamelCase_ ) __UpperCAmelCase : Optional[int] = " ".join(UpperCamelCase_ ) __UpperCAmelCase : Dict = word return word def _snake_case ( self , UpperCamelCase_ ): __UpperCAmelCase : Optional[Any] = [] for token in re.findall(self.pat , UpperCamelCase_ ): __UpperCAmelCase : Any = "".join( self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(UpperCamelCase_ ).split(" " ) ) return bpe_tokens def _snake_case ( self , UpperCamelCase_ ): return self.encoder.get(UpperCamelCase_ , self.encoder.get(self.unk_token ) ) def _snake_case ( self , UpperCamelCase_ ): return self.decoder.get(UpperCamelCase_ ) def _snake_case ( self , UpperCamelCase_ ): __UpperCAmelCase : List[str] = 
"".join(UpperCamelCase_ ) __UpperCAmelCase : Union[str, Any] = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors ) return text def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ): if not os.path.isdir(UpperCamelCase_ ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return __UpperCAmelCase : Any = os.path.join( UpperCamelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) __UpperCAmelCase : Optional[int] = os.path.join( UpperCamelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) with open(UpperCamelCase_ , "w" , encoding="utf-8" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=UpperCamelCase_ , ensure_ascii=UpperCamelCase_ ) + "\n" ) __UpperCAmelCase : str = 0 with open(UpperCamelCase_ , "w" , encoding="utf-8" ) as writer: writer.write("#version: 0.2\n" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda UpperCamelCase_ : kv[1] ): if index != token_index: logger.warning( f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.""" " Please check that the tokenizer is not corrupted!" ) __UpperCAmelCase : str = token_index writer.write(" ".join(UpperCamelCase_ ) + "\n" ) index += 1 return vocab_file, merge_file def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] __UpperCAmelCase : List[Any] = [self.cls_token_id] __UpperCAmelCase : Tuple = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCamelCase_ , token_ids_a=UpperCamelCase_ , already_has_special_tokens=UpperCamelCase_ ) if token_ids_a is None: return [1] + ([0] * len(UpperCamelCase_ )) + [1] return [1] + ([0] * len(UpperCamelCase_ )) + [1, 1] + ([0] * len(UpperCamelCase_ )) + [1] def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ): __UpperCAmelCase : int = [self.sep_token_id] __UpperCAmelCase : List[str] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_=False , **UpperCamelCase_ ): __UpperCAmelCase : List[str] = kwargs.pop("add_prefix_space" , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(UpperCamelCase_ ) > 0 and not text[0].isspace()): __UpperCAmelCase : Tuple = " " + text return (text, kwargs)
10
1
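Readability note for the row above: it stores a byte-level BPE tokenizer whose identifiers were rewritten by the dataset transformation. A minimal, self-contained sketch of its two standalone helpers follows; the de-obfuscated names are illustrative, but the logic matches the row.

def bytes_to_unicode():
    # Map every byte to a printable unicode character so BPE can operate on
    # reversible "characters" instead of raw control bytes or whitespace.
    bs = (
        list(range(ord("!"), ord("~") + 1))
        + list(range(ord("¡"), ord("¬") + 1))
        + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)  # shift unprintable bytes into a private range
            n += 1
    return dict(zip(bs, [chr(c) for c in cs]))

def get_pairs(word):
    # All adjacent symbol pairs in a word; the BPE loop merges the pair with
    # the lowest merge rank until no ranked pair remains.
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs

table = bytes_to_unicode()
assert table[ord("a")] == "a" and table[0] == chr(256)
assert get_pairs(("h", "e", "l", "l", "o")) == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}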
'''simple docstring''' import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DeformableDetrImageProcessor class __A (unittest.TestCase ): def __init__( self , UpperCamelCase_ , UpperCamelCase_=7 , UpperCamelCase_=3 , UpperCamelCase_=30 , UpperCamelCase_=4_00 , UpperCamelCase_=True , UpperCamelCase_=None , UpperCamelCase_=True , UpperCamelCase_=[0.5, 0.5, 0.5] , UpperCamelCase_=[0.5, 0.5, 0.5] , UpperCamelCase_=True , UpperCamelCase_=1 / 2_55 , UpperCamelCase_=True , ): # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p __UpperCAmelCase : Optional[Any] = size if size is not None else {"shortest_edge": 18, "longest_edge": 13_33} __UpperCAmelCase : Optional[Any] = parent __UpperCAmelCase : Tuple = batch_size __UpperCAmelCase : List[Any] = num_channels __UpperCAmelCase : Union[str, Any] = min_resolution __UpperCAmelCase : str = max_resolution __UpperCAmelCase : Optional[int] = do_resize __UpperCAmelCase : Any = size __UpperCAmelCase : str = do_normalize __UpperCAmelCase : Tuple = image_mean __UpperCAmelCase : Optional[int] = image_std __UpperCAmelCase : Any = do_rescale __UpperCAmelCase : Union[str, Any] = rescale_factor __UpperCAmelCase : Tuple = do_pad def _snake_case ( self ): return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_=False ): if not batched: __UpperCAmelCase : Any = image_inputs[0] if isinstance(UpperCamelCase_ , Image.Image ): __UpperCAmelCase , __UpperCAmelCase : Optional[Any] = image.size else: __UpperCAmelCase , __UpperCAmelCase : Optional[Any] = image.shape[1], image.shape[2] if w < h: __UpperCAmelCase : Dict = int(self.size["shortest_edge"] * h / w ) __UpperCAmelCase : Union[str, Any] = self.size["shortest_edge"] elif w > h: __UpperCAmelCase : Dict = self.size["shortest_edge"] __UpperCAmelCase : Any = int(self.size["shortest_edge"] * w / h ) else: __UpperCAmelCase : List[Any] = self.size["shortest_edge"] __UpperCAmelCase : Tuple = self.size["shortest_edge"] else: __UpperCAmelCase : Optional[int] = [] for image in image_inputs: __UpperCAmelCase , __UpperCAmelCase : Optional[Any] = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) __UpperCAmelCase : Optional[Any] = max(UpperCamelCase_ , key=lambda UpperCamelCase_ : item[0] )[0] __UpperCAmelCase : int = max(UpperCamelCase_ , key=lambda UpperCamelCase_ : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class __A (__magic_name__ , unittest.TestCase ): snake_case :List[Any] = DeformableDetrImageProcessor if is_vision_available() else None def _snake_case ( self ): __UpperCAmelCase : Optional[Any] = DeformableDetrImageProcessingTester(self ) @property def _snake_case ( self ): return self.image_processor_tester.prepare_image_processor_dict() def _snake_case ( self ): __UpperCAmelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(UpperCamelCase_ , "image_mean" 
) ) self.assertTrue(hasattr(UpperCamelCase_ , "image_std" ) ) self.assertTrue(hasattr(UpperCamelCase_ , "do_normalize" ) ) self.assertTrue(hasattr(UpperCamelCase_ , "do_resize" ) ) self.assertTrue(hasattr(UpperCamelCase_ , "do_rescale" ) ) self.assertTrue(hasattr(UpperCamelCase_ , "do_pad" ) ) self.assertTrue(hasattr(UpperCamelCase_ , "size" ) ) def _snake_case ( self ): __UpperCAmelCase : int = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 13_33} ) self.assertEqual(image_processor.do_pad , UpperCamelCase_ ) __UpperCAmelCase : Optional[Any] = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=UpperCamelCase_ ) self.assertEqual(image_processor.size , {"shortest_edge": 42, "longest_edge": 84} ) self.assertEqual(image_processor.do_pad , UpperCamelCase_ ) def _snake_case ( self ): pass def _snake_case ( self ): # Initialize image_processing __UpperCAmelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __UpperCAmelCase : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ ) for image in image_inputs: self.assertIsInstance(UpperCamelCase_ , Image.Image ) # Test not batched input __UpperCAmelCase : Dict = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values __UpperCAmelCase , __UpperCAmelCase : List[str] = self.image_processor_tester.get_expected_values(UpperCamelCase_ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __UpperCAmelCase , __UpperCAmelCase : Optional[Any] = self.image_processor_tester.get_expected_values(UpperCamelCase_ , batched=UpperCamelCase_ ) __UpperCAmelCase : Union[str, Any] = image_processing(UpperCamelCase_ , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def _snake_case ( self ): # Initialize image_processing __UpperCAmelCase : int = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __UpperCAmelCase : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ , numpify=UpperCamelCase_ ) for image in image_inputs: self.assertIsInstance(UpperCamelCase_ , np.ndarray ) # Test not batched input __UpperCAmelCase : List[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = self.image_processor_tester.get_expected_values(UpperCamelCase_ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __UpperCAmelCase : List[Any] = image_processing(UpperCamelCase_ , return_tensors="pt" ).pixel_values __UpperCAmelCase , __UpperCAmelCase : Any = self.image_processor_tester.get_expected_values(UpperCamelCase_ , batched=UpperCamelCase_ ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def _snake_case ( self ): # Initialize image_processing __UpperCAmelCase : Tuple = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __UpperCAmelCase : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , 
equal_resolution=UpperCamelCase_ , torchify=UpperCamelCase_ ) for image in image_inputs: self.assertIsInstance(UpperCamelCase_ , torch.Tensor ) # Test not batched input __UpperCAmelCase : Optional[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values __UpperCAmelCase , __UpperCAmelCase : List[Any] = self.image_processor_tester.get_expected_values(UpperCamelCase_ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __UpperCAmelCase : Union[str, Any] = image_processing(UpperCamelCase_ , return_tensors="pt" ).pixel_values __UpperCAmelCase , __UpperCAmelCase : Dict = self.image_processor_tester.get_expected_values(UpperCamelCase_ , batched=UpperCamelCase_ ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def _snake_case ( self ): # prepare image and target __UpperCAmelCase : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f: __UpperCAmelCase : Union[str, Any] = json.loads(f.read() ) __UpperCAmelCase : Optional[Any] = {"image_id": 3_97_69, "annotations": target} # encode them __UpperCAmelCase : List[str] = DeformableDetrImageProcessor() __UpperCAmelCase : Union[str, Any] = image_processing(images=UpperCamelCase_ , annotations=UpperCamelCase_ , return_tensors="pt" ) # verify pixel values __UpperCAmelCase : List[Any] = torch.Size([1, 3, 8_00, 10_66] ) self.assertEqual(encoding["pixel_values"].shape , UpperCamelCase_ ) __UpperCAmelCase : List[Any] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] ) self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , UpperCamelCase_ , atol=1E-4 ) ) # verify area __UpperCAmelCase : List[Any] = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] ) self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , UpperCamelCase_ ) ) # verify boxes __UpperCAmelCase : Optional[int] = torch.Size([6, 4] ) self.assertEqual(encoding["labels"][0]["boxes"].shape , UpperCamelCase_ ) __UpperCAmelCase : str = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] ) self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , UpperCamelCase_ , atol=1E-3 ) ) # verify image_id __UpperCAmelCase : str = torch.tensor([3_97_69] ) self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , UpperCamelCase_ ) ) # verify is_crowd __UpperCAmelCase : Tuple = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , UpperCamelCase_ ) ) # verify class_labels __UpperCAmelCase : Any = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , UpperCamelCase_ ) ) # verify orig_size __UpperCAmelCase : Union[str, Any] = torch.tensor([4_80, 6_40] ) self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , UpperCamelCase_ ) ) # verify size __UpperCAmelCase : List[str] = torch.tensor([8_00, 10_66] ) self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , UpperCamelCase_ ) ) @slow def _snake_case ( self ): # prepare image, target and masks_path __UpperCAmelCase : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f: 
__UpperCAmelCase : Optional[Any] = json.loads(f.read() ) __UpperCAmelCase : Optional[int] = {"file_name": "000000039769.png", "image_id": 3_97_69, "segments_info": target} __UpperCAmelCase : Tuple = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" ) # encode them __UpperCAmelCase : int = DeformableDetrImageProcessor(format="coco_panoptic" ) __UpperCAmelCase : Optional[int] = image_processing(images=UpperCamelCase_ , annotations=UpperCamelCase_ , masks_path=UpperCamelCase_ , return_tensors="pt" ) # verify pixel values __UpperCAmelCase : str = torch.Size([1, 3, 8_00, 10_66] ) self.assertEqual(encoding["pixel_values"].shape , UpperCamelCase_ ) __UpperCAmelCase : int = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] ) self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , UpperCamelCase_ , atol=1E-4 ) ) # verify area __UpperCAmelCase : str = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] ) self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , UpperCamelCase_ ) ) # verify boxes __UpperCAmelCase : Any = torch.Size([6, 4] ) self.assertEqual(encoding["labels"][0]["boxes"].shape , UpperCamelCase_ ) __UpperCAmelCase : Tuple = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] ) self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , UpperCamelCase_ , atol=1E-3 ) ) # verify image_id __UpperCAmelCase : List[str] = torch.tensor([3_97_69] ) self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , UpperCamelCase_ ) ) # verify is_crowd __UpperCAmelCase : Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , UpperCamelCase_ ) ) # verify class_labels __UpperCAmelCase : Any = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , UpperCamelCase_ ) ) # verify masks __UpperCAmelCase : str = 82_28_73 self.assertEqual(encoding["labels"][0]["masks"].sum().item() , UpperCamelCase_ ) # verify orig_size __UpperCAmelCase : str = torch.tensor([4_80, 6_40] ) self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , UpperCamelCase_ ) ) # verify size __UpperCAmelCase : int = torch.tensor([8_00, 10_66] ) self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , UpperCamelCase_ ) )
10
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging _a : Any = logging.get_logger(__name__) _a : int = { "facebook/s2t-wav2vec2-large-en-de": ( "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json" ), # See all Speech2Text models at https://huggingface.co/models?filter=speech2text2 } class __A (__magic_name__ ): snake_case :Optional[int] = "speech_to_text_2" snake_case :List[Any] = ["past_key_values"] snake_case :str = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"} def __init__( self , UpperCamelCase_=1_00_00 , UpperCamelCase_=6 , UpperCamelCase_=20_48 , UpperCamelCase_=4 , UpperCamelCase_=0.0 , UpperCamelCase_=True , UpperCamelCase_="relu" , UpperCamelCase_=2_56 , UpperCamelCase_=0.1 , UpperCamelCase_=0.0 , UpperCamelCase_=0.0 , UpperCamelCase_=0.0_2 , UpperCamelCase_=2 , UpperCamelCase_=True , UpperCamelCase_=1 , UpperCamelCase_=0 , UpperCamelCase_=2 , UpperCamelCase_=10_24 , **UpperCamelCase_ , ): __UpperCAmelCase : Any = vocab_size __UpperCAmelCase : Optional[int] = d_model __UpperCAmelCase : Tuple = decoder_ffn_dim __UpperCAmelCase : List[str] = decoder_layers __UpperCAmelCase : str = decoder_attention_heads __UpperCAmelCase : Dict = dropout __UpperCAmelCase : Optional[Any] = attention_dropout __UpperCAmelCase : int = activation_dropout __UpperCAmelCase : Dict = activation_function __UpperCAmelCase : Tuple = init_std __UpperCAmelCase : Any = decoder_layerdrop __UpperCAmelCase : str = use_cache __UpperCAmelCase : int = decoder_layers __UpperCAmelCase : Any = scale_embedding # scale factor will be sqrt(d_model) if True __UpperCAmelCase : Union[str, Any] = max_target_positions super().__init__( pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , decoder_start_token_id=UpperCamelCase_ , **UpperCamelCase_ , )
10
1
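The test helper in the row above derives expected output sizes from an aspect-preserving shortest-edge resize (its comment notes the longest_edge cap is deliberately left untested). A hedged, standalone restatement of that arithmetic:

def expected_resize(height, width, shortest_edge=18):
    # The shorter side becomes `shortest_edge`; the other side scales by the
    # same ratio, truncated to int, exactly as in the test helper above.
    if width < height:
        return int(shortest_edge * height / width), shortest_edge
    if width > height:
        return shortest_edge, int(shortest_edge * width / height)
    return shortest_edge, shortest_edge

assert expected_resize(400, 200) == (36, 18)  # portrait
assert expected_resize(200, 400) == (18, 36)  # landscape
assert expected_resize(300, 300) == (18, 18)  # square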
'''simple docstring''' import argparse import json from collections import OrderedDict from functools import partial from pathlib import Path import timm import torch from huggingface_hub import hf_hub_download from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor from transformers.utils import logging logging.set_verbosity_info() _a : List[str] = logging.get_logger() def _lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = True ) -> Optional[Any]: """simple docstring""" print(f"""Converting {name}...""" ) with torch.no_grad(): if hidden_sizes == 128: if name[-1] == "S": __UpperCAmelCase : int = timm.create_model("levit_128s" , pretrained=lowerCamelCase__ ) else: __UpperCAmelCase : List[str] = timm.create_model("levit_128" , pretrained=lowerCamelCase__ ) if hidden_sizes == 192: __UpperCAmelCase : Optional[int] = timm.create_model("levit_192" , pretrained=lowerCamelCase__ ) if hidden_sizes == 256: __UpperCAmelCase : Tuple = timm.create_model("levit_256" , pretrained=lowerCamelCase__ ) if hidden_sizes == 384: __UpperCAmelCase : Optional[Any] = timm.create_model("levit_384" , pretrained=lowerCamelCase__ ) from_model.eval() __UpperCAmelCase : Dict = LevitForImageClassificationWithTeacher(lowerCamelCase__ ).eval() __UpperCAmelCase : List[str] = OrderedDict() __UpperCAmelCase : Any = from_model.state_dict() __UpperCAmelCase : List[str] = list(from_model.state_dict().keys() ) __UpperCAmelCase : List[str] = list(our_model.state_dict().keys() ) print(len(lowerCamelCase__ ) , len(lowerCamelCase__ ) ) for i in range(len(lowerCamelCase__ ) ): __UpperCAmelCase : int = weights[og_keys[i]] our_model.load_state_dict(lowerCamelCase__ ) __UpperCAmelCase : List[str] = torch.randn((2, 3, 224, 224) ) __UpperCAmelCase : List[Any] = from_model(lowerCamelCase__ ) __UpperCAmelCase : int = our_model(lowerCamelCase__ ).logits assert torch.allclose(lowerCamelCase__ , lowerCamelCase__ ), "The model logits don't match the original one." 
__UpperCAmelCase : Union[str, Any] = name print(lowerCamelCase__ ) if push_to_hub: our_model.save_pretrained(save_directory / checkpoint_name ) __UpperCAmelCase : Any = LevitImageProcessor() image_processor.save_pretrained(save_directory / checkpoint_name ) print(f"""Pushed {checkpoint_name}""" ) def _lowercase ( lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = True ) -> Optional[Any]: """simple docstring""" __UpperCAmelCase : Dict = "imagenet-1k-id2label.json" __UpperCAmelCase : Optional[Any] = 1000 __UpperCAmelCase : int = (1, num_labels) __UpperCAmelCase : Tuple = "huggingface/label-files" __UpperCAmelCase : List[Any] = num_labels __UpperCAmelCase : Union[str, Any] = json.load(open(hf_hub_download(lowerCamelCase__ , lowerCamelCase__ , repo_type="dataset" ) , "r" ) ) __UpperCAmelCase : Optional[int] = {int(lowerCamelCase__ ): v for k, v in idalabel.items()} __UpperCAmelCase : List[str] = idalabel __UpperCAmelCase : List[Any] = {v: k for k, v in idalabel.items()} __UpperCAmelCase : Optional[Any] = partial(lowerCamelCase__ , num_labels=lowerCamelCase__ , idalabel=lowerCamelCase__ , labelaid=lowerCamelCase__ ) __UpperCAmelCase : Optional[int] = { "levit-128S": 128, "levit-128": 128, "levit-192": 192, "levit-256": 256, "levit-384": 384, } __UpperCAmelCase : str = { "levit-128S": ImageNetPreTrainedConfig( hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ), "levit-128": ImageNetPreTrainedConfig( hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ), "levit-192": ImageNetPreTrainedConfig( hidden_sizes=[192, 288, 384] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ), "levit-256": ImageNetPreTrainedConfig( hidden_sizes=[256, 384, 512] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ), "levit-384": ImageNetPreTrainedConfig( hidden_sizes=[384, 512, 768] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ), } if model_name: convert_weight_and_push( names_to_hidden_sizes[model_name] , lowerCamelCase__ , names_to_config[model_name] , lowerCamelCase__ , lowerCamelCase__ ) else: for model_name, config in names_to_config.items(): convert_weight_and_push(names_to_hidden_sizes[model_name] , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) return config, expected_shape if __name__ == "__main__": _a : Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default=None, type=str, help="The name of the model you wish to convert, it must be one of the supported Levit* architecture,", ) parser.add_argument( "--pytorch_dump_folder_path", default="levit-dump-folder/", type=Path, required=False, help="Path to the output PyTorch model directory.", ) parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub") parser.add_argument( "--no-push_to_hub", dest="push_to_hub", action="store_false", help="Do not push model and image processor to the hub", ) _a : Optional[Any] = parser.parse_args() _a : Path = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
10
'''simple docstring''' def _lowercase ( lowerCamelCase__ = 100 ) -> int: """simple docstring""" __UpperCAmelCase : Optional[Any] = (n * (n + 1) // 2) ** 2 __UpperCAmelCase : Any = n * (n + 1) * (2 * n + 1) // 6 return sum_cubes - sum_squares if __name__ == "__main__": print(f"""{solution() = }""")
10
1
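The second file in the row above is Project Euler problem 6 solved in closed form. For reference, a clean restatement with the two identities spelled out and checked on small inputs:

def sum_square_difference(n=100):
    # (1 + 2 + ... + n)^2 via Gauss's formula, minus 1^2 + 2^2 + ... + n^2
    # via the square-pyramidal formula; both are exact integer expressions.
    square_of_sum = (n * (n + 1) // 2) ** 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return square_of_sum - sum_of_squares

assert sum_square_difference(10) == 55**2 - 385 == 2640
assert sum_square_difference(100) == 25164150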
'''simple docstring''' import torch from torch import nn from transformers import CLIPPreTrainedModel, CLIPVisionModel from ...models.attention import BasicTransformerBlock from ...utils import logging _a : Dict = logging.get_logger(__name__) # pylint: disable=invalid-name class __A (__magic_name__ ): def __init__( self , UpperCamelCase_ , UpperCamelCase_=7_68 ): super().__init__(UpperCamelCase_ ) __UpperCAmelCase : Optional[Any] = proj_size __UpperCAmelCase : Any = CLIPVisionModel(UpperCamelCase_ ) __UpperCAmelCase : Union[str, Any] = PaintByExampleMapper(UpperCamelCase_ ) __UpperCAmelCase : Tuple = nn.LayerNorm(config.hidden_size ) __UpperCAmelCase : Optional[int] = nn.Linear(config.hidden_size , self.proj_size ) # uncondition for scaling __UpperCAmelCase : Dict = nn.Parameter(torch.randn((1, 1, self.proj_size) ) ) def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_=False ): __UpperCAmelCase : List[str] = self.model(pixel_values=UpperCamelCase_ ) __UpperCAmelCase : str = clip_output.pooler_output __UpperCAmelCase : Any = self.mapper(latent_states[:, None] ) __UpperCAmelCase : str = self.final_layer_norm(UpperCamelCase_ ) __UpperCAmelCase : List[Any] = self.proj_out(UpperCamelCase_ ) if return_uncond_vector: return latent_states, self.uncond_vector return latent_states class __A (nn.Module ): def __init__( self , UpperCamelCase_ ): super().__init__() __UpperCAmelCase : Optional[Any] = (config.num_hidden_layers + 1) // 5 __UpperCAmelCase : Dict = config.hidden_size __UpperCAmelCase : Any = 1 __UpperCAmelCase : Tuple = nn.ModuleList( [ BasicTransformerBlock(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , activation_fn="gelu" , attention_bias=UpperCamelCase_ ) for _ in range(UpperCamelCase_ ) ] ) def _snake_case ( self , UpperCamelCase_ ): for block in self.blocks: __UpperCAmelCase : Optional[Any] = block(UpperCamelCase_ ) return hidden_states
10
'''simple docstring''' def _lowercase ( lowerCamelCase__ , lowerCamelCase__ ) -> float: """simple docstring""" if discount_rate < 0: raise ValueError("Discount rate cannot be negative" ) if not cash_flows: raise ValueError("Cash flows list cannot be empty" ) __UpperCAmelCase : Tuple = sum( cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(lowerCamelCase__ ) ) return round(lowerCamelCase__ , ndigits=2 ) if __name__ == "__main__": import doctest doctest.testmod()
10
1
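The NPV helper in the row above discounts each cash flow by its index, so element 0 is taken at face value. A usage sketch with illustrative numbers:

def net_present_value(discount_rate, cash_flows):
    # NPV = sum(CF_i / (1 + r)**i); a negative initial outlay goes at index 0.
    if discount_rate < 0:
        raise ValueError("Discount rate cannot be negative")
    if not cash_flows:
        raise ValueError("Cash flows list cannot be empty")
    npv = sum(cf / (1 + discount_rate) ** i for i, cf in enumerate(cash_flows))
    return round(npv, 2)

# A -1000 outlay followed by three 500 inflows at a 10% rate:
assert net_present_value(0.10, [-1000, 500, 500, 500]) == 243.43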
'''simple docstring''' from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _a : Union[str, Any] = { "configuration_informer": [ "INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "InformerConfig", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a : List[Any] = [ "INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "InformerForPrediction", "InformerModel", "InformerPreTrainedModel", ] if TYPE_CHECKING: from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_informer import ( INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, InformerForPrediction, InformerModel, InformerPreTrainedModel, ) else: import sys _a : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
10
'''simple docstring''' import random import torch from huggingface_hub import HfApi from diffusers import UNetaDModel _a : Union[str, Any] = HfApi() _a : int = {} # fmt: off _a : Optional[int] = torch.tensor([ -0.7_515, -1.6_883, 0.2_420, 0.0_300, 0.6_347, 1.3_433, -1.1_743, -3.7_467, 1.2_342, -2.2_485, 0.4_636, 0.8_076, -0.7_991, 0.3_969, 0.8_498, 0.9_189, -1.8_887, -3.3_522, 0.7_639, 0.2_040, 0.6_271, -2.7_148, -1.6_316, 3.0_839, 0.3_186, 0.2_721, -0.9_759, -1.2_461, 2.6_257, 1.3_557 ]) _a : Optional[Any] = torch.tensor([ -2.3_639, -2.5_344, 0.0_054, -0.6_674, 1.5_990, 1.0_158, 0.3_124, -2.1_436, 1.8_795, -2.5_429, -0.1_566, -0.3_973, 1.2_490, 2.6_447, 1.2_283, -0.5_208, -2.8_154, -3.5_119, 2.3_838, 1.2_033, 1.7_201, -2.1_256, -1.4_576, 2.7_948, 2.4_204, -0.9_752, -1.2_546, 0.8_027, 3.2_758, 3.1_365 ]) _a : int = torch.tensor([ -0.6_531, -0.6_891, -0.3_172, -0.5_375, -0.9_140, -0.5_367, -0.1_175, -0.7_869, -0.3_808, -0.4_513, -0.2_098, -0.0_083, 0.3_183, 0.5_140, 0.2_247, -0.1_304, -0.1_302, -0.2_802, -0.2_084, -0.2_025, -0.4_967, -0.4_873, -0.0_861, 0.6_925, 0.0_250, 0.1_290, -0.1_543, 0.6_316, 1.0_460, 1.4_943 ]) _a : str = torch.tensor([ 0.0_911, 0.1_107, 0.0_182, 0.0_435, -0.0_805, -0.0_608, 0.0_381, 0.2_172, -0.0_280, 0.1_327, -0.0_299, -0.0_255, -0.0_050, -0.1_170, -0.1_046, 0.0_309, 0.1_367, 0.1_728, -0.0_533, -0.0_748, -0.0_534, 0.1_624, 0.0_384, -0.1_805, -0.0_707, 0.0_642, 0.0_220, -0.0_134, -0.1_333, -0.1_505 ]) _a : Union[str, Any] = torch.tensor([ 0.1_321, 0.1_337, 0.0_440, 0.0_622, -0.0_591, -0.0_370, 0.0_503, 0.2_133, -0.0_177, 0.1_415, -0.0_116, -0.0_112, 0.0_044, -0.0_980, -0.0_789, 0.0_395, 0.1_502, 0.1_785, -0.0_488, -0.0_514, -0.0_404, 0.1_539, 0.0_454, -0.1_559, -0.0_665, 0.0_659, 0.0_383, -0.0_005, -0.1_266, -0.1_386 ]) _a : Any = torch.tensor([ 0.1_154, 0.1_218, 0.0_307, 0.0_526, -0.0_711, -0.0_541, 0.0_366, 0.2_078, -0.0_267, 0.1_317, -0.0_226, -0.0_193, -0.0_014, -0.1_055, -0.0_902, 0.0_330, 0.1_391, 0.1_709, -0.0_562, -0.0_693, -0.0_560, 0.1_482, 0.0_381, -0.1_683, -0.0_681, 0.0_661, 0.0_331, -0.0_046, -0.1_268, -0.1_431 ]) _a : List[Any] = torch.tensor([ 0.1_192, 0.1_240, 0.0_414, 0.0_606, -0.0_557, -0.0_412, 0.0_430, 0.2_042, -0.0_200, 0.1_385, -0.0_115, -0.0_132, 0.0_017, -0.0_965, -0.0_802, 0.0_398, 0.1_433, 0.1_747, -0.0_458, -0.0_533, -0.0_407, 0.1_545, 0.0_419, -0.1_574, -0.0_645, 0.0_626, 0.0_341, -0.0_010, -0.1_199, -0.1_390 ]) _a : Optional[int] = torch.tensor([ 0.1_075, 0.1_074, 0.0_205, 0.0_431, -0.0_774, -0.0_607, 0.0_298, 0.2_042, -0.0_320, 0.1_267, -0.0_281, -0.0_250, -0.0_064, -0.1_091, -0.0_946, 0.0_290, 0.1_328, 0.1_650, -0.0_580, -0.0_738, -0.0_586, 0.1_440, 0.0_337, -0.1_746, -0.0_712, 0.0_605, 0.0_250, -0.0_099, -0.1_316, -0.1_473 ]) _a : Tuple = torch.tensor([ -1.4_572, -2.0_481, -0.0_414, -0.6_005, 1.4_136, 0.5_848, 0.4_028, -2.7_330, 1.2_212, -2.1_228, 0.2_155, 0.4_039, 0.7_662, 2.0_535, 0.7_477, -0.3_243, -2.1_758, -2.7_648, 1.6_947, 0.7_026, 1.2_338, -1.6_078, -0.8_682, 2.2_810, 1.8_574, -0.5_718, -0.5_586, -0.0_186, 2.3_415, 2.1_251]) _a : List[Any] = torch.tensor([ -1.3_690, -1.9_720, -0.4_090, -0.6_966, 1.4_660, 0.9_938, -0.1_385, -2.7_324, 0.7_736, -1.8_917, 0.2_923, 0.4_293, 0.1_693, 1.4_112, 1.1_887, -0.3_181, -2.2_160, -2.6_381, 1.3_170, 0.8_163, 0.9_240, -1.6_544, -0.6_099, 2.5_259, 1.6_430, -0.9_090, -0.9_392, -0.0_126, 2.4_268, 2.3_266 ]) _a : Optional[Any] = torch.tensor([ -1.3_525, -1.9_628, -0.3_956, -0.6_860, 1.4_664, 1.0_014, -0.1_259, -2.7_212, 0.7_772, -1.8_811, 0.2_996, 0.4_388, 0.1_704, 1.4_029, 1.1_701, -0.3_027, 
-2.2_053, -2.6_287, 1.3_350, 0.8_131, 0.9_274, -1.6_292, -0.6_098, 2.5_131, 1.6_505, -0.8_958, -0.9_298, -0.0_151, 2.4_257, 2.3_355 ]) _a : Union[str, Any] = torch.tensor([ -2.0_585, -2.7_897, -0.2_850, -0.8_940, 1.9_052, 0.5_702, 0.6_345, -3.8_959, 1.5_932, -3.2_319, 0.1_974, 0.0_287, 1.7_566, 2.6_543, 0.8_387, -0.5_351, -3.2_736, -4.3_375, 2.9_029, 1.6_390, 1.4_640, -2.1_701, -1.9_013, 2.9_341, 3.4_981, -0.6_255, -1.1_644, -0.1_591, 3.7_097, 3.2_066 ]) _a : Optional[int] = torch.tensor([ -2.3_139, -2.5_594, -0.0_197, -0.6_785, 1.7_001, 1.1_606, 0.3_075, -2.1_740, 1.8_071, -2.5_630, -0.0_926, -0.3_811, 1.2_116, 2.6_246, 1.2_731, -0.5_398, -2.8_153, -3.6_140, 2.3_893, 1.3_262, 1.6_258, -2.1_856, -1.3_267, 2.8_395, 2.3_779, -1.0_623, -1.2_468, 0.8_959, 3.3_367, 3.2_243 ]) _a : Union[str, Any] = torch.tensor([ -2.0_628, -2.7_667, -0.2_089, -0.8_263, 2.0_539, 0.5_992, 0.6_495, -3.8_336, 1.6_025, -3.2_817, 0.1_721, -0.0_633, 1.7_516, 2.7_039, 0.8_100, -0.5_908, -3.2_113, -4.4_343, 2.9_257, 1.3_632, 1.5_562, -2.1_489, -1.9_894, 3.0_560, 3.3_396, -0.7_328, -1.0_417, 0.0_383, 3.7_093, 3.2_343 ]) _a : str = torch.tensor([ -1.4_574, -2.0_569, -0.0_473, -0.6_117, 1.4_018, 0.5_769, 0.4_129, -2.7_344, 1.2_241, -2.1_397, 0.2_000, 0.3_937, 0.7_616, 2.0_453, 0.7_324, -0.3_391, -2.1_746, -2.7_744, 1.6_963, 0.6_921, 1.2_187, -1.6_172, -0.8_877, 2.2_439, 1.8_471, -0.5_839, -0.5_605, -0.0_464, 2.3_250, 2.1_219 ]) # fmt: on _a : Optional[Any] = api.list_models(filter="diffusers") for mod in models: if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256": _a : List[str] = "/home/patrick/google_checkpoints/" + mod.modelId.split("/")[-1] print(f"""Started running {mod.modelId}!!!""") if mod.modelId.startswith("CompVis"): _a : int = UNetaDModel.from_pretrained(local_checkpoint, subfolder="unet") else: _a : Optional[int] = UNetaDModel.from_pretrained(local_checkpoint) torch.manual_seed(0) random.seed(0) _a : str = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size) _a : str = torch.tensor([10] * noise.shape[0]) with torch.no_grad(): _a : str = model(noise, time_step).sample assert torch.allclose( logits[0, 0, 0, :30], results["_".join("_".join(mod.modelId.split("/")).split("-"))], atol=1e-3 ) print(f"""{mod.modelId} has passed successfully!!!""")
10
1
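The Informer __init__.py in the row above defers heavy imports through transformers' _LazyModule. Below is a minimal sketch of the same deferred-import idea using plain PEP 562 module __getattr__; it is illustrative only, not transformers' actual implementation.

import importlib

_import_structure = {"math": ["sqrt"], "json": ["dumps"]}

def __getattr__(name):
    # Called only when `name` is not found as a normal module attribute;
    # the backing module is imported on first access.
    for module_name, symbols in _import_structure.items():
        if name in symbols:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")

Placed in a package's __init__.py, `from pkg import sqrt` only imports math when the symbol is first requested.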
'''simple docstring''' import unittest from huggingface_hub import hf_hub_download from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor from transformers.pipelines import VideoClassificationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_decord, require_tf, require_torch, require_torch_or_tf, require_vision, ) from .test_pipelines_common import ANY @is_pipeline_test @require_torch_or_tf @require_vision @require_decord class __A (unittest.TestCase ): snake_case :int = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ): __UpperCAmelCase : Any = hf_hub_download( repo_id="nateraw/video-demo" , filename="archery.mp4" , repo_type="dataset" ) __UpperCAmelCase : Optional[Any] = VideoClassificationPipeline(model=UpperCamelCase_ , image_processor=UpperCamelCase_ , top_k=2 ) __UpperCAmelCase : Dict = [ example_video_filepath, "https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4", ] return video_classifier, examples def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ ): for example in examples: __UpperCAmelCase : int = video_classifier(UpperCamelCase_ ) self.assertEqual( UpperCamelCase_ , [ {"score": ANY(UpperCamelCase_ ), "label": ANY(UpperCamelCase_ )}, {"score": ANY(UpperCamelCase_ ), "label": ANY(UpperCamelCase_ )}, ] , ) @require_torch def _snake_case ( self ): __UpperCAmelCase : Union[str, Any] = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification" __UpperCAmelCase : Optional[Any] = VideoMAEFeatureExtractor( size={"shortest_edge": 10} , crop_size={"height": 10, "width": 10} ) __UpperCAmelCase : Optional[Any] = pipeline( "video-classification" , model=UpperCamelCase_ , feature_extractor=UpperCamelCase_ , frame_sampling_rate=4 ) __UpperCAmelCase : Any = hf_hub_download(repo_id="nateraw/video-demo" , filename="archery.mp4" , repo_type="dataset" ) __UpperCAmelCase : Dict = video_classifier(UpperCamelCase_ , top_k=2 ) self.assertEqual( nested_simplify(UpperCamelCase_ , decimals=4 ) , [{"score": 0.5_1_9_9, "label": "LABEL_0"}, {"score": 0.4_8_0_1, "label": "LABEL_1"}] , ) __UpperCAmelCase : str = video_classifier( [ video_file_path, video_file_path, ] , top_k=2 , ) self.assertEqual( nested_simplify(UpperCamelCase_ , decimals=4 ) , [ [{"score": 0.5_1_9_9, "label": "LABEL_0"}, {"score": 0.4_8_0_1, "label": "LABEL_1"}], [{"score": 0.5_1_9_9, "label": "LABEL_0"}, {"score": 0.4_8_0_1, "label": "LABEL_1"}], ] , ) @require_tf def _snake_case ( self ): pass
10
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging _a : Any = logging.get_logger(__name__) _a : List[Any] = { "microsoft/cvt-13": "https://huggingface.co/microsoft/cvt-13/resolve/main/config.json", # See all Cvt models at https://huggingface.co/models?filter=cvt } class __A (__magic_name__ ): snake_case :Any = "cvt" def __init__( self , UpperCamelCase_=3 , UpperCamelCase_=[7, 3, 3] , UpperCamelCase_=[4, 2, 2] , UpperCamelCase_=[2, 1, 1] , UpperCamelCase_=[64, 1_92, 3_84] , UpperCamelCase_=[1, 3, 6] , UpperCamelCase_=[1, 2, 10] , UpperCamelCase_=[4.0, 4.0, 4.0] , UpperCamelCase_=[0.0, 0.0, 0.0] , UpperCamelCase_=[0.0, 0.0, 0.0] , UpperCamelCase_=[0.0, 0.0, 0.1] , UpperCamelCase_=[True, True, True] , UpperCamelCase_=[False, False, True] , UpperCamelCase_=["dw_bn", "dw_bn", "dw_bn"] , UpperCamelCase_=[3, 3, 3] , UpperCamelCase_=[1, 1, 1] , UpperCamelCase_=[2, 2, 2] , UpperCamelCase_=[1, 1, 1] , UpperCamelCase_=[1, 1, 1] , UpperCamelCase_=0.0_2 , UpperCamelCase_=1E-12 , **UpperCamelCase_ , ): super().__init__(**UpperCamelCase_ ) __UpperCAmelCase : Optional[int] = num_channels __UpperCAmelCase : Optional[Any] = patch_sizes __UpperCAmelCase : List[str] = patch_stride __UpperCAmelCase : Tuple = patch_padding __UpperCAmelCase : int = embed_dim __UpperCAmelCase : str = num_heads __UpperCAmelCase : Any = depth __UpperCAmelCase : List[str] = mlp_ratio __UpperCAmelCase : List[str] = attention_drop_rate __UpperCAmelCase : Dict = drop_rate __UpperCAmelCase : Dict = drop_path_rate __UpperCAmelCase : str = qkv_bias __UpperCAmelCase : Optional[int] = cls_token __UpperCAmelCase : Optional[Any] = qkv_projection_method __UpperCAmelCase : Tuple = kernel_qkv __UpperCAmelCase : Optional[Any] = padding_kv __UpperCAmelCase : Optional[int] = stride_kv __UpperCAmelCase : Any = padding_q __UpperCAmelCase : List[Any] = stride_q __UpperCAmelCase : Union[str, Any] = initializer_range __UpperCAmelCase : Any = layer_norm_eps
10
1
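The CvT configuration in the row above encodes a three-stage model as parallel per-stage lists (patch_sizes, embed_dim, num_heads, depth, and so on). A sketch of how such lists are typically zipped into stages; the loop and names are illustrative, not CvT's modeling code:

embed_dim = [64, 192, 384]
num_heads = [1, 3, 6]
depth = [1, 2, 10]

for stage, (width, heads, blocks) in enumerate(zip(embed_dim, num_heads, depth)):
    # One entry per stage; all per-stage lists must therefore have equal length.
    print(f"stage {stage}: {blocks} blocks, width {width}, {heads} attention heads")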
'''simple docstring''' import time import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers.generation import ( MaxLengthCriteria, MaxNewTokensCriteria, MaxTimeCriteria, StoppingCriteriaList, validate_stopping_criteria, ) @require_torch class __A (unittest.TestCase ): def _snake_case ( self , UpperCamelCase_ ): __UpperCAmelCase : List[str] = 3 __UpperCAmelCase : Tuple = 2_50 __UpperCAmelCase : str = ids_tensor((batch_size, length) , UpperCamelCase_ ) __UpperCAmelCase : Any = torch.ones((batch_size, length) , device=UpperCamelCase_ , dtype=torch.float ) / length return input_ids, scores def _snake_case ( self ): __UpperCAmelCase , __UpperCAmelCase : Tuple = self._get_tensors(5 ) __UpperCAmelCase : Tuple = StoppingCriteriaList( [ MaxLengthCriteria(max_length=10 ), MaxTimeCriteria(max_time=0.1 ), ] ) self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) __UpperCAmelCase , __UpperCAmelCase : int = self._get_tensors(9 ) self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) __UpperCAmelCase , __UpperCAmelCase : Optional[int] = self._get_tensors(10 ) self.assertTrue(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) def _snake_case ( self ): __UpperCAmelCase : int = MaxLengthCriteria(max_length=10 ) __UpperCAmelCase , __UpperCAmelCase : Tuple = self._get_tensors(5 ) self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) __UpperCAmelCase , __UpperCAmelCase : Dict = self._get_tensors(9 ) self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) __UpperCAmelCase , __UpperCAmelCase : Optional[int] = self._get_tensors(10 ) self.assertTrue(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) def _snake_case ( self ): __UpperCAmelCase : Optional[Any] = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 ) __UpperCAmelCase , __UpperCAmelCase : List[str] = self._get_tensors(5 ) self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) __UpperCAmelCase , __UpperCAmelCase : Dict = self._get_tensors(9 ) self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) __UpperCAmelCase , __UpperCAmelCase : Optional[Any] = self._get_tensors(10 ) self.assertTrue(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) __UpperCAmelCase : Union[str, Any] = StoppingCriteriaList([criteria] ) self.assertEqual(criteria_list.max_length , 10 ) def _snake_case ( self ): __UpperCAmelCase , __UpperCAmelCase : Optional[Any] = self._get_tensors(5 ) __UpperCAmelCase : str = MaxTimeCriteria(max_time=0.1 ) self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) __UpperCAmelCase : str = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 ) self.assertTrue(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) def _snake_case ( self ): validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 10 ) with self.assertWarns(UpperCamelCase_ ): validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 11 ) __UpperCAmelCase : Optional[int] = validate_stopping_criteria(StoppingCriteriaList() , 11 ) self.assertEqual(len(UpperCamelCase_ ) , 1 )
10
'''simple docstring''' from __future__ import annotations import numpy as np from numpy import floataa from numpy.typing import NDArray def _lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , ) -> list[float]: """simple docstring""" __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = coefficient_matrix.shape __UpperCAmelCase , __UpperCAmelCase : Any = constant_matrix.shape if rowsa != colsa: __UpperCAmelCase : str = f"""Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}""" raise ValueError(lowerCamelCase__ ) if colsa != 1: __UpperCAmelCase : Optional[Any] = f"""Constant matrix must be nx1 but received {rowsa}x{colsa}""" raise ValueError(lowerCamelCase__ ) if rowsa != rowsa: __UpperCAmelCase : Optional[int] = ( "Coefficient and constant matrices dimensions must be nxn and nx1 but " f"""received {rowsa}x{colsa} and {rowsa}x{colsa}""" ) raise ValueError(lowerCamelCase__ ) if len(lowerCamelCase__ ) != rowsa: __UpperCAmelCase : List[str] = ( "Number of initial values must be equal to number of rows in coefficient " f"""matrix but received {len(lowerCamelCase__ )} and {rowsa}""" ) raise ValueError(lowerCamelCase__ ) if iterations <= 0: raise ValueError("Iterations must be at least 1" ) __UpperCAmelCase : NDArray[floataa] = np.concatenate( (coefficient_matrix, constant_matrix) , axis=1 ) __UpperCAmelCase , __UpperCAmelCase : Tuple = table.shape strictly_diagonally_dominant(lowerCamelCase__ ) # Iterates the whole matrix for given number of times for _ in range(lowerCamelCase__ ): __UpperCAmelCase : int = [] for row in range(lowerCamelCase__ ): __UpperCAmelCase : List[str] = 0 for col in range(lowerCamelCase__ ): if col == row: __UpperCAmelCase : int = table[row][col] elif col == cols - 1: __UpperCAmelCase : Any = table[row][col] else: temp += (-1) * table[row][col] * init_val[col] __UpperCAmelCase : List[Any] = (temp + val) / denom new_val.append(lowerCamelCase__ ) __UpperCAmelCase : str = new_val return [float(lowerCamelCase__ ) for i in new_val] def _lowercase ( lowerCamelCase__ ) -> bool: """simple docstring""" __UpperCAmelCase , __UpperCAmelCase : Optional[int] = table.shape __UpperCAmelCase : str = True for i in range(0 , lowerCamelCase__ ): __UpperCAmelCase : Union[str, Any] = 0 for j in range(0 , cols - 1 ): if i == j: continue else: total += table[i][j] if table[i][i] <= total: raise ValueError("Coefficient matrix is not strictly diagonally dominant" ) return is_diagonally_dominant # Test Cases if __name__ == "__main__": import doctest doctest.testmod()
10
1
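The second file in the row above runs element-wise Jacobi iteration. The same update can be stated in one vectorised line, x_new = (b - R @ x) / D with R the off-diagonal part of A; a self-contained sketch on a strictly diagonally dominant system:

import numpy as np

def jacobi(coefficients, constants, x0, iterations):
    # x_{k+1} = D^{-1} (b - R x_k), where A = D + R splits A into its
    # diagonal D and off-diagonal remainder R.
    a = np.asarray(coefficients, dtype=float)
    b = np.asarray(constants, dtype=float)
    x = np.asarray(x0, dtype=float)
    d = np.diag(a)
    r = a - np.diagflat(d)
    for _ in range(iterations):
        x = (b - r @ x) / d
    return x

a = [[4.0, 1.0, 1.0], [1.0, 5.0, 2.0], [1.0, 2.0, 4.0]]  # diagonally dominant
b = [6.0, -4.0, 3.0]
x = jacobi(a, b, [0.0, 0.0, 0.0], 50)
assert np.allclose(np.array(a) @ x, b)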
'''simple docstring''' from __future__ import annotations from dataclasses import dataclass @dataclass class __A : snake_case :float snake_case :TreeNode | None = None snake_case :TreeNode | None = None def _lowercase ( lowerCamelCase__ ) -> bool: """simple docstring""" def is_valid_tree(lowerCamelCase__ ) -> bool: if node is None: return True if not isinstance(lowerCamelCase__ , lowerCamelCase__ ): return False try: float(node.data ) except (TypeError, ValueError): return False return is_valid_tree(node.left ) and is_valid_tree(node.right ) if not is_valid_tree(lowerCamelCase__ ): raise ValueError( "Each node should be type of TreeNode and data should be float." ) def is_binary_search_tree_recursive_check( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> bool: if node is None: return True return ( left_bound < node.data < right_bound and is_binary_search_tree_recursive_check(node.left , lowerCamelCase__ , node.data ) and is_binary_search_tree_recursive_check( node.right , node.data , lowerCamelCase__ ) ) return is_binary_search_tree_recursive_check(lowerCamelCase__ , -float("inf" ) , float("inf" ) ) if __name__ == "__main__": import doctest doctest.testmod()
10
'''simple docstring''' from maths.is_square_free import is_square_free from maths.prime_factors import prime_factors def _lowercase ( lowerCamelCase__ ) -> int: """simple docstring""" __UpperCAmelCase : Any = prime_factors(lowerCamelCase__ ) if is_square_free(lowerCamelCase__ ): return -1 if len(lowerCamelCase__ ) % 2 else 1 return 0 if __name__ == "__main__": import doctest doctest.testmod()
10
1
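The Möbius helper in the row above leans on the repo-local prime_factors and is_square_free imports. An equivalent self-contained sketch by trial division, with the first ten values checked:

def mobius(n):
    # mu(n) = 0 if any squared prime divides n, otherwise (-1)**k where k is
    # the number of distinct prime factors.
    if n < 1:
        raise ValueError("n must be a positive integer")
    result, p = 1, 2
    while p * p <= n:
        if n % p == 0:
            n //= p
            if n % p == 0:  # squared prime factor, so not square-free
                return 0
            result = -result
        p += 1
    if n > 1:  # one leftover prime factor
        result = -result
    return result

assert [mobius(k) for k in range(1, 11)] == [1, -1, -1, 0, -1, 1, -1, 0, 0, 1]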
'''simple docstring''' from collections import OrderedDict from typing import List, Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _a : Dict = logging.get_logger(__name__) _a : Optional[int] = { "google/efficientnet-b7": "https://huggingface.co/google/efficientnet-b7/resolve/main/config.json", } class __A (__magic_name__ ): snake_case :Union[str, Any] = "efficientnet" def __init__( self , UpperCamelCase_ = 3 , UpperCamelCase_ = 6_00 , UpperCamelCase_ = 2.0 , UpperCamelCase_ = 3.1 , UpperCamelCase_ = 8 , UpperCamelCase_ = [3, 3, 5, 3, 5, 5, 3] , UpperCamelCase_ = [32, 16, 24, 40, 80, 1_12, 1_92] , UpperCamelCase_ = [16, 24, 40, 80, 1_12, 1_92, 3_20] , UpperCamelCase_ = [] , UpperCamelCase_ = [1, 2, 2, 2, 1, 2, 1] , UpperCamelCase_ = [1, 2, 2, 3, 3, 4, 1] , UpperCamelCase_ = [1, 6, 6, 6, 6, 6, 6] , UpperCamelCase_ = 0.2_5 , UpperCamelCase_ = "swish" , UpperCamelCase_ = 25_60 , UpperCamelCase_ = "mean" , UpperCamelCase_ = 0.0_2 , UpperCamelCase_ = 0.0_0_1 , UpperCamelCase_ = 0.9_9 , UpperCamelCase_ = 0.5 , UpperCamelCase_ = 0.2 , **UpperCamelCase_ , ): super().__init__(**UpperCamelCase_ ) __UpperCAmelCase : List[Any] = num_channels __UpperCAmelCase : Union[str, Any] = image_size __UpperCAmelCase : List[Any] = width_coefficient __UpperCAmelCase : Any = depth_coefficient __UpperCAmelCase : Optional[Any] = depth_divisor __UpperCAmelCase : Any = kernel_sizes __UpperCAmelCase : str = in_channels __UpperCAmelCase : Optional[Any] = out_channels __UpperCAmelCase : List[str] = depthwise_padding __UpperCAmelCase : List[str] = strides __UpperCAmelCase : Dict = num_block_repeats __UpperCAmelCase : Optional[int] = expand_ratios __UpperCAmelCase : Optional[int] = squeeze_expansion_ratio __UpperCAmelCase : int = hidden_act __UpperCAmelCase : Any = hidden_dim __UpperCAmelCase : List[Any] = pooling_type __UpperCAmelCase : Union[str, Any] = initializer_range __UpperCAmelCase : List[Any] = batch_norm_eps __UpperCAmelCase : List[str] = batch_norm_momentum __UpperCAmelCase : Optional[Any] = dropout_rate __UpperCAmelCase : Optional[Any] = drop_connect_rate __UpperCAmelCase : str = sum(UpperCamelCase_ ) * 4 class __A (__magic_name__ ): snake_case :List[str] = version.parse("1.11" ) @property def _snake_case ( self ): return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ] ) @property def _snake_case ( self ): return 1E-5
10
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) _a : Dict = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a : Dict = ["ReformerTokenizer"] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a : List[Any] = ["ReformerTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a : int = [ "REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "ReformerAttention", "ReformerForMaskedLM", "ReformerForQuestionAnswering", "ReformerForSequenceClassification", "ReformerLayer", "ReformerModel", "ReformerModelWithLMHead", "ReformerPreTrainedModel", ] if TYPE_CHECKING: from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_reformer import ReformerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_reformer_fast import ReformerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_reformer import ( REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ReformerAttention, ReformerForMaskedLM, ReformerForQuestionAnswering, ReformerForSequenceClassification, ReformerLayer, ReformerModel, ReformerModelWithLMHead, ReformerPreTrainedModel, ) else: import sys _a : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
10
1
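The EfficientNet config above carries width_coefficient=2.0, depth_coefficient=3.1 and depth_divisor=8, the b7 compound-scaling knobs. A hedged sketch of the canonical rounding rules those fields feed; this mirrors the published EfficientNet recipe, not necessarily this file's exact consumer:

import math

def round_filters(channels, width_coefficient, depth_divisor=8):
    # Scale channel counts by the width coefficient, then snap to a multiple
    # of depth_divisor, never rounding down by more than 10%.
    channels *= width_coefficient
    new_ch = max(depth_divisor, int(channels + depth_divisor / 2) // depth_divisor * depth_divisor)
    if new_ch < 0.9 * channels:
        new_ch += depth_divisor
    return int(new_ch)

def round_repeats(repeats, depth_coefficient):
    # Block repeat counts always round up so no stage disappears.
    return int(math.ceil(depth_coefficient * repeats))

assert round_filters(32, 2.0) == 64
assert round_repeats(3, 3.1) == 10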
'''simple docstring''' def _lowercase ( lowerCamelCase__ = 10**12 ) -> int: """simple docstring""" __UpperCAmelCase : Any = 1 __UpperCAmelCase : int = 0 __UpperCAmelCase : str = 1 __UpperCAmelCase : Union[str, Any] = 1 while numerator <= 2 * min_total - 1: prev_numerator += 2 * numerator numerator += 2 * prev_numerator prev_denominator += 2 * denominator denominator += 2 * prev_denominator return (denominator + 1) // 2 if __name__ == "__main__": print(f"""{solution() = }""")
10
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _a : List[str] = logging.get_logger(__name__) _a : Any = { "kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json", "kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json", "kssteven/ibert-roberta-large-mnli": ( "https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json" ), } class __A (__magic_name__ ): snake_case :Union[str, Any] = "ibert" def __init__( self , UpperCamelCase_=3_05_22 , UpperCamelCase_=7_68 , UpperCamelCase_=12 , UpperCamelCase_=12 , UpperCamelCase_=30_72 , UpperCamelCase_="gelu" , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=5_12 , UpperCamelCase_=2 , UpperCamelCase_=0.0_2 , UpperCamelCase_=1E-12 , UpperCamelCase_=1 , UpperCamelCase_=0 , UpperCamelCase_=2 , UpperCamelCase_="absolute" , UpperCamelCase_=False , UpperCamelCase_="none" , **UpperCamelCase_ , ): super().__init__(pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ ) __UpperCAmelCase : List[Any] = vocab_size __UpperCAmelCase : Optional[Any] = hidden_size __UpperCAmelCase : List[Any] = num_hidden_layers __UpperCAmelCase : Any = num_attention_heads __UpperCAmelCase : List[str] = hidden_act __UpperCAmelCase : List[str] = intermediate_size __UpperCAmelCase : Optional[int] = hidden_dropout_prob __UpperCAmelCase : Union[str, Any] = attention_probs_dropout_prob __UpperCAmelCase : str = max_position_embeddings __UpperCAmelCase : List[str] = type_vocab_size __UpperCAmelCase : Dict = initializer_range __UpperCAmelCase : Optional[int] = layer_norm_eps __UpperCAmelCase : Any = position_embedding_type __UpperCAmelCase : Tuple = quant_mode __UpperCAmelCase : Union[str, Any] = force_dequant class __A (__magic_name__ ): @property def _snake_case ( self ): if self.task == "multiple-choice": __UpperCAmelCase : Optional[int] = {0: "batch", 1: "choice", 2: "sequence"} else: __UpperCAmelCase : Optional[int] = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] )
10
1
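The first file in the row above appears to solve Project Euler problem 100: find arrangements of blue/total discs where drawing two blues has probability exactly 1/2, its recurrence walking successive Pell-equation solutions. A brute-force cross-check for small boxes; the helper name is illustrative:

from fractions import Fraction

def exact_half_arrangements(max_total):
    # Yield (blue, total) with blue/total * (blue-1)/(total-1) == 1/2.
    for total in range(2, max_total + 1):
        for blue in range(2, total + 1):
            if Fraction(blue, total) * Fraction(blue - 1, total - 1) == Fraction(1, 2):
                yield blue, total

assert list(exact_half_arrangements(150)) == [(3, 4), (15, 21), (85, 120)]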
'''simple docstring''' from typing import Dict, List, Optional, Tuple, Union import torch from ...models import AutoencoderKL, TransformeraDModel from ...schedulers import KarrasDiffusionSchedulers from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class __A (__magic_name__ ): def __init__( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = None , ): super().__init__() self.register_modules(transformer=UpperCamelCase_ , vae=UpperCamelCase_ , scheduler=UpperCamelCase_ ) # create a imagenet -> id dictionary for easier use __UpperCAmelCase : Dict = {} if idalabel is not None: for key, value in idalabel.items(): for label in value.split("," ): __UpperCAmelCase : str = int(UpperCamelCase_ ) __UpperCAmelCase : Dict = dict(sorted(self.labels.items() ) ) def _snake_case ( self , UpperCamelCase_ ): if not isinstance(UpperCamelCase_ , UpperCamelCase_ ): __UpperCAmelCase : Tuple = list(UpperCamelCase_ ) for l in label: if l not in self.labels: raise ValueError( f"""{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.""" ) return [self.labels[l] for l in label] @torch.no_grad() def __call__( self , UpperCamelCase_ , UpperCamelCase_ = 4.0 , UpperCamelCase_ = None , UpperCamelCase_ = 50 , UpperCamelCase_ = "pil" , UpperCamelCase_ = True , ): __UpperCAmelCase : Any = len(UpperCamelCase_ ) __UpperCAmelCase : Any = self.transformer.config.sample_size __UpperCAmelCase : Optional[int] = self.transformer.config.in_channels __UpperCAmelCase : Any = randn_tensor( shape=(batch_size, latent_channels, latent_size, latent_size) , generator=UpperCamelCase_ , device=self.device , dtype=self.transformer.dtype , ) __UpperCAmelCase : List[Any] = torch.cat([latents] * 2 ) if guidance_scale > 1 else latents __UpperCAmelCase : Union[str, Any] = torch.tensor(UpperCamelCase_ , device=self.device ).reshape(-1 ) __UpperCAmelCase : Union[str, Any] = torch.tensor([10_00] * batch_size , device=self.device ) __UpperCAmelCase : Union[str, Any] = torch.cat([class_labels, class_null] , 0 ) if guidance_scale > 1 else class_labels # set step values self.scheduler.set_timesteps(UpperCamelCase_ ) for t in self.progress_bar(self.scheduler.timesteps ): if guidance_scale > 1: __UpperCAmelCase : Optional[Any] = latent_model_input[: len(UpperCamelCase_ ) // 2] __UpperCAmelCase : str = torch.cat([half, half] , dim=0 ) __UpperCAmelCase : str = self.scheduler.scale_model_input(UpperCamelCase_ , UpperCamelCase_ ) __UpperCAmelCase : Optional[Any] = t if not torch.is_tensor(UpperCamelCase_ ): # TODO: this requires sync between CPU and GPU. 
So try to pass timesteps as tensors if you can # This would be a good case for the `match` statement (Python 3.10+) __UpperCAmelCase : List[Any] = latent_model_input.device.type == "mps" if isinstance(UpperCamelCase_ , UpperCamelCase_ ): __UpperCAmelCase : Dict = torch.floataa if is_mps else torch.floataa else: __UpperCAmelCase : List[Any] = torch.intaa if is_mps else torch.intaa __UpperCAmelCase : Optional[Any] = torch.tensor([timesteps] , dtype=UpperCamelCase_ , device=latent_model_input.device ) elif len(timesteps.shape ) == 0: __UpperCAmelCase : Optional[int] = timesteps[None].to(latent_model_input.device ) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML __UpperCAmelCase : Any = timesteps.expand(latent_model_input.shape[0] ) # predict noise model_output __UpperCAmelCase : Tuple = self.transformer( UpperCamelCase_ , timestep=UpperCamelCase_ , class_labels=UpperCamelCase_ ).sample # perform guidance if guidance_scale > 1: __UpperCAmelCase , __UpperCAmelCase : Dict = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:] __UpperCAmelCase , __UpperCAmelCase : str = torch.split(UpperCamelCase_ , len(UpperCamelCase_ ) // 2 , dim=0 ) __UpperCAmelCase : int = uncond_eps + guidance_scale * (cond_eps - uncond_eps) __UpperCAmelCase : Optional[int] = torch.cat([half_eps, half_eps] , dim=0 ) __UpperCAmelCase : Dict = torch.cat([eps, rest] , dim=1 ) # learned sigma if self.transformer.config.out_channels // 2 == latent_channels: __UpperCAmelCase , __UpperCAmelCase : str = torch.split(UpperCamelCase_ , UpperCamelCase_ , dim=1 ) else: __UpperCAmelCase : str = noise_pred # compute previous image: x_t -> x_t-1 __UpperCAmelCase : str = self.scheduler.step(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ).prev_sample if guidance_scale > 1: __UpperCAmelCase , __UpperCAmelCase : List[Any] = latent_model_input.chunk(2 , dim=0 ) else: __UpperCAmelCase : Union[str, Any] = latent_model_input __UpperCAmelCase : Any = 1 / self.vae.config.scaling_factor * latents __UpperCAmelCase : List[Any] = self.vae.decode(UpperCamelCase_ ).sample __UpperCAmelCase : List[Any] = (samples / 2 + 0.5).clamp(0 , 1 ) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 __UpperCAmelCase : Optional[Any] = samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": __UpperCAmelCase : Union[str, Any] = self.numpy_to_pil(UpperCamelCase_ ) if not return_dict: return (samples,) return ImagePipelineOutput(images=UpperCamelCase_ )
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments


def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark.run()


if __name__ == "__main__":
    main()
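# Typical invocation of the benchmark script above (flag names come from
# TensorFlowBenchmarkArguments; values are illustrative):
#
#     python run_benchmark_tf.py --models bert-base-cased --batch_sizes 8 --sequence_lengths 128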
def is_unique_chars(input_str: str) -> bool:
    """Return True if every character in ``input_str`` occurs at most once, using a bitmap."""
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2, ch_unicode)

        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
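# Illustrative sanity checks for the bitmap helper above (added for clarity,
# not part of the original file):
assert is_unique_chars("abcde") is True
assert is_unique_chars("abcda") is False  # 'a' repeats, so its bit is already set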
'''simple docstring''' import gc import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer import diffusers from diffusers import ( AutoencoderKL, EulerDiscreteScheduler, StableDiffusionLatentUpscalePipeline, StableDiffusionPipeline, UNetaDConditionModel, ) from diffusers.schedulers import KarrasDiffusionSchedulers from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() def _lowercase ( lowerCamelCase__ ) -> Union[str, Any]: """simple docstring""" __UpperCAmelCase : Dict = [tensor.shape for tensor in tensor_list] return all(shape == shapes[0] for shape in shapes[1:] ) class __A (__magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ): snake_case :Union[str, Any] = StableDiffusionLatentUpscalePipeline snake_case :Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - { "height", "width", "cross_attention_kwargs", "negative_prompt_embeds", "prompt_embeds", } snake_case :List[str] = PipelineTesterMixin.required_optional_params - {"num_images_per_prompt"} snake_case :Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS snake_case :Optional[Any] = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess snake_case :Any = frozenset([] ) snake_case :Optional[int] = True @property def _snake_case ( self ): __UpperCAmelCase : Optional[int] = 1 __UpperCAmelCase : Dict = 4 __UpperCAmelCase : List[str] = (16, 16) __UpperCAmelCase : Dict = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(UpperCamelCase_ ) return image def _snake_case ( self ): torch.manual_seed(0 ) __UpperCAmelCase : List[str] = UNetaDConditionModel( act_fn="gelu" , attention_head_dim=8 , norm_num_groups=UpperCamelCase_ , block_out_channels=[32, 32, 64, 64] , time_cond_proj_dim=1_60 , conv_in_kernel=1 , conv_out_kernel=1 , cross_attention_dim=32 , down_block_types=( "KDownBlock2D", "KCrossAttnDownBlock2D", "KCrossAttnDownBlock2D", "KCrossAttnDownBlock2D", ) , in_channels=8 , mid_block_type=UpperCamelCase_ , only_cross_attention=UpperCamelCase_ , out_channels=5 , resnet_time_scale_shift="scale_shift" , time_embedding_type="fourier" , timestep_post_act="gelu" , up_block_types=("KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KUpBlock2D") , ) __UpperCAmelCase : int = AutoencoderKL( block_out_channels=[32, 32, 64, 64] , in_channels=3 , out_channels=3 , down_block_types=[ "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", ] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , ) __UpperCAmelCase : Optional[int] = EulerDiscreteScheduler(prediction_type="sample" ) __UpperCAmelCase : int = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="quick_gelu" , projection_dim=5_12 , ) __UpperCAmelCase : List[str] = CLIPTextModel(UpperCamelCase_ ) __UpperCAmelCase : Tuple = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) 
__UpperCAmelCase : Union[str, Any] = { "unet": model.eval(), "vae": vae.eval(), "scheduler": scheduler, "text_encoder": text_encoder, "tokenizer": tokenizer, } return components def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_=0 ): if str(UpperCamelCase_ ).startswith("mps" ): __UpperCAmelCase : str = torch.manual_seed(UpperCamelCase_ ) else: __UpperCAmelCase : Optional[int] = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ ) __UpperCAmelCase : Any = { "prompt": "A painting of a squirrel eating a burger", "image": self.dummy_image.cpu(), "generator": generator, "num_inference_steps": 2, "output_type": "numpy", } return inputs def _snake_case ( self ): __UpperCAmelCase : List[str] = "cpu" __UpperCAmelCase : List[str] = self.get_dummy_components() __UpperCAmelCase : Tuple = self.pipeline_class(**UpperCamelCase_ ) pipe.to(UpperCamelCase_ ) pipe.set_progress_bar_config(disable=UpperCamelCase_ ) __UpperCAmelCase : Any = self.get_dummy_inputs(UpperCamelCase_ ) __UpperCAmelCase : int = pipe(**UpperCamelCase_ ).images __UpperCAmelCase : Any = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 2_56, 2_56, 3) ) __UpperCAmelCase : Tuple = np.array( [0.4_7_2_2_2_4_1_2, 0.4_1_9_2_1_6_3_3, 0.4_4_7_1_7_4_3_4, 0.4_6_8_7_4_1_9_2, 0.4_2_5_8_8_2_5_8, 0.4_6_1_5_0_7_2_6, 0.4_6_7_7_5_3_4, 0.4_5_5_8_3_8_3_2, 0.4_8_5_7_9_0_5_5] ) __UpperCAmelCase : List[str] = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(UpperCamelCase_ , 1E-3 ) def _snake_case ( self ): super().test_attention_slicing_forward_pass(expected_max_diff=7E-3 ) def _snake_case ( self ): super().test_cpu_offload_forward_pass(expected_max_diff=3E-3 ) def _snake_case ( self ): super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 ) def _snake_case ( self ): super().test_inference_batch_single_identical(expected_max_diff=7E-3 ) def _snake_case ( self ): super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3E-3 ) def _snake_case ( self ): super().test_save_load_local(expected_max_difference=3E-3 ) def _snake_case ( self ): super().test_save_load_optional_components(expected_max_difference=3E-3 ) def _snake_case ( self ): __UpperCAmelCase : Dict = [ "DDIMScheduler", "DDPMScheduler", "PNDMScheduler", "HeunDiscreteScheduler", "EulerAncestralDiscreteScheduler", "KDPM2DiscreteScheduler", "KDPM2AncestralDiscreteScheduler", "DPMSolverSDEScheduler", ] __UpperCAmelCase : Tuple = self.get_dummy_components() __UpperCAmelCase : Union[str, Any] = self.pipeline_class(**UpperCamelCase_ ) # make sure that PNDM does not need warm-up pipe.scheduler.register_to_config(skip_prk_steps=UpperCamelCase_ ) pipe.to(UpperCamelCase_ ) pipe.set_progress_bar_config(disable=UpperCamelCase_ ) __UpperCAmelCase : Tuple = self.get_dummy_inputs(UpperCamelCase_ ) __UpperCAmelCase : List[str] = 2 __UpperCAmelCase : List[str] = [] for scheduler_enum in KarrasDiffusionSchedulers: if scheduler_enum.name in skip_schedulers: # no sigma schedulers are not supported # no schedulers continue __UpperCAmelCase : Optional[int] = getattr(UpperCamelCase_ , scheduler_enum.name ) __UpperCAmelCase : List[str] = scheduler_cls.from_config(pipe.scheduler.config ) __UpperCAmelCase : Optional[int] = pipe(**UpperCamelCase_ )[0] outputs.append(UpperCamelCase_ ) assert check_same_shape(UpperCamelCase_ ) @require_torch_gpu @slow class __A (unittest.TestCase ): def _snake_case ( self ): super().tearDown() gc.collect() torch.cuda.empty_cache() def _snake_case ( self ): __UpperCAmelCase : Optional[int] = torch.manual_seed(33 ) 
__UpperCAmelCase : str = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4" , torch_dtype=torch.floataa ) pipe.to("cuda" ) __UpperCAmelCase : Union[str, Any] = StableDiffusionLatentUpscalePipeline.from_pretrained( "stabilityai/sd-x2-latent-upscaler" , torch_dtype=torch.floataa ) upscaler.to("cuda" ) __UpperCAmelCase : Optional[int] = "a photo of an astronaut high resolution, unreal engine, ultra realistic" __UpperCAmelCase : Any = pipe(UpperCamelCase_ , generator=UpperCamelCase_ , output_type="latent" ).images __UpperCAmelCase : int = upscaler( prompt=UpperCamelCase_ , image=UpperCamelCase_ , num_inference_steps=20 , guidance_scale=0 , generator=UpperCamelCase_ , output_type="np" , ).images[0] __UpperCAmelCase : Optional[Any] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy" ) assert np.abs((expected_image - image).mean() ) < 5E-2 def _snake_case ( self ): __UpperCAmelCase : List[Any] = torch.manual_seed(33 ) __UpperCAmelCase : Union[str, Any] = StableDiffusionLatentUpscalePipeline.from_pretrained( "stabilityai/sd-x2-latent-upscaler" , torch_dtype=torch.floataa ) upscaler.to("cuda" ) __UpperCAmelCase : Optional[Any] = "the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas" __UpperCAmelCase : str = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png" ) __UpperCAmelCase : Dict = upscaler( prompt=UpperCamelCase_ , image=UpperCamelCase_ , num_inference_steps=20 , guidance_scale=0 , generator=UpperCamelCase_ , output_type="np" , ).images[0] __UpperCAmelCase : Tuple = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy" ) assert np.abs((expected_image - image).max() ) < 5E-2
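# The two-stage flow exercised by the slow tests above, as a standalone sketch
# (requires a GPU and the public checkpoints named in the tests):
#
#     base = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16).to("cuda")
#     upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained("stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16).to("cuda")
#     prompt = "a photo of an astronaut high resolution, unreal engine, ultra realistic"
#     low_res_latents = base(prompt, output_type="latent").images
#     image = upscaler(prompt=prompt, image=low_res_latents, num_inference_steps=20, guidance_scale=0).images[0]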
'''simple docstring''' import os import tempfile import unittest from transformers import FlaubertConfig, is_torch_available from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( FlaubertForMultipleChoice, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertModel, FlaubertWithLMHeadModel, ) from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST class __A (__magic_name__ ): def __init__( self , UpperCamelCase_ , UpperCamelCase_=13 , UpperCamelCase_=7 , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=False , UpperCamelCase_=False , UpperCamelCase_=False , UpperCamelCase_=2 , UpperCamelCase_=99 , UpperCamelCase_=0 , UpperCamelCase_=32 , UpperCamelCase_=5 , UpperCamelCase_=4 , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=5_12 , UpperCamelCase_=12 , UpperCamelCase_=2 , UpperCamelCase_=0.0_2 , UpperCamelCase_=3 , UpperCamelCase_=4 , UpperCamelCase_="last" , UpperCamelCase_=None , UpperCamelCase_=None , ): __UpperCAmelCase : Optional[Any] = parent __UpperCAmelCase : Union[str, Any] = batch_size __UpperCAmelCase : Optional[Any] = seq_length __UpperCAmelCase : Optional[int] = is_training __UpperCAmelCase : Any = use_input_lengths __UpperCAmelCase : Optional[Any] = use_token_type_ids __UpperCAmelCase : List[str] = use_labels __UpperCAmelCase : Union[str, Any] = gelu_activation __UpperCAmelCase : Tuple = sinusoidal_embeddings __UpperCAmelCase : str = causal __UpperCAmelCase : Union[str, Any] = asm __UpperCAmelCase : Optional[int] = n_langs __UpperCAmelCase : Union[str, Any] = vocab_size __UpperCAmelCase : Optional[int] = n_special __UpperCAmelCase : List[str] = hidden_size __UpperCAmelCase : str = num_hidden_layers __UpperCAmelCase : Dict = num_attention_heads __UpperCAmelCase : Dict = hidden_dropout_prob __UpperCAmelCase : Any = attention_probs_dropout_prob __UpperCAmelCase : Dict = max_position_embeddings __UpperCAmelCase : str = type_vocab_size __UpperCAmelCase : Dict = type_sequence_label_size __UpperCAmelCase : Tuple = initializer_range __UpperCAmelCase : Optional[Any] = num_labels __UpperCAmelCase : Union[str, Any] = num_choices __UpperCAmelCase : Union[str, Any] = summary_type __UpperCAmelCase : Dict = use_proj __UpperCAmelCase : Dict = scope def _snake_case ( self ): __UpperCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCAmelCase : str = random_attention_mask([self.batch_size, self.seq_length] ) __UpperCAmelCase : Union[str, Any] = None if self.use_input_lengths: __UpperCAmelCase : Union[str, Any] = ( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length __UpperCAmelCase : Optional[Any] = None if self.use_token_type_ids: __UpperCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) __UpperCAmelCase : str = None __UpperCAmelCase : int = None __UpperCAmelCase : Optional[Any] = None if self.use_labels: __UpperCAmelCase : str = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, 
self.seq_length] , self.num_labels ) __UpperCAmelCase : Any = ids_tensor([self.batch_size] , 2 ).float() __UpperCAmelCase : Any = ids_tensor([self.batch_size] , self.num_choices ) __UpperCAmelCase : List[Any] = self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def _snake_case ( self ): return FlaubertConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , ) def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , ): __UpperCAmelCase : Tuple = FlaubertModel(config=UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() __UpperCAmelCase : int = model(UpperCamelCase_ , lengths=UpperCamelCase_ , langs=UpperCamelCase_ ) __UpperCAmelCase : int = model(UpperCamelCase_ , langs=UpperCamelCase_ ) __UpperCAmelCase : Optional[Any] = model(UpperCamelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , ): __UpperCAmelCase : List[str] = FlaubertWithLMHeadModel(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() __UpperCAmelCase : Tuple = model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , ): __UpperCAmelCase : Union[str, Any] = FlaubertForQuestionAnsweringSimple(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() __UpperCAmelCase : int = model(UpperCamelCase_ ) __UpperCAmelCase : Dict = model(UpperCamelCase_ , start_positions=UpperCamelCase_ , end_positions=UpperCamelCase_ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , ): __UpperCAmelCase : Tuple = FlaubertForQuestionAnswering(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() __UpperCAmelCase : Dict = model(UpperCamelCase_ ) __UpperCAmelCase : str = model( UpperCamelCase_ , start_positions=UpperCamelCase_ , end_positions=UpperCamelCase_ , cls_index=UpperCamelCase_ , is_impossible=UpperCamelCase_ , p_mask=UpperCamelCase_ , ) __UpperCAmelCase : Dict = model( UpperCamelCase_ , start_positions=UpperCamelCase_ , end_positions=UpperCamelCase_ , cls_index=UpperCamelCase_ , 
is_impossible=UpperCamelCase_ , ) ((__UpperCAmelCase) , ) : str = result_with_labels.to_tuple() __UpperCAmelCase : List[Any] = model(UpperCamelCase_ , start_positions=UpperCamelCase_ , end_positions=UpperCamelCase_ ) ((__UpperCAmelCase) , ) : Tuple = result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape , () ) self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual( result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) ) def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , ): __UpperCAmelCase : Optional[Any] = FlaubertForSequenceClassification(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() __UpperCAmelCase : Any = model(UpperCamelCase_ ) __UpperCAmelCase : int = model(UpperCamelCase_ , labels=UpperCamelCase_ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , ): __UpperCAmelCase : int = self.num_labels __UpperCAmelCase : Optional[Any] = FlaubertForTokenClassification(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() __UpperCAmelCase : str = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , labels=UpperCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , ): __UpperCAmelCase : Optional[Any] = self.num_choices __UpperCAmelCase : Dict = FlaubertForMultipleChoice(config=UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() __UpperCAmelCase : str = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __UpperCAmelCase : Any = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __UpperCAmelCase : List[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __UpperCAmelCase : str = model( UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def _snake_case ( self ): __UpperCAmelCase : Optional[int] = self.prepare_config_and_inputs() ( ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ) : Dict = config_and_inputs __UpperCAmelCase : str = { "input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths, "attention_mask": input_mask, } return config, inputs_dict @require_torch class __A (__magic_name__ , __magic_name__ , unittest.TestCase ): snake_case :Tuple = ( ( FlaubertModel, 
FlaubertWithLMHeadModel, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertForMultipleChoice, ) if is_torch_available() else () ) snake_case :Optional[Any] = ( { "feature-extraction": FlaubertModel, "fill-mask": FlaubertWithLMHeadModel, "question-answering": FlaubertForQuestionAnsweringSimple, "text-classification": FlaubertForSequenceClassification, "token-classification": FlaubertForTokenClassification, "zero-shot": FlaubertForSequenceClassification, } if is_torch_available() else {} ) def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ): if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith("Fast" ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=False ): __UpperCAmelCase : Tuple = super()._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ , return_labels=UpperCamelCase_ ) if return_labels: if model_class.__name__ == "FlaubertForQuestionAnswering": __UpperCAmelCase : List[Any] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=UpperCamelCase_ ) __UpperCAmelCase : Tuple = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=UpperCamelCase_ ) return inputs_dict def _snake_case ( self ): __UpperCAmelCase : Optional[int] = FlaubertModelTester(self ) __UpperCAmelCase : int = ConfigTester(self , config_class=UpperCamelCase_ , emb_dim=37 ) def _snake_case ( self ): self.config_tester.run_common_tests() def _snake_case ( self ): __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_model(*UpperCamelCase_ ) def _snake_case ( self ): __UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_lm_head(*UpperCamelCase_ ) def _snake_case ( self ): __UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_simple_qa(*UpperCamelCase_ ) def _snake_case ( self ): __UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_qa(*UpperCamelCase_ ) def _snake_case ( self ): __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_sequence_classif(*UpperCamelCase_ ) def _snake_case ( self ): __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_token_classif(*UpperCamelCase_ ) def _snake_case ( self ): __UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_multiple_choice(*UpperCamelCase_ ) @slow def _snake_case ( self ): for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __UpperCAmelCase : List[str] = FlaubertModel.from_pretrained(UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) @slow @require_torch_gpu def _snake_case ( self ): __UpperCAmelCase , __UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # FlauBertForMultipleChoice behaves 
incorrectly in JIT environments. if model_class == FlaubertForMultipleChoice: return __UpperCAmelCase : Optional[int] = True __UpperCAmelCase : Tuple = model_class(config=UpperCamelCase_ ) __UpperCAmelCase : Dict = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) __UpperCAmelCase : Any = torch.jit.trace( UpperCamelCase_ , (inputs_dict["input_ids"].to("cpu" ), inputs_dict["attention_mask"].to("cpu" )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(UpperCamelCase_ , os.path.join(UpperCamelCase_ , "traced_model.pt" ) ) __UpperCAmelCase : Dict = torch.jit.load(os.path.join(UpperCamelCase_ , "traced_model.pt" ) , map_location=UpperCamelCase_ ) loaded(inputs_dict["input_ids"].to(UpperCamelCase_ ) , inputs_dict["attention_mask"].to(UpperCamelCase_ ) ) @require_torch class __A (unittest.TestCase ): @slow def _snake_case ( self ): __UpperCAmelCase : Union[str, Any] = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased" ) __UpperCAmelCase : int = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] ) with torch.no_grad(): __UpperCAmelCase : Any = model(UpperCamelCase_ )[0] __UpperCAmelCase : List[Any] = torch.Size((1, 11, 7_68) ) self.assertEqual(output.shape , UpperCamelCase_ ) __UpperCAmelCase : int = torch.tensor( [[[-2.6_2_5_1, -1.4_2_9_8, -0.0_2_2_7], [-2.8_5_1_0, -1.6_3_8_7, 0.2_2_5_8], [-2.8_1_1_4, -1.1_8_3_2, -0.3_0_6_6]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCamelCase_ , atol=1E-4 ) )
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING

import numpy as np
import pyarrow as pa

from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter


if TYPE_CHECKING:
    import torch


class TorchFormatter(TensorFormatter[Mapping, "torch.Tensor", Mapping]):
    def __init__(self, features=None, **torch_tensor_kwargs):
        super().__init__(features=features)
        self.torch_tensor_kwargs = torch_tensor_kwargs
        import torch  # noqa: import torch at initialization

    def _consolidate(self, column):
        import torch

        if isinstance(column, list) and column:
            if all(
                isinstance(x, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column
            ):
                return torch.stack(column)
        return column

    def _tensorize(self, value):
        import torch

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}
        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            default_dtype = {"dtype": torch.int64}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs})

    def _recursive_tensorize(self, data_struct):
        import torch

        # support for torch, tf, jax etc.
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, torch.Tensor):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # torch tensors cannot be instantiated from an array of objects
                return self._consolidate([self._recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self._recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "torch.Tensor":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
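# This formatter is normally reached through the public datasets API rather
# than instantiated directly; a minimal illustrative sketch:
#
#     from datasets import Dataset
#
#     ds = Dataset.from_dict({"x": [[1, 2], [3, 4]]})
#     ds.set_format("torch")
#     ds[0]["x"]  # tensor([1, 2]); integer columns map to torch.int64
#     ds["x"]     # equal-shaped rows are consolidated via torch.stack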
'''simple docstring''' import argparse import os import torch from diffusers import ( CMStochasticIterativeScheduler, ConsistencyModelPipeline, UNetaDModel, ) _a : Tuple = { "sample_size": 32, "in_channels": 3, "out_channels": 3, "layers_per_block": 2, "num_class_embeds": 1000, "block_out_channels": [32, 64], "attention_head_dim": 8, "down_block_types": [ "ResnetDownsampleBlock2D", "AttnDownBlock2D", ], "up_block_types": [ "AttnUpBlock2D", "ResnetUpsampleBlock2D", ], "resnet_time_scale_shift": "scale_shift", "upsample_type": "resnet", "downsample_type": "resnet", } _a : Optional[Any] = { "sample_size": 64, "in_channels": 3, "out_channels": 3, "layers_per_block": 3, "num_class_embeds": 1000, "block_out_channels": [192, 192 * 2, 192 * 3, 192 * 4], "attention_head_dim": 64, "down_block_types": [ "ResnetDownsampleBlock2D", "AttnDownBlock2D", "AttnDownBlock2D", "AttnDownBlock2D", ], "up_block_types": [ "AttnUpBlock2D", "AttnUpBlock2D", "AttnUpBlock2D", "ResnetUpsampleBlock2D", ], "resnet_time_scale_shift": "scale_shift", "upsample_type": "resnet", "downsample_type": "resnet", } _a : Optional[Any] = { "sample_size": 256, "in_channels": 3, "out_channels": 3, "layers_per_block": 2, "num_class_embeds": None, "block_out_channels": [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4], "attention_head_dim": 64, "down_block_types": [ "ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D", "AttnDownBlock2D", "AttnDownBlock2D", "AttnDownBlock2D", ], "up_block_types": [ "AttnUpBlock2D", "AttnUpBlock2D", "AttnUpBlock2D", "ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D", ], "resnet_time_scale_shift": "default", "upsample_type": "resnet", "downsample_type": "resnet", } _a : Optional[Any] = { "num_train_timesteps": 40, "sigma_min": 0.002, "sigma_max": 80.0, } _a : List[str] = { "num_train_timesteps": 201, "sigma_min": 0.002, "sigma_max": 80.0, } _a : str = { "num_train_timesteps": 151, "sigma_min": 0.002, "sigma_max": 80.0, } def _lowercase ( lowerCamelCase__ ) -> Tuple: """simple docstring""" if isinstance(lowerCamelCase__ , lowerCamelCase__ ): return v if v.lower() in ("yes", "true", "t", "y", "1"): return True elif v.lower() in ("no", "false", "f", "n", "0"): return False else: raise argparse.ArgumentTypeError("boolean value expected" ) def _lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=False ) -> int: """simple docstring""" __UpperCAmelCase : Union[str, Any] = checkpoint[f"""{old_prefix}.in_layers.0.weight"""] __UpperCAmelCase : Any = checkpoint[f"""{old_prefix}.in_layers.0.bias"""] __UpperCAmelCase : Optional[Any] = checkpoint[f"""{old_prefix}.in_layers.2.weight"""] __UpperCAmelCase : List[Any] = checkpoint[f"""{old_prefix}.in_layers.2.bias"""] __UpperCAmelCase : Dict = checkpoint[f"""{old_prefix}.emb_layers.1.weight"""] __UpperCAmelCase : List[str] = checkpoint[f"""{old_prefix}.emb_layers.1.bias"""] __UpperCAmelCase : List[str] = checkpoint[f"""{old_prefix}.out_layers.0.weight"""] __UpperCAmelCase : Optional[int] = checkpoint[f"""{old_prefix}.out_layers.0.bias"""] __UpperCAmelCase : Optional[int] = checkpoint[f"""{old_prefix}.out_layers.3.weight"""] __UpperCAmelCase : Union[str, Any] = checkpoint[f"""{old_prefix}.out_layers.3.bias"""] if has_skip: __UpperCAmelCase : List[str] = checkpoint[f"""{old_prefix}.skip_connection.weight"""] __UpperCAmelCase : int = checkpoint[f"""{old_prefix}.skip_connection.bias"""] return new_checkpoint def _lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , 
lowerCamelCase__ , lowerCamelCase__=None ) -> Optional[Any]: """simple docstring""" __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : int = checkpoint[f"""{old_prefix}.qkv.weight"""].chunk(3 , dim=0 ) __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : List[str] = checkpoint[f"""{old_prefix}.qkv.bias"""].chunk(3 , dim=0 ) __UpperCAmelCase : Any = checkpoint[f"""{old_prefix}.norm.weight"""] __UpperCAmelCase : int = checkpoint[f"""{old_prefix}.norm.bias"""] __UpperCAmelCase : Union[str, Any] = weight_q.squeeze(-1 ).squeeze(-1 ) __UpperCAmelCase : int = bias_q.squeeze(-1 ).squeeze(-1 ) __UpperCAmelCase : List[Any] = weight_k.squeeze(-1 ).squeeze(-1 ) __UpperCAmelCase : Any = bias_k.squeeze(-1 ).squeeze(-1 ) __UpperCAmelCase : List[str] = weight_v.squeeze(-1 ).squeeze(-1 ) __UpperCAmelCase : Any = bias_v.squeeze(-1 ).squeeze(-1 ) __UpperCAmelCase : Dict = ( checkpoint[f"""{old_prefix}.proj_out.weight"""].squeeze(-1 ).squeeze(-1 ) ) __UpperCAmelCase : List[str] = checkpoint[f"""{old_prefix}.proj_out.bias"""].squeeze(-1 ).squeeze(-1 ) return new_checkpoint def _lowercase ( lowerCamelCase__ , lowerCamelCase__ ) -> Dict: """simple docstring""" __UpperCAmelCase : int = torch.load(lowerCamelCase__ , map_location="cpu" ) __UpperCAmelCase : Union[str, Any] = {} __UpperCAmelCase : Tuple = checkpoint["time_embed.0.weight"] __UpperCAmelCase : Any = checkpoint["time_embed.0.bias"] __UpperCAmelCase : Union[str, Any] = checkpoint["time_embed.2.weight"] __UpperCAmelCase : List[Any] = checkpoint["time_embed.2.bias"] if unet_config["num_class_embeds"] is not None: __UpperCAmelCase : str = checkpoint["label_emb.weight"] __UpperCAmelCase : Union[str, Any] = checkpoint["input_blocks.0.0.weight"] __UpperCAmelCase : Optional[int] = checkpoint["input_blocks.0.0.bias"] __UpperCAmelCase : Tuple = unet_config["down_block_types"] __UpperCAmelCase : Optional[Any] = unet_config["layers_per_block"] __UpperCAmelCase : Optional[Any] = unet_config["attention_head_dim"] __UpperCAmelCase : int = unet_config["block_out_channels"] __UpperCAmelCase : int = 1 __UpperCAmelCase : List[Any] = channels_list[0] for i, layer_type in enumerate(lowerCamelCase__ ): __UpperCAmelCase : Tuple = channels_list[i] __UpperCAmelCase : Tuple = current_channels != prev_channels if layer_type == "ResnetDownsampleBlock2D": for j in range(lowerCamelCase__ ): __UpperCAmelCase : Dict = f"""down_blocks.{i}.resnets.{j}""" __UpperCAmelCase : Dict = f"""input_blocks.{current_layer}.0""" __UpperCAmelCase : Optional[Any] = True if j == 0 and downsample_block_has_skip else False __UpperCAmelCase : Dict = convert_resnet(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , has_skip=lowerCamelCase__ ) current_layer += 1 elif layer_type == "AttnDownBlock2D": for j in range(lowerCamelCase__ ): __UpperCAmelCase : str = f"""down_blocks.{i}.resnets.{j}""" __UpperCAmelCase : Any = f"""input_blocks.{current_layer}.0""" __UpperCAmelCase : str = True if j == 0 and downsample_block_has_skip else False __UpperCAmelCase : Union[str, Any] = convert_resnet(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , has_skip=lowerCamelCase__ ) __UpperCAmelCase : List[str] = f"""down_blocks.{i}.attentions.{j}""" __UpperCAmelCase : Dict = f"""input_blocks.{current_layer}.1""" __UpperCAmelCase : Union[str, Any] = convert_attention( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) current_layer += 1 if i != len(lowerCamelCase__ ) - 1: __UpperCAmelCase : Dict = 
f"""down_blocks.{i}.downsamplers.0""" __UpperCAmelCase : int = f"""input_blocks.{current_layer}.0""" __UpperCAmelCase : Union[str, Any] = convert_resnet(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) current_layer += 1 __UpperCAmelCase : Union[str, Any] = current_channels # hardcoded the mid-block for now __UpperCAmelCase : str = "mid_block.resnets.0" __UpperCAmelCase : Optional[Any] = "middle_block.0" __UpperCAmelCase : Optional[int] = convert_resnet(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) __UpperCAmelCase : str = "mid_block.attentions.0" __UpperCAmelCase : int = "middle_block.1" __UpperCAmelCase : Union[str, Any] = convert_attention(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) __UpperCAmelCase : List[str] = "mid_block.resnets.1" __UpperCAmelCase : int = "middle_block.2" __UpperCAmelCase : Any = convert_resnet(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) __UpperCAmelCase : Dict = 0 __UpperCAmelCase : List[str] = unet_config["up_block_types"] for i, layer_type in enumerate(lowerCamelCase__ ): if layer_type == "ResnetUpsampleBlock2D": for j in range(layers_per_block + 1 ): __UpperCAmelCase : Union[str, Any] = f"""up_blocks.{i}.resnets.{j}""" __UpperCAmelCase : Optional[int] = f"""output_blocks.{current_layer}.0""" __UpperCAmelCase : Union[str, Any] = convert_resnet(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , has_skip=lowerCamelCase__ ) current_layer += 1 if i != len(lowerCamelCase__ ) - 1: __UpperCAmelCase : List[Any] = f"""up_blocks.{i}.upsamplers.0""" __UpperCAmelCase : List[Any] = f"""output_blocks.{current_layer-1}.1""" __UpperCAmelCase : List[Any] = convert_resnet(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) elif layer_type == "AttnUpBlock2D": for j in range(layers_per_block + 1 ): __UpperCAmelCase : int = f"""up_blocks.{i}.resnets.{j}""" __UpperCAmelCase : Union[str, Any] = f"""output_blocks.{current_layer}.0""" __UpperCAmelCase : Dict = convert_resnet(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , has_skip=lowerCamelCase__ ) __UpperCAmelCase : Union[str, Any] = f"""up_blocks.{i}.attentions.{j}""" __UpperCAmelCase : str = f"""output_blocks.{current_layer}.1""" __UpperCAmelCase : Tuple = convert_attention( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) current_layer += 1 if i != len(lowerCamelCase__ ) - 1: __UpperCAmelCase : Any = f"""up_blocks.{i}.upsamplers.0""" __UpperCAmelCase : List[Any] = f"""output_blocks.{current_layer-1}.2""" __UpperCAmelCase : Optional[int] = convert_resnet(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) __UpperCAmelCase : Union[str, Any] = checkpoint["out.0.weight"] __UpperCAmelCase : Optional[int] = checkpoint["out.0.bias"] __UpperCAmelCase : Optional[int] = checkpoint["out.2.weight"] __UpperCAmelCase : List[Any] = checkpoint["out.2.bias"] return new_checkpoint if __name__ == "__main__": _a : Optional[int] = argparse.ArgumentParser() parser.add_argument("--unet_path", default=None, type=str, required=True, help="Path to the unet.pt to convert.") parser.add_argument( "--dump_path", default=None, type=str, required=True, help="Path to output the converted UNet model." 
) parser.add_argument("--class_cond", default=True, type=str, help="Whether the model is class-conditional.") _a : Any = parser.parse_args() _a : Optional[Any] = strabool(args.class_cond) _a : Any = os.path.basename(args.unet_path) print(f"""Checkpoint: {ckpt_name}""") # Get U-Net config if "imagenet64" in ckpt_name: _a : Optional[int] = IMAGENET_64_UNET_CONFIG elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)): _a : str = LSUN_256_UNET_CONFIG elif "test" in ckpt_name: _a : List[Any] = TEST_UNET_CONFIG else: raise ValueError(f"""Checkpoint type {ckpt_name} is not currently supported.""") if not args.class_cond: _a : List[str] = None _a : str = con_pt_to_diffuser(args.unet_path, unet_config) _a : str = UNetaDModel(**unet_config) image_unet.load_state_dict(converted_unet_ckpt) # Get scheduler config if "cd" in ckpt_name or "test" in ckpt_name: _a : Union[str, Any] = CD_SCHEDULER_CONFIG elif "ct" in ckpt_name and "imagenet64" in ckpt_name: _a : List[Any] = CT_IMAGENET_64_SCHEDULER_CONFIG elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)): _a : List[str] = CT_LSUN_256_SCHEDULER_CONFIG else: raise ValueError(f"""Checkpoint type {ckpt_name} is not currently supported.""") _a : Optional[int] = CMStochasticIterativeScheduler(**scheduler_config) _a : Optional[Any] = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler) consistency_model.save_pretrained(args.dump_path)
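# Example conversion run (paths are placeholders; flags are the ones declared
# by the argparse setup above):
#
#     python convert_consistency_to_diffusers.py \
#         --unet_path /path/to/cd_imagenet64_l2.pt \
#         --dump_path ./converted-consistency-model \
#         --class_cond True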
def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
    """Check that no already-colored neighbour of the current vertex uses ``color``."""
    return not any(
        neighbour == 1 and colored_vertices[i] == color for i, neighbour in enumerate(neighbours)
    )


def util_color(graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int) -> bool:
    # Base Case
    if index == len(graph):
        return True

    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph: list[list[int]], max_colors: int) -> list[int]:
    """Return a coloring using at most ``max_colors`` colors, or an empty list if none exists."""
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
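# Illustrative run of the backtracking colorer above (added for clarity, not
# part of the original file): a 4-cycle given as an adjacency matrix is
# 2-colorable.
assert color([[0, 1, 0, 1], [1, 0, 1, 0], [0, 1, 0, 1], [1, 0, 1, 0]], 2) == [0, 1, 0, 1]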
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_vit"] = ["ViTFeatureExtractor"]
    _import_structure["image_processing_vit"] = ["ViTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit"] = [
        "VIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTForImageClassification",
        "ViTForMaskedImageModeling",
        "ViTModel",
        "ViTPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vit"] = [
        "TFViTForImageClassification",
        "TFViTModel",
        "TFViTPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_vit"] = [
        "FlaxViTForImageClassification",
        "FlaxViTModel",
        "FlaxViTPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_vit import ViTFeatureExtractor
        from .image_processing_vit import ViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit import (
            VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
            ViTModel,
            ViTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
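# With the lazy-module pattern above, callers import as usual and the heavy
# submodule is only loaded on first attribute access (illustrative):
#
#     from transformers import ViTModel  # triggers the modeling_vit import lazily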
def set_bit(number: int, position: int) -> int:
    """Set the bit at ``position`` of ``number`` to 1."""
    return number | (1 << position)


def clear_bit(number: int, position: int) -> int:
    """Set the bit at ``position`` of ``number`` to 0."""
    return number & ~(1 << position)


def flip_bit(number: int, position: int) -> int:
    """Invert the bit at ``position`` of ``number``."""
    return number ^ (1 << position)


def is_bit_set(number: int, position: int) -> bool:
    """Return True if the bit at ``position`` of ``number`` is 1."""
    return ((number >> position) & 1) == 1


def get_bit(number: int, position: int) -> int:
    """Return the bit at ``position`` of ``number`` as 0 or 1."""
    return int((number & (1 << position)) != 0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
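# Worked examples for the single-bit helpers above (added for clarity, not part
# of the original file; binary literals shown for readability):
assert set_bit(0b1101, 1) == 0b1111
assert clear_bit(0b1111, 2) == 0b1011
assert flip_bit(0b1101, 0) == 0b1100
assert is_bit_set(0b1010, 3) is True
assert get_bit(0b1010, 0) == 0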
'''simple docstring''' import logging import os import random import sys from dataclasses import dataclass, field from typing import Optional import datasets import numpy as np import pandas as pd from datasets import load_dataset import transformers from transformers import ( AutoConfig, BartForSequenceClassification, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, TapexTokenizer, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version from transformers.utils.versions import require_version # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.17.0.dev0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt") _a : Dict = logging.getLogger(__name__) @dataclass class __A : snake_case :Optional[str] = field( default="tab_fact" , metadata={"help": "The name of the dataset to use (via the datasets library)."} ) snake_case :Optional[str] = field( default="tab_fact" , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} , ) snake_case :int = field( default=1_024 , metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) snake_case :bool = field( default=__magic_name__ , metadata={"help": "Overwrite the cached preprocessed datasets or not."} ) snake_case :bool = field( default=__magic_name__ , metadata={ "help": ( "Whether to pad all samples to `max_seq_length`. " "If False, will pad the samples dynamically when batching to the maximum length in the batch." ) } , ) snake_case :Optional[int] = field( default=__magic_name__ , metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ) } , ) snake_case :Optional[int] = field( default=__magic_name__ , metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." ) } , ) snake_case :Optional[int] = field( default=__magic_name__ , metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of prediction examples to this " "value if set." ) } , ) snake_case :Optional[str] = field( default=__magic_name__ , metadata={"help": "A csv or a json file containing the training data."} ) snake_case :Optional[str] = field( default=__magic_name__ , metadata={"help": "A csv or a json file containing the validation data."} ) snake_case :Optional[str] = field(default=__magic_name__ , metadata={"help": "A csv or a json file containing the test data."} ) def _snake_case ( self ): if self.dataset_name is not None: pass elif self.train_file is None or self.validation_file is None: raise ValueError("Need either a GLUE task, a training/validation file or a dataset name." ) else: __UpperCAmelCase : Optional[int] = self.train_file.split("." )[-1] assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file." __UpperCAmelCase : int = self.validation_file.split("." )[-1] assert ( validation_extension == train_extension ), "`validation_file` should have the same extension (csv or json) as `train_file`." 
@dataclass class __A : snake_case :str = field( default=__magic_name__ , metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) snake_case :Optional[str] = field( default=__magic_name__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} ) snake_case :Optional[str] = field( default=__magic_name__ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) snake_case :Optional[str] = field( default=__magic_name__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , ) snake_case :bool = field( default=__magic_name__ , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , ) snake_case :str = field( default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , ) snake_case :bool = field( default=__magic_name__ , metadata={ "help": ( "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) } , ) def _lowercase ( ) -> int: """simple docstring""" __UpperCAmelCase : Optional[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : List[Any] = parser.parse_args_into_dataclasses() # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , ) __UpperCAmelCase : str = training_args.get_process_log_level() logger.setLevel(lowerCamelCase__ ) datasets.utils.logging.set_verbosity(lowerCamelCase__ ) transformers.utils.logging.set_verbosity(lowerCamelCase__ ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}""" + f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" ) logger.info(f"""Training/evaluation parameters {training_args}""" ) # Detecting last checkpoint. __UpperCAmelCase : int = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: __UpperCAmelCase : List[Any] = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( f"""Output directory ({training_args.output_dir}) already exists and is not empty. """ "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """ "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Set seed before initializing model. 
set_seed(training_args.seed ) # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below) # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub). # # For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table. # # If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this # single column. You can easily tweak this behavior (see below) # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. __UpperCAmelCase : Optional[Any] = load_dataset( data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir ) else: # Loading a dataset from your local files. # CSV/JSON training and evaluation files are needed. __UpperCAmelCase : Tuple = {"train": data_args.train_file, "validation": data_args.validation_file} # Get the test dataset: you can provide your own CSV/JSON test file (see below) # when you use `do_predict` without specifying a GLUE benchmark task. if training_args.do_predict: if data_args.test_file is not None: __UpperCAmelCase : Optional[int] = data_args.train_file.split("." )[-1] __UpperCAmelCase : str = data_args.test_file.split("." )[-1] assert ( test_extension == train_extension ), "`test_file` should have the same extension (csv or json) as `train_file`." __UpperCAmelCase : str = data_args.test_file else: raise ValueError("Need either a GLUE task or a test file for `do_predict`." ) for key in data_files.keys(): logger.info(f"""load a local file for {key}: {data_files[key]}""" ) if data_args.train_file.endswith(".csv" ): # Loading a dataset from local csv files __UpperCAmelCase : int = load_dataset("csv" , data_files=lowerCamelCase__ , cache_dir=model_args.cache_dir ) else: # Loading a dataset from local json files __UpperCAmelCase : List[str] = load_dataset("json" , data_files=lowerCamelCase__ , cache_dir=model_args.cache_dir ) # See more about loading any type of standard or custom dataset at # https://huggingface.co/docs/datasets/loading_datasets.html. # Labels __UpperCAmelCase : Any = raw_datasets["train"].features["label"].names __UpperCAmelCase : List[str] = len(lowerCamelCase__ ) # Load pretrained model and tokenizer # # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
__UpperCAmelCase : List[Any] = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=lowerCamelCase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # load tapex tokenizer __UpperCAmelCase : Any = TapexTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=lowerCamelCase__ , ) __UpperCAmelCase : Optional[int] = BartForSequenceClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=lowerCamelCase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # Padding strategy if data_args.pad_to_max_length: __UpperCAmelCase : Optional[Any] = "max_length" else: # We will pad later, dynamically at batch creation, to the max sequence length in each batch __UpperCAmelCase : List[str] = False # Some models have set the order of the labels to use, so let's make sure we do use it. __UpperCAmelCase : str = {"Refused": 0, "Entailed": 1} __UpperCAmelCase : Union[str, Any] = {0: "Refused", 1: "Entailed"} if data_args.max_seq_length > tokenizer.model_max_length: logger.warning( f"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the""" f"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" ) __UpperCAmelCase : List[str] = min(data_args.max_seq_length , tokenizer.model_max_length ) def preprocess_tabfact_function(lowerCamelCase__ ): # Tokenize the texts def _convert_table_text_to_pandas(lowerCamelCase__ ): __UpperCAmelCase : List[str] = [_table_row.split("#" ) for _table_row in _table_text.strip("\n" ).split("\n" )] __UpperCAmelCase : str = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] ) return _table_pd __UpperCAmelCase : str = examples["statement"] __UpperCAmelCase : str = list(map(_convert_table_text_to_pandas , examples["table_text"] ) ) __UpperCAmelCase : int = tokenizer(lowerCamelCase__ , lowerCamelCase__ , padding=lowerCamelCase__ , max_length=lowerCamelCase__ , truncation=lowerCamelCase__ ) __UpperCAmelCase : List[str] = examples["label"] return result with training_args.main_process_first(desc="dataset map pre-processing" ): __UpperCAmelCase : int = raw_datasets.map( lowerCamelCase__ , batched=lowerCamelCase__ , load_from_cache_file=not data_args.overwrite_cache , desc="Running tokenizer on dataset" , ) if training_args.do_train: if "train" not in raw_datasets: raise ValueError("--do_train requires a train dataset" ) __UpperCAmelCase : List[Any] = raw_datasets["train"] if data_args.max_train_samples is not None: __UpperCAmelCase : Optional[Any] = train_dataset.select(range(data_args.max_train_samples ) ) if training_args.do_eval: if "validation" not in raw_datasets and "validation_matched" not in raw_datasets: raise ValueError("--do_eval requires a validation dataset" ) __UpperCAmelCase : List[str] = raw_datasets["validation"] if data_args.max_eval_samples is not None: __UpperCAmelCase : Optional[Any] = eval_dataset.select(range(data_args.max_eval_samples ) ) if training_args.do_predict or data_args.test_file is not None: if "test" 
not in raw_datasets and "test_matched" not in raw_datasets: raise ValueError("--do_predict requires a test dataset" ) __UpperCAmelCase : List[str] = raw_datasets["test"] if data_args.max_predict_samples is not None: __UpperCAmelCase : int = predict_dataset.select(range(data_args.max_predict_samples ) ) # Log a few random samples from the training set: if training_args.do_train: for index in random.sample(range(len(lowerCamelCase__ ) ) , 3 ): logger.info(f"""Sample {index} of the training set: {train_dataset[index]}.""" ) # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a # predictions and label_ids field) and has to return a dictionary string to float. def compute_metrics(lowerCamelCase__ ): __UpperCAmelCase : Any = p.predictions[0] if isinstance(p.predictions , lowerCamelCase__ ) else p.predictions __UpperCAmelCase : int = np.argmax(lowerCamelCase__ , axis=1 ) return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()} # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding. if data_args.pad_to_max_length: __UpperCAmelCase : Optional[int] = default_data_collator elif training_args.fpaa: __UpperCAmelCase : List[Any] = DataCollatorWithPadding(lowerCamelCase__ , pad_to_multiple_of=8 ) else: __UpperCAmelCase : Dict = None # Initialize our Trainer __UpperCAmelCase : Any = Trainer( model=lowerCamelCase__ , args=lowerCamelCase__ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=lowerCamelCase__ , tokenizer=lowerCamelCase__ , data_collator=lowerCamelCase__ , ) # Training if training_args.do_train: __UpperCAmelCase : List[str] = None if training_args.resume_from_checkpoint is not None: __UpperCAmelCase : Tuple = training_args.resume_from_checkpoint elif last_checkpoint is not None: __UpperCAmelCase : Tuple = last_checkpoint __UpperCAmelCase : Any = trainer.train(resume_from_checkpoint=lowerCamelCase__ ) __UpperCAmelCase : Any = train_result.metrics __UpperCAmelCase : Any = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(lowerCamelCase__ ) ) __UpperCAmelCase : Dict = min(lowerCamelCase__ , len(lowerCamelCase__ ) ) trainer.save_model() # Saves the tokenizer too for easy upload trainer.log_metrics("train" , lowerCamelCase__ ) trainer.save_metrics("train" , lowerCamelCase__ ) trainer.save_state() # Evaluation if training_args.do_eval: logger.info("*** Evaluate ***" ) __UpperCAmelCase : List[str] = trainer.evaluate(eval_dataset=lowerCamelCase__ ) __UpperCAmelCase : List[str] = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowerCamelCase__ ) __UpperCAmelCase : List[str] = min(lowerCamelCase__ , len(lowerCamelCase__ ) ) trainer.log_metrics("eval" , lowerCamelCase__ ) trainer.save_metrics("eval" , lowerCamelCase__ ) if training_args.do_predict: logger.info("*** Predict ***" ) # Removing the `label` columns because it contains -1 and Trainer won't like that. 
__UpperCAmelCase : str = predict_dataset.remove_columns("label" ) __UpperCAmelCase : List[Any] = trainer.predict(lowerCamelCase__ , metric_key_prefix="predict" ).predictions __UpperCAmelCase : int = np.argmax(lowerCamelCase__ , axis=1 ) __UpperCAmelCase : List[Any] = os.path.join(training_args.output_dir , "predict_results_tabfact.txt" ) if trainer.is_world_process_zero(): with open(lowerCamelCase__ , "w" ) as writer: logger.info("***** Predict Results *****" ) writer.write("index\tprediction\n" ) for index, item in enumerate(lowerCamelCase__ ): __UpperCAmelCase : List[Any] = label_list[item] writer.write(f"""{index}\t{item}\n""" ) __UpperCAmelCase : Dict = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"} if training_args.push_to_hub: trainer.push_to_hub(**lowerCamelCase__ ) else: trainer.create_model_card(**lowerCamelCase__ ) def _lowercase ( lowerCamelCase__ ) -> Union[str, Any]: """simple docstring""" main() if __name__ == "__main__": main()
10
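The fine-tuning script above turns each TabFact `table_text` string into a pandas DataFrame before handing it to the TAPEX tokenizer. A minimal stand-alone sketch of that conversion, with a made-up two-column table for illustration:

import pandas as pd

def table_text_to_pandas(table_text: str) -> pd.DataFrame:
    # Rows are newline-separated; cells within a row are separated by "#".
    rows = [line.split("#") for line in table_text.strip("\n").split("\n")]
    # The first row carries the column headers, the remaining rows the records.
    return pd.DataFrame.from_records(rows[1:], columns=rows[0])

# Hypothetical table in the same "#"-delimited format as TabFact's table_text:
print(table_text_to_pandas("city#population\nparis#2161000\nberlin#3645000"))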
'''simple docstring'''
from collections import Counter

import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split

_a : str = datasets.load_iris()
_a : List[Any] = np.array(data["data"])
_a : Optional[Any] = np.array(data["target"])
_a : Dict = data["target_names"]
_a , _a , _a , _a : Any = train_test_split(X, y)

def _lowercase ( lowerCamelCase__ , lowerCamelCase__ ) -> Tuple:
    """simple docstring"""
    return np.linalg.norm(np.array(lowerCamelCase__ ) - np.array(lowerCamelCase__ ) )

def _lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=5 ) -> int:
    """simple docstring"""
    __UpperCAmelCase : List[Any] = zip(lowerCamelCase__ , lowerCamelCase__ )
    # List of distances of all points from the point to be classified
    __UpperCAmelCase : int = []
    for data_point in data:
        __UpperCAmelCase : Optional[Any] = euclidean_distance(data_point[0] , lowerCamelCase__ )
        distances.append((distance, data_point[1]) )
    # Choosing 'k' points with the least distances.
    __UpperCAmelCase : Union[str, Any] = [i[1] for i in sorted(lowerCamelCase__ )[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    __UpperCAmelCase : Dict = Counter(lowerCamelCase__ ).most_common(1 )[0][0]
    return classes[result]

if __name__ == "__main__":
    print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
10
1
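The iris classifier above reduces to three steps: compute all Euclidean distances, keep the k smallest, and take a majority vote. A self-contained sketch on toy 2-D points (coordinates and labels are invented for illustration):

from collections import Counter

import numpy as np

def knn_classify(train_x, train_y, query, k=3):
    # Distance from the query point to every training point.
    dists = [float(np.linalg.norm(np.array(p) - np.array(query))) for p in train_x]
    # Labels of the k closest points, then a majority vote.
    nearest = [label for _, label in sorted(zip(dists, train_y))[:k]]
    return Counter(nearest).most_common(1)[0][0]

points = [(0, 0), (1, 0), (0, 1), (5, 5), (6, 5), (5, 6)]
labels = ["a", "a", "a", "b", "b", "b"]
print(knn_classify(points, labels, (0.5, 0.5)))  # -> "a"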
'''simple docstring'''
from __future__ import annotations

from collections.abc import Callable

def _lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = 100 , ) -> float:
    """simple docstring"""
    __UpperCAmelCase : Union[str, Any] = x_start
    __UpperCAmelCase : Optional[int] = fnc(lowerCamelCase__ )
    __UpperCAmelCase : List[Any] = 0.0
    for _ in range(lowerCamelCase__ ):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        __UpperCAmelCase : List[Any] = (x_end - x_start) / steps + xa
        __UpperCAmelCase : Optional[int] = fnc(lowerCamelCase__ )
        area += abs(fxa + fxa ) * (xa - xa) / 2
        # Increment step
        __UpperCAmelCase : Tuple = xa
        __UpperCAmelCase : Optional[int] = fxa
    return area

if __name__ == "__main__":

    def _lowercase ( lowerCamelCase__ ) -> Optional[Any]:
        """simple docstring"""
        return x**3 + x**2

    print("f(x) = x^3 + x^2")
    print("The area between the curve, x = -5, x = 5 and the x axis is:")
    _a : List[Any] = 10
    while i <= 100000:
        print(f"""with {i} steps: {trapezoidal_area(f, -5, 5, i)}""")
        i *= 10
10
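The routine above approximates each small segment of the curve by a straight line; its accuracy is easy to sanity-check against an integral with a closed form. A minimal sketch (not the script's exact variable layout):

def trapezoid(f, a, b, steps=1000):
    # Sum of trapezoid areas: (f(x_i) + f(x_{i+1})) * h / 2 over uniform steps.
    h = (b - a) / steps
    total = 0.5 * (f(a) + f(b))
    for i in range(1, steps):
        total += f(a + i * h)
    return total * h

# The exact value of the integral of x**2 over [0, 1] is 1/3.
print(trapezoid(lambda x: x * x, 0.0, 1.0))  # ≈ 0.3333335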
'''simple docstring'''
class __A :
    def __init__( self , UpperCamelCase_ ):
        __UpperCAmelCase : Any = set_counts
        __UpperCAmelCase : int = max(UpperCamelCase_ )
        __UpperCAmelCase : List[str] = len(UpperCamelCase_ )
        __UpperCAmelCase : Any = [1] * num_sets
        __UpperCAmelCase : Any = list(range(UpperCamelCase_ ) )

    def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ ):
        __UpperCAmelCase : Optional[int] = self.get_parent(UpperCamelCase_ )
        __UpperCAmelCase : List[Any] = self.get_parent(UpperCamelCase_ )
        if src_parent == dst_parent:
            return False
        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            __UpperCAmelCase : Optional[Any] = 0
            __UpperCAmelCase : List[Any] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            __UpperCAmelCase : Union[str, Any] = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            __UpperCAmelCase : Union[str, Any] = 0
            __UpperCAmelCase : Dict = src_parent
            __UpperCAmelCase : Dict = self.set_counts[src_parent]
        __UpperCAmelCase : Dict = max(self.max_set , UpperCamelCase_ )
        return True

    def _snake_case ( self , UpperCamelCase_ ):
        if self.parents[disj_set] == disj_set:
            return disj_set
        __UpperCAmelCase : str = self.get_parent(self.parents[disj_set] )
        return self.parents[disj_set]
10
1
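The class above is a disjoint-set (union-find) with union by rank, path compression, and extra bookkeeping for the largest set size. A stand-alone sketch of the core structure without the size tracking:

class DisjointSet:
    def __init__(self, n):
        self.parent = list(range(n))
        self.rank = [0] * n

    def find(self, x):
        # Walk up (and compress) until a node is its own parent.
        if self.parent[x] != x:
            self.parent[x] = self.find(self.parent[x])
        return self.parent[x]

    def union(self, a, b):
        ra, rb = self.find(a), self.find(b)
        if ra == rb:
            return False
        # Attach the shallower tree under the deeper one.
        if self.rank[ra] < self.rank[rb]:
            ra, rb = rb, ra
        self.parent[rb] = ra
        if self.rank[ra] == self.rank[rb]:
            self.rank[ra] += 1
        return True

ds = DisjointSet(4)
ds.union(0, 1)
ds.union(2, 3)
print(ds.find(1) == ds.find(0), ds.find(0) == ds.find(2))  # True False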
'''simple docstring''' import inspect import unittest from transformers import ConvNextConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class __A : def __init__( self , UpperCamelCase_ , UpperCamelCase_=13 , UpperCamelCase_=32 , UpperCamelCase_=3 , UpperCamelCase_=4 , UpperCamelCase_=[10, 20, 30, 40] , UpperCamelCase_=[2, 2, 3, 2] , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=37 , UpperCamelCase_="gelu" , UpperCamelCase_=10 , UpperCamelCase_=0.0_2 , UpperCamelCase_=["stage2", "stage3", "stage4"] , UpperCamelCase_=[2, 3, 4] , UpperCamelCase_=None , ): __UpperCAmelCase : Optional[Any] = parent __UpperCAmelCase : Tuple = batch_size __UpperCAmelCase : Optional[int] = image_size __UpperCAmelCase : int = num_channels __UpperCAmelCase : str = num_stages __UpperCAmelCase : Tuple = hidden_sizes __UpperCAmelCase : Dict = depths __UpperCAmelCase : int = is_training __UpperCAmelCase : List[Any] = use_labels __UpperCAmelCase : Optional[int] = intermediate_size __UpperCAmelCase : Tuple = hidden_act __UpperCAmelCase : Dict = num_labels __UpperCAmelCase : Optional[int] = initializer_range __UpperCAmelCase : List[Any] = out_features __UpperCAmelCase : Optional[int] = out_indices __UpperCAmelCase : int = scope def _snake_case ( self ): __UpperCAmelCase : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __UpperCAmelCase : Optional[Any] = None if self.use_labels: __UpperCAmelCase : List[str] = ids_tensor([self.batch_size] , self.num_labels ) __UpperCAmelCase : int = self.get_config() return config, pixel_values, labels def _snake_case ( self ): return ConvNextConfig( num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , ) def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ): __UpperCAmelCase : Dict = ConvNextModel(config=UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() __UpperCAmelCase : int = model(UpperCamelCase_ ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ): __UpperCAmelCase : Tuple = ConvNextForImageClassification(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() __UpperCAmelCase : int = model(UpperCamelCase_ , labels=UpperCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ): __UpperCAmelCase : Dict = 
ConvNextBackbone(config=UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() __UpperCAmelCase : Dict = model(UpperCamelCase_ ) # verify hidden states self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None __UpperCAmelCase : List[Any] = None __UpperCAmelCase : List[str] = ConvNextBackbone(config=UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() __UpperCAmelCase : Dict = model(UpperCamelCase_ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def _snake_case ( self ): __UpperCAmelCase : Union[str, Any] = self.prepare_config_and_inputs() __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = config_and_inputs __UpperCAmelCase : Any = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class __A (__magic_name__ , __magic_name__ , unittest.TestCase ): snake_case :Optional[int] = ( ( ConvNextModel, ConvNextForImageClassification, ConvNextBackbone, ) if is_torch_available() else () ) snake_case :int = ( {"feature-extraction": ConvNextModel, "image-classification": ConvNextForImageClassification} if is_torch_available() else {} ) snake_case :int = True snake_case :Optional[Any] = False snake_case :List[str] = False snake_case :Optional[Any] = False snake_case :Dict = False def _snake_case ( self ): __UpperCAmelCase : Optional[Any] = ConvNextModelTester(self ) __UpperCAmelCase : Union[str, Any] = ConfigTester(self , config_class=UpperCamelCase_ , has_text_modality=UpperCamelCase_ , hidden_size=37 ) def _snake_case ( self ): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _snake_case ( self ): return @unittest.skip(reason="ConvNext does not use inputs_embeds" ) def _snake_case ( self ): pass @unittest.skip(reason="ConvNext does not support input and output embeddings" ) def _snake_case ( self ): pass @unittest.skip(reason="ConvNext does not use feedforward chunking" ) def _snake_case ( self ): pass def _snake_case ( self ): __UpperCAmelCase , __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCAmelCase : Optional[int] = model_class(UpperCamelCase_ ) __UpperCAmelCase : int = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __UpperCAmelCase : Optional[Any] = [*signature.parameters.keys()] __UpperCAmelCase : int = ["pixel_values"] self.assertListEqual(arg_names[:1] , UpperCamelCase_ ) def _snake_case ( self ): __UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_model(*UpperCamelCase_ ) def _snake_case ( self ): __UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*UpperCamelCase_ ) def _snake_case ( self ): def check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ): __UpperCAmelCase : List[Any] = model_class(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() with torch.no_grad(): __UpperCAmelCase : Tuple = model(**self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) ) __UpperCAmelCase : int = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states __UpperCAmelCase : Tuple = self.model_tester.num_stages self.assertEqual(len(UpperCamelCase_ ) , expected_num_stages + 1 ) # ConvNext's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCAmelCase : Dict = True check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __UpperCAmelCase : Dict = True check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) def _snake_case ( self ): __UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCamelCase_ ) @slow def _snake_case ( self ): for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __UpperCAmelCase : int = ConvNextModel.from_pretrained(UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) def _lowercase ( ) -> List[str]: """simple docstring""" __UpperCAmelCase : List[str] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class __A (unittest.TestCase ): @cached_property def _snake_case ( self ): return AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224" ) if is_vision_available() else None @slow def _snake_case ( self ): __UpperCAmelCase : List[str] = ConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224" ).to(UpperCamelCase_ ) __UpperCAmelCase : Union[str, Any] = self.default_image_processor __UpperCAmelCase : Dict = prepare_img() __UpperCAmelCase : Union[str, Any] = image_processor(images=UpperCamelCase_ , return_tensors="pt" ).to(UpperCamelCase_ ) # forward pass with torch.no_grad(): __UpperCAmelCase : List[Any] = model(**UpperCamelCase_ ) # verify the logits __UpperCAmelCase : Union[str, Any] = torch.Size((1, 10_00) ) self.assertEqual(outputs.logits.shape , UpperCamelCase_ ) __UpperCAmelCase : int = torch.tensor([-0.0_2_6_0, -0.4_7_3_9, 0.1_9_1_1] ).to(UpperCamelCase_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase_ , atol=1E-4 ) ) @require_torch class __A (unittest.TestCase , __magic_name__ ): snake_case :List[Any] = (ConvNextBackbone,) if is_torch_available() else () snake_case :Tuple = ConvNextConfig snake_case :Optional[Any] = False def _snake_case ( self ): __UpperCAmelCase : int = ConvNextModelTester(self )
10
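The shape assertions in these tests follow from ConvNext's downsampling schedule: the patchify stem divides the spatial size by 4 and each subsequent stage halves it again. A small sketch of that arithmetic (the stage layout is inferred from the assertions above, not taken from the modeling code):

def convnext_feature_shapes(image_size, hidden_sizes):
    # Stage i sees an overall stride of 4 * 2**i relative to the input.
    shapes = []
    for i, channels in enumerate(hidden_sizes):
        stride = 4 * 2 ** i
        shapes.append((channels, image_size // stride, image_size // stride))
    return shapes

# With the tester's defaults (image_size=32, hidden_sizes=[10, 20, 30, 40]):
print(convnext_feature_shapes(32, [10, 20, 30, 40]))
# [(10, 8, 8), (20, 4, 4), (30, 2, 2), (40, 1, 1)] — matching the asserts above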
'''simple docstring'''
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ ) -> List[str]:
    """simple docstring"""
    __UpperCAmelCase : Dict = (boundary[1] - boundary[0]) / steps
    __UpperCAmelCase : Tuple = boundary[0]
    __UpperCAmelCase : List[str] = boundary[1]
    __UpperCAmelCase : List[Any] = make_points(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
    __UpperCAmelCase : int = 0.0
    y += (h / 2.0) * f(lowerCamelCase__ )
    for i in x_i:
        # print(i)
        y += h * f(lowerCamelCase__ )
    y += (h / 2.0) * f(lowerCamelCase__ )
    return y

def _lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> List[Any]:
    """simple docstring"""
    __UpperCAmelCase : Optional[Any] = a + h
    while x < (b - h):
        yield x
        __UpperCAmelCase : List[str] = x + h

def _lowercase ( lowerCamelCase__ ) -> Optional[Any]:  # enter your function here
    """simple docstring"""
    __UpperCAmelCase : str = (x - 0) * (x - 0)
    return y

def _lowercase ( ) -> int:
    """simple docstring"""
    __UpperCAmelCase : Tuple = 0.0  # Lower bound of integration
    __UpperCAmelCase : Union[str, Any] = 1.0  # Upper bound of integration
    __UpperCAmelCase : Union[str, Any] = 10.0  # define number of steps or resolution
    __UpperCAmelCase : Dict = [a, b]  # define boundary of integration
    __UpperCAmelCase : Optional[int] = method_a(lowerCamelCase__ , lowerCamelCase__ )
    print(f"""y = {y}""" )

if __name__ == "__main__":
    main()
10
1
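A complementary check on the composite rule above: the trapezoid error shrinks like O(h²), so doubling the step count should roughly quarter the error. A small sketch:

def trap(f, a, b, n):
    h = (b - a) / n
    return h * (0.5 * f(a) + sum(f(a + i * h) for i in range(1, n)) + 0.5 * f(b))

exact = 1.0 / 3.0  # integral of x**2 over [0, 1]
for n in (10, 20, 40):
    # Each doubling of n divides the error by about 4.
    print(n, abs(trap(lambda x: x * x, 0.0, 1.0, n) - exact))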
'''simple docstring'''
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Dict:
    """simple docstring"""
    __UpperCAmelCase : Any = [False] * len(lowerCamelCase__ )
    __UpperCAmelCase : Tuple = []
    queue.append(lowerCamelCase__ )
    __UpperCAmelCase : int = True
    while queue:
        __UpperCAmelCase : List[str] = queue.pop(0 )
        for ind in range(len(graph[u] ) ):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(lowerCamelCase__ )
                __UpperCAmelCase : Tuple = True
                __UpperCAmelCase : int = u
    return visited[t]

def _lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> int:
    """simple docstring"""
    __UpperCAmelCase : Tuple = [-1] * (len(lowerCamelCase__ ))
    __UpperCAmelCase : Any = 0
    while bfs(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
        __UpperCAmelCase : Tuple = float("Inf" )
        __UpperCAmelCase : Optional[int] = sink
        while s != source:
            # Find the minimum value in select path
            __UpperCAmelCase : List[str] = min(lowerCamelCase__ , graph[parent[s]][s] )
            __UpperCAmelCase : str = parent[s]
        max_flow += path_flow
        __UpperCAmelCase : List[str] = sink
        while v != source:
            __UpperCAmelCase : Optional[Any] = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            __UpperCAmelCase : Any = parent[v]
    return max_flow

_a : Optional[Any] = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
_a , _a : List[Any] = 0, 5
print(ford_fulkerson(graph, source, sink))
10
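Once the BFS-based Ford-Fulkerson loop above terminates, the max-flow/min-cut theorem gives the minimum cut for free: it is the set of original edges leaving the vertices still reachable in the residual graph. A sketch, assuming a copy of the original capacity matrix was kept before the algorithm mutated `graph` in place:

from collections import deque

def min_cut_edges(residual, original, source):
    # BFS over the residual graph: which vertices can the source still reach?
    seen = {source}
    queue = deque([source])
    while queue:
        u = queue.popleft()
        for v, cap in enumerate(residual[u]):
            if cap > 0 and v not in seen:
                seen.add(v)
                queue.append(v)
    # Cut edges run from a reachable vertex to an unreachable one.
    return [(u, v) for u in sorted(seen) for v in range(len(original))
            if v not in seen and original[u][v] > 0]

# Tiny example: capacities 0->1: 3, 0->2: 2, 1->2: 2; the max flow is 4.
original = [[0, 3, 2], [0, 0, 2], [0, 0, 0]]
residual = [[0, 1, 0], [2, 0, 0], [2, 2, 0]]  # residual capacities after the flow
print(min_cut_edges(residual, original, 0))   # [(0, 2), (1, 2)] — cut value 4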
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, is_vision_available, ) _a : str = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a : str = ["ViTFeatureExtractor"] _a : Dict = ["ViTImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a : int = [ "VIT_PRETRAINED_MODEL_ARCHIVE_LIST", "ViTForImageClassification", "ViTForMaskedImageModeling", "ViTModel", "ViTPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a : List[str] = [ "TFViTForImageClassification", "TFViTModel", "TFViTPreTrainedModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a : Dict = [ "FlaxViTForImageClassification", "FlaxViTModel", "FlaxViTPreTrainedModel", ] if TYPE_CHECKING: from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_vit import ViTFeatureExtractor from .image_processing_vit import ViTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vit import ( VIT_PRETRAINED_MODEL_ARCHIVE_LIST, ViTForImageClassification, ViTForMaskedImageModeling, ViTModel, ViTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel else: import sys _a : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
10
1
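The `_LazyModule` indirection above exists so that importing the ViT package stays cheap: the torch/TF/flax submodules are only imported when one of their names is first touched. A minimal sketch of the same idea using module-level `__getattr__` (PEP 562); the mapping below is illustrative, not the real import table:

import importlib

# attribute name -> submodule that actually defines it (hypothetical entries)
_LAZY_ATTRS = {"ViTModel": ".modeling_vit", "ViTConfig": ".configuration_vit"}

def __getattr__(name):
    # Called only when `name` is not found normally, so the heavy submodule
    # is imported on first access instead of at package import time.
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name], __package__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")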
'''simple docstring'''
import argparse

import OmegaConf
import torch

from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel

def _lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Dict:
    """simple docstring"""
    __UpperCAmelCase : Union[str, Any] = OmegaConf.load(lowerCamelCase__ )
    __UpperCAmelCase : List[str] = torch.load(lowerCamelCase__ , map_location="cpu" )["model"]
    __UpperCAmelCase : List[Any] = list(state_dict.keys() )

    # extract state_dict for VQVAE
    __UpperCAmelCase : Tuple = {}
    __UpperCAmelCase : int = "first_stage_model."
    for key in keys:
        if key.startswith(lowerCamelCase__ ):
            __UpperCAmelCase : str = state_dict[key]

    # extract state_dict for UNetLDM
    __UpperCAmelCase : str = {}
    __UpperCAmelCase : int = "model.diffusion_model."
    for key in keys:
        if key.startswith(lowerCamelCase__ ):
            __UpperCAmelCase : List[Any] = state_dict[key]

    __UpperCAmelCase : int = config.model.params.first_stage_config.params
    __UpperCAmelCase : str = config.model.params.unet_config.params

    __UpperCAmelCase : Union[str, Any] = VQModel(**lowerCamelCase__ ).eval()
    vqvae.load_state_dict(lowerCamelCase__ )

    __UpperCAmelCase : Optional[Any] = UNetLDMModel(**lowerCamelCase__ ).eval()
    unet.load_state_dict(lowerCamelCase__ )

    __UpperCAmelCase : List[Any] = DDIMScheduler(
        timesteps=config.model.params.timesteps ,
        beta_schedule="scaled_linear" ,
        beta_start=config.model.params.linear_start ,
        beta_end=config.model.params.linear_end ,
        clip_sample=lowerCamelCase__ ,
    )

    __UpperCAmelCase : Dict = LDMPipeline(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
    pipeline.save_pretrained(lowerCamelCase__ )

if __name__ == "__main__":
    _a : str = argparse.ArgumentParser()
    parser.add_argument("--checkpoint_path", type=str, required=True)
    parser.add_argument("--config_path", type=str, required=True)
    parser.add_argument("--output_path", type=str, required=True)
    _a : int = parser.parse_args()
    convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
10
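The conversion script's two extraction loops follow one pattern: partition a flat checkpoint into per-submodule state dicts by key prefix. A stand-alone sketch (the obfuscated assignments above hide whether the prefix is stripped; stripping is the usual choice so the keys match the target module, and the sample keys are hypothetical):

def split_state_dict(state_dict, prefix):
    # Keep only the entries under `prefix`, with the prefix removed.
    return {key[len(prefix):]: value
            for key, value in state_dict.items()
            if key.startswith(prefix)}

ckpt = {"first_stage_model.encoder.weight": 1, "model.diffusion_model.conv_in.weight": 2}
print(split_state_dict(ckpt, "first_stage_model."))  # {'encoder.weight': 1}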
'''simple docstring''' import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging _a : str = logging.get_logger(__name__) _a : Tuple = "▁" _a : Optional[int] = {"vocab_file": "sentencepiece.bpe.model"} _a : Tuple = { "vocab_file": { "xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model", "xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model", "xlm-roberta-large-finetuned-conll02-dutch": ( "https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model" ), "xlm-roberta-large-finetuned-conll02-spanish": ( "https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model" ), "xlm-roberta-large-finetuned-conll03-english": ( "https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model" ), "xlm-roberta-large-finetuned-conll03-german": ( "https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model" ), } } _a : Optional[Any] = { "xlm-roberta-base": 512, "xlm-roberta-large": 512, "xlm-roberta-large-finetuned-conll02-dutch": 512, "xlm-roberta-large-finetuned-conll02-spanish": 512, "xlm-roberta-large-finetuned-conll03-english": 512, "xlm-roberta-large-finetuned-conll03-german": 512, } class __A (__magic_name__ ): snake_case :Union[str, Any] = VOCAB_FILES_NAMES snake_case :Any = PRETRAINED_VOCAB_FILES_MAP snake_case :Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case :Optional[int] = ["input_ids", "attention_mask"] def __init__( self , UpperCamelCase_ , UpperCamelCase_="<s>" , UpperCamelCase_="</s>" , UpperCamelCase_="</s>" , UpperCamelCase_="<s>" , UpperCamelCase_="<unk>" , UpperCamelCase_="<pad>" , UpperCamelCase_="<mask>" , UpperCamelCase_ = None , **UpperCamelCase_ , ): # Mask token behave like a normal word, i.e. include the space before it __UpperCAmelCase : Optional[int] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else mask_token __UpperCAmelCase : int = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase_ , ) __UpperCAmelCase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(UpperCamelCase_ ) ) __UpperCAmelCase : Union[str, Any] = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # Mimic fairseq token-to-id alignment for the first 4 token __UpperCAmelCase : Optional[Any] = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab __UpperCAmelCase : List[Any] = 1 __UpperCAmelCase : Optional[Any] = len(self.sp_model ) + self.fairseq_offset __UpperCAmelCase : str = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self ): __UpperCAmelCase : List[str] = self.__dict__.copy() __UpperCAmelCase : str = None __UpperCAmelCase : str = self.sp_model.serialized_model_proto() return state def __setstate__( self , UpperCamelCase_ ): __UpperCAmelCase : Union[str, Any] = d # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): __UpperCAmelCase : Tuple = {} __UpperCAmelCase : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] __UpperCAmelCase : List[Any] = [self.cls_token_id] __UpperCAmelCase : Union[str, Any] = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCamelCase_ , token_ids_a=UpperCamelCase_ , already_has_special_tokens=UpperCamelCase_ ) if token_ids_a is None: return [1] + ([0] * len(UpperCamelCase_ )) + [1] return [1] + ([0] * len(UpperCamelCase_ )) + [1, 1] + ([0] * len(UpperCamelCase_ )) + [1] def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ): __UpperCAmelCase : Dict = [self.sep_token_id] __UpperCAmelCase : List[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def _snake_case ( self ): return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token def _snake_case ( self ): __UpperCAmelCase : Union[str, Any] = {self.convert_ids_to_tokens(UpperCamelCase_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def _snake_case ( self , UpperCamelCase_ ): return self.sp_model.encode(UpperCamelCase_ , out_type=UpperCamelCase_ ) def _snake_case ( self , UpperCamelCase_ ): if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] __UpperCAmelCase : Optional[int] = self.sp_model.PieceToId(UpperCamelCase_ ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def _snake_case ( self , UpperCamelCase_ ): if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def _snake_case ( self , UpperCamelCase_ ): __UpperCAmelCase : Tuple = "".join(UpperCamelCase_ ).replace(UpperCamelCase_ , " " ).strip() return out_string def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ): if not os.path.isdir(UpperCamelCase_ ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return __UpperCAmelCase : List[str] = os.path.join( UpperCamelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase_ ) 
and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , UpperCamelCase_ ) elif not os.path.isfile(self.vocab_file ): with open(UpperCamelCase_ , "wb" ) as fi: __UpperCAmelCase : Optional[int] = self.sp_model.serialized_model_proto() fi.write(UpperCamelCase_ ) return (out_vocab_file,)
10
1
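The vocabulary table in the comments above encodes the whole trick: fairseq pins four specials at ids 0-3, so every SentencePiece id is shifted by a fixed offset, and spm id 0 (its own `<unk>`) is routed back to the fairseq `<unk>`. A sketch of that token-to-id path:

FAIRSEQ_SPECIALS = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
FAIRSEQ_OFFSET = 1  # every SentencePiece id is shifted up by one

def token_to_id(token, spm_piece_to_id):
    if token in FAIRSEQ_SPECIALS:
        return FAIRSEQ_SPECIALS[token]
    spm_id = spm_piece_to_id.get(token, 0)
    # spm id 0 means "unknown piece"; map it to the fairseq <unk> id.
    return spm_id + FAIRSEQ_OFFSET if spm_id else FAIRSEQ_SPECIALS["<unk>"]

# Toy piece table (real ids come from sentencepiece.bpe.model):
print(token_to_id("▁de", {"▁de": 7}))  # 8 — matches the alignment table above
print(token_to_id("<pad>", {}))        # 1 — pinned special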
'''simple docstring''' import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import SPIECE_UNDERLINE, logging _a : List[Any] = logging.get_logger(__name__) _a : Optional[Any] = {"vocab_file": "spiece.model"} _a : Optional[Any] = { "vocab_file": { "TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model", } } class __A (__magic_name__ ): def __init__( self , UpperCamelCase_ , UpperCamelCase_=False , UpperCamelCase_=True , UpperCamelCase_=False , UpperCamelCase_="<s>" , UpperCamelCase_="</s>" , UpperCamelCase_="<unk>" , UpperCamelCase_="<sep>" , UpperCamelCase_="<pad>" , UpperCamelCase_="<cls>" , UpperCamelCase_="<mask>" , UpperCamelCase_=["<eop>", "<eod>"] , UpperCamelCase_ = None , **UpperCamelCase_ , ): __UpperCAmelCase : Union[str, Any] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else mask_token __UpperCAmelCase : int = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=UpperCamelCase_ , remove_space=UpperCamelCase_ , keep_accents=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , additional_special_tokens=UpperCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase_ , ) __UpperCAmelCase : Union[str, Any] = 3 __UpperCAmelCase : Tuple = do_lower_case __UpperCAmelCase : List[str] = remove_space __UpperCAmelCase : Union[str, Any] = keep_accents __UpperCAmelCase : Union[str, Any] = vocab_file __UpperCAmelCase : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(UpperCamelCase_ ) try: import jieba except ModuleNotFoundError as error: raise error.__class__( "You need to install jieba to use CpmTokenizer or CpmTokenizerFast. " "See https://pypi.org/project/jieba/ for installation." 
) __UpperCAmelCase : Any = jieba __UpperCAmelCase : List[Any] = str.maketrans(" \n" , "\u2582\u2583" ) @property # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size def _snake_case ( self ): return len(self.sp_model ) def _snake_case ( self ): __UpperCAmelCase : List[str] = {self.convert_ids_to_tokens(UpperCamelCase_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ): __UpperCAmelCase : Optional[Any] = self.__dict__.copy() __UpperCAmelCase : Dict = None return state def __setstate__( self , UpperCamelCase_ ): __UpperCAmelCase : int = d # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): __UpperCAmelCase : int = {} __UpperCAmelCase : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def _snake_case ( self , UpperCamelCase_ ): if self.remove_space: __UpperCAmelCase : List[str] = " ".join(inputs.strip().split() ) else: __UpperCAmelCase : str = inputs __UpperCAmelCase : Any = outputs.replace("``" , "\"" ).replace("''" , "\"" ) if not self.keep_accents: __UpperCAmelCase : Optional[int] = unicodedata.normalize("NFKD" , UpperCamelCase_ ) __UpperCAmelCase : Optional[int] = "".join([c for c in outputs if not unicodedata.combining(UpperCamelCase_ )] ) if self.do_lower_case: __UpperCAmelCase : str = outputs.lower() return outputs def _snake_case ( self , UpperCamelCase_ ): __UpperCAmelCase : Optional[int] = self.preprocess_text(UpperCamelCase_ ) __UpperCAmelCase : Union[str, Any] = self.sp_model.encode(UpperCamelCase_ , out_type=UpperCamelCase_ ) __UpperCAmelCase : Optional[Any] = [] for piece in pieces: if len(UpperCamelCase_ ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit(): __UpperCAmelCase : str = self.sp_model.EncodeAsPieces(piece[:-1].replace(UpperCamelCase_ , "" ) ) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0] ) == 1: __UpperCAmelCase : List[Any] = cur_pieces[1:] else: __UpperCAmelCase : Any = cur_pieces[0][1:] cur_pieces.append(piece[-1] ) new_pieces.extend(UpperCamelCase_ ) else: new_pieces.append(UpperCamelCase_ ) return new_pieces def _snake_case ( self , UpperCamelCase_ ): return self.sp_model.PieceToId(UpperCamelCase_ ) def _snake_case ( self , UpperCamelCase_ ): return self.sp_model.IdToPiece(UpperCamelCase_ ) def _snake_case ( self , UpperCamelCase_ ): __UpperCAmelCase : Optional[int] = "".join(UpperCamelCase_ ).replace(UpperCamelCase_ , " " ).strip() return out_string def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ): __UpperCAmelCase : Optional[int] = [self.sep_token_id] __UpperCAmelCase : Union[str, Any] = [self.cls_token_id] if token_ids_a is None: return token_ids_a + sep + cls return token_ids_a + sep + token_ids_a + sep + cls def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCamelCase_ , token_ids_a=UpperCamelCase_ , already_has_special_tokens=UpperCamelCase_ ) if token_ids_a is not None: return ([0] * len(UpperCamelCase_ )) + [1] + ([0] * len(UpperCamelCase_ )) + [1, 1] return ([0] * len(UpperCamelCase_ )) + [1, 1] def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ): __UpperCAmelCase : int = [self.sep_token_id] __UpperCAmelCase : Tuple = [2] if token_ids_a is None: return len(token_ids_a + sep ) * [0] + cls_segment_id return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * 
[1] + cls_segment_id def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ): if not os.path.isdir(UpperCamelCase_ ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return __UpperCAmelCase : List[Any] = os.path.join( UpperCamelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , UpperCamelCase_ ) elif not os.path.isfile(self.vocab_file ): with open(UpperCamelCase_ , "wb" ) as fi: __UpperCAmelCase : Optional[int] = self.sp_model.serialized_model_proto() fi.write(UpperCamelCase_ ) return (out_vocab_file,) def _snake_case ( self , *UpperCamelCase_ , **UpperCamelCase_ ): __UpperCAmelCase : str = super()._decode(*UpperCamelCase_ , **UpperCamelCase_ ) __UpperCAmelCase : Optional[Any] = text.replace(" " , "" ).replace("\u2582" , " " ).replace("\u2583" , "\n" ) return text
10
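The `str.maketrans(" \n", "\u2582\u2583")` table set up above is the CPM trick for surviving a whitespace-destroying tokenizer: spaces and newlines are swapped for the placeholder glyphs ▂/▃ before encoding and restored in `_decode`. A round-trip sketch:

ENCODE = str.maketrans(" \n", "\u2582\u2583")

def protect_whitespace(text):
    return text.translate(ENCODE)

def restore_whitespace(text):
    # Mirrors the _decode post-processing above.
    return text.replace(" ", "").replace("\u2582", " ").replace("\u2583", "\n")

sample = "hello world\nbye"
print(restore_whitespace(protect_whitespace(sample)) == sample)  # True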
'''simple docstring''' import time import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers.generation import ( MaxLengthCriteria, MaxNewTokensCriteria, MaxTimeCriteria, StoppingCriteriaList, validate_stopping_criteria, ) @require_torch class __A (unittest.TestCase ): def _snake_case ( self , UpperCamelCase_ ): __UpperCAmelCase : List[str] = 3 __UpperCAmelCase : Tuple = 2_50 __UpperCAmelCase : str = ids_tensor((batch_size, length) , UpperCamelCase_ ) __UpperCAmelCase : Any = torch.ones((batch_size, length) , device=UpperCamelCase_ , dtype=torch.float ) / length return input_ids, scores def _snake_case ( self ): __UpperCAmelCase , __UpperCAmelCase : Tuple = self._get_tensors(5 ) __UpperCAmelCase : Tuple = StoppingCriteriaList( [ MaxLengthCriteria(max_length=10 ), MaxTimeCriteria(max_time=0.1 ), ] ) self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) __UpperCAmelCase , __UpperCAmelCase : int = self._get_tensors(9 ) self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) __UpperCAmelCase , __UpperCAmelCase : Optional[int] = self._get_tensors(10 ) self.assertTrue(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) def _snake_case ( self ): __UpperCAmelCase : int = MaxLengthCriteria(max_length=10 ) __UpperCAmelCase , __UpperCAmelCase : Tuple = self._get_tensors(5 ) self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) __UpperCAmelCase , __UpperCAmelCase : Dict = self._get_tensors(9 ) self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) __UpperCAmelCase , __UpperCAmelCase : Optional[int] = self._get_tensors(10 ) self.assertTrue(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) def _snake_case ( self ): __UpperCAmelCase : Optional[Any] = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 ) __UpperCAmelCase , __UpperCAmelCase : List[str] = self._get_tensors(5 ) self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) __UpperCAmelCase , __UpperCAmelCase : Dict = self._get_tensors(9 ) self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) __UpperCAmelCase , __UpperCAmelCase : Optional[Any] = self._get_tensors(10 ) self.assertTrue(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) __UpperCAmelCase : Union[str, Any] = StoppingCriteriaList([criteria] ) self.assertEqual(criteria_list.max_length , 10 ) def _snake_case ( self ): __UpperCAmelCase , __UpperCAmelCase : Optional[Any] = self._get_tensors(5 ) __UpperCAmelCase : str = MaxTimeCriteria(max_time=0.1 ) self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) __UpperCAmelCase : str = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 ) self.assertTrue(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) def _snake_case ( self ): validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 10 ) with self.assertWarns(UpperCamelCase_ ): validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 11 ) __UpperCAmelCase : Optional[int] = validate_stopping_criteria(StoppingCriteriaList() , 11 ) self.assertEqual(len(UpperCamelCase_ ) , 1 )
10
1
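The criteria exercised above all share the `StoppingCriteria` interface: a callable over `(input_ids, scores)` that reports whether generation should halt. A minimal custom criterion in the same spirit (a sketch, not one of the built-ins; assumes torch and transformers are installed):

import torch
from transformers.generation import StoppingCriteria

class LengthBudgetCriteria(StoppingCriteria):
    """Stop once the running sequence reaches a fixed length budget."""

    def __init__(self, max_length: int):
        self.max_length = max_length

    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        # input_ids has shape (batch, current_length).
        return input_ids.shape[-1] >= self.max_length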
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging _a : Tuple = logging.get_logger(__name__) _a : int = { "facebook/dpr-ctx_encoder-single-nq-base": ( "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json" ), "facebook/dpr-question_encoder-single-nq-base": ( "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json" ), "facebook/dpr-reader-single-nq-base": ( "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json" ), "facebook/dpr-ctx_encoder-multiset-base": ( "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json" ), "facebook/dpr-question_encoder-multiset-base": ( "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json" ), "facebook/dpr-reader-multiset-base": ( "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json" ), } class __A (__magic_name__ ): snake_case :Union[str, Any] = "dpr" def __init__( self , UpperCamelCase_=3_05_22 , UpperCamelCase_=7_68 , UpperCamelCase_=12 , UpperCamelCase_=12 , UpperCamelCase_=30_72 , UpperCamelCase_="gelu" , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=5_12 , UpperCamelCase_=2 , UpperCamelCase_=0.0_2 , UpperCamelCase_=1E-12 , UpperCamelCase_=0 , UpperCamelCase_="absolute" , UpperCamelCase_ = 0 , **UpperCamelCase_ , ): super().__init__(pad_token_id=UpperCamelCase_ , **UpperCamelCase_ ) __UpperCAmelCase : Any = vocab_size __UpperCAmelCase : Optional[int] = hidden_size __UpperCAmelCase : Union[str, Any] = num_hidden_layers __UpperCAmelCase : str = num_attention_heads __UpperCAmelCase : List[str] = hidden_act __UpperCAmelCase : Union[str, Any] = intermediate_size __UpperCAmelCase : List[Any] = hidden_dropout_prob __UpperCAmelCase : Dict = attention_probs_dropout_prob __UpperCAmelCase : Any = max_position_embeddings __UpperCAmelCase : str = type_vocab_size __UpperCAmelCase : str = initializer_range __UpperCAmelCase : List[Any] = layer_norm_eps __UpperCAmelCase : List[str] = projection_dim __UpperCAmelCase : Optional[int] = position_embedding_type
10
'''simple docstring''' import json import re from typing import TYPE_CHECKING, List, Optional, Tuple, Union import numpy as np from ...utils import is_tf_available, is_torch_available, logging if TYPE_CHECKING: if is_torch_available(): import torch if is_tf_available(): import tensorflow as tf from tokenizers import pre_tokenizers from ...tokenization_utils_base import BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from .tokenization_codegen import CodeGenTokenizer _a : Union[str, Any] = logging.get_logger(__name__) _a : Any = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"} _a : Tuple = { "vocab_file": { "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json", }, "merges_file": { "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt", }, "tokenizer_file": { "Salesforce/codegen-350M-mono": ( "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json" ), }, } _a : Dict = { "Salesforce/codegen-350M-mono": 2048, } class __A (__magic_name__ ): snake_case :Optional[Any] = VOCAB_FILES_NAMES snake_case :str = PRETRAINED_VOCAB_FILES_MAP snake_case :Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case :Tuple = ["input_ids", "attention_mask"] snake_case :Dict = CodeGenTokenizer def __init__( self , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_="<|endoftext|>" , UpperCamelCase_="<|endoftext|>" , UpperCamelCase_="<|endoftext|>" , UpperCamelCase_=False , **UpperCamelCase_ , ): super().__init__( UpperCamelCase_ , UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , unk_token=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , **UpperCamelCase_ , ) if kwargs.pop("add_bos_token" , UpperCamelCase_ ): __UpperCAmelCase : int = kwargs.pop("name_or_path" , "" ) raise ValueError( "Currenty GPT2's fast tokenizer does NOT support adding a BOS token." "Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n" f"""`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n""" f"""`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n""" "This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005." " so that the fast tokenizer works correctly." ) __UpperCAmelCase : Any = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("add_prefix_space" , UpperCamelCase_ ) != add_prefix_space: __UpperCAmelCase : str = getattr(UpperCamelCase_ , pre_tok_state.pop("type" ) ) __UpperCAmelCase : Optional[int] = add_prefix_space __UpperCAmelCase : Tuple = pre_tok_class(**UpperCamelCase_ ) __UpperCAmelCase : Tuple = add_prefix_space def _snake_case ( self , *UpperCamelCase_ , **UpperCamelCase_ ): __UpperCAmelCase : Optional[Any] = kwargs.get("is_split_into_words" , UpperCamelCase_ ) assert self.add_prefix_space or not is_split_into_words, ( f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ "to use it with pretokenized inputs." 
) return super()._batch_encode_plus(*UpperCamelCase_ , **UpperCamelCase_ ) def _snake_case ( self , *UpperCamelCase_ , **UpperCamelCase_ ): __UpperCAmelCase : Any = kwargs.get("is_split_into_words" , UpperCamelCase_ ) assert self.add_prefix_space or not is_split_into_words, ( f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ "to use it with pretokenized inputs." ) return super()._encode_plus(*UpperCamelCase_ , **UpperCamelCase_ ) def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ): __UpperCAmelCase : int = self._tokenizer.model.save(UpperCamelCase_ , name=UpperCamelCase_ ) return tuple(UpperCamelCase_ ) def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = False , UpperCamelCase_ = None , UpperCamelCase_ = None , **UpperCamelCase_ , ): __UpperCAmelCase : str = super().decode( token_ids=UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ , clean_up_tokenization_spaces=UpperCamelCase_ , **UpperCamelCase_ , ) if truncate_before_pattern is not None and len(UpperCamelCase_ ) > 0: __UpperCAmelCase : Union[str, Any] = self.truncate(UpperCamelCase_ , UpperCamelCase_ ) return decoded_text def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ ): def find_re(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ): __UpperCAmelCase : Dict = pattern.search(UpperCamelCase_ , UpperCamelCase_ ) return m.start() if m else -1 __UpperCAmelCase : List[str] = [re.compile(UpperCamelCase_ , re.MULTILINE ) for pattern in truncate_before_pattern] __UpperCAmelCase : Optional[Any] = list(re.finditer("^print" , UpperCamelCase_ , re.MULTILINE ) ) if len(UpperCamelCase_ ) > 1: __UpperCAmelCase : List[Any] = completion[: prints[1].start()] __UpperCAmelCase : Tuple = list(re.finditer("^def" , UpperCamelCase_ , re.MULTILINE ) ) if len(UpperCamelCase_ ) > 1: __UpperCAmelCase : Union[str, Any] = completion[: defs[1].start()] __UpperCAmelCase : Dict = 0 __UpperCAmelCase : Dict = [ pos for pos in [find_re(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) for terminal in terminals] if pos != -1 ] if len(UpperCamelCase_ ) > 0: return completion[: min(UpperCamelCase_ )] else: return completion
10
1
'''simple docstring''' import warnings from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _a : Dict = logging.get_logger(__name__) _a : int = { "nvidia/segformer-b0-finetuned-ade-512-512": ( "https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json" ), # See all SegFormer models at https://huggingface.co/models?filter=segformer } class __A (__magic_name__ ): snake_case :Optional[Any] = "segformer" def __init__( self , UpperCamelCase_=3 , UpperCamelCase_=4 , UpperCamelCase_=[2, 2, 2, 2] , UpperCamelCase_=[8, 4, 2, 1] , UpperCamelCase_=[32, 64, 1_60, 2_56] , UpperCamelCase_=[7, 3, 3, 3] , UpperCamelCase_=[4, 2, 2, 2] , UpperCamelCase_=[1, 2, 5, 8] , UpperCamelCase_=[4, 4, 4, 4] , UpperCamelCase_="gelu" , UpperCamelCase_=0.0 , UpperCamelCase_=0.0 , UpperCamelCase_=0.1 , UpperCamelCase_=0.0_2 , UpperCamelCase_=0.1 , UpperCamelCase_=1E-6 , UpperCamelCase_=2_56 , UpperCamelCase_=2_55 , **UpperCamelCase_ , ): super().__init__(**UpperCamelCase_ ) if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False: warnings.warn( "Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be" " removed, as the behaviour will default to that of reshape_last_stage = True." , UpperCamelCase_ , ) __UpperCAmelCase : int = num_channels __UpperCAmelCase : Dict = num_encoder_blocks __UpperCAmelCase : Dict = depths __UpperCAmelCase : Optional[Any] = sr_ratios __UpperCAmelCase : Any = hidden_sizes __UpperCAmelCase : Union[str, Any] = patch_sizes __UpperCAmelCase : Union[str, Any] = strides __UpperCAmelCase : Union[str, Any] = mlp_ratios __UpperCAmelCase : Dict = num_attention_heads __UpperCAmelCase : List[Any] = hidden_act __UpperCAmelCase : int = hidden_dropout_prob __UpperCAmelCase : Union[str, Any] = attention_probs_dropout_prob __UpperCAmelCase : Union[str, Any] = classifier_dropout_prob __UpperCAmelCase : List[Any] = initializer_range __UpperCAmelCase : List[str] = drop_path_rate __UpperCAmelCase : Any = layer_norm_eps __UpperCAmelCase : List[str] = decoder_hidden_size __UpperCAmelCase : Any = kwargs.get("reshape_last_stage" , UpperCamelCase_ ) __UpperCAmelCase : str = semantic_loss_ignore_index class __A (__magic_name__ ): snake_case :List[Any] = version.parse("1.11" ) @property def _snake_case ( self ): return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ] ) @property def _snake_case ( self ): return 1E-4 @property def _snake_case ( self ): return 12
10
'''simple docstring''' import json import os from functools import lru_cache from typing import List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging _a : Optional[Any] = logging.get_logger(__name__) _a : int = {"vocab_file": "vocab.json", "merges_file": "merges.txt"} # See all BART models at https://huggingface.co/models?filter=bart _a : Tuple = { "vocab_file": { "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json", "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json", "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json", "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json", "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json", "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json", }, "merges_file": { "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt", "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt", "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt", "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt", "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt", "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt", }, } _a : List[Any] = { "facebook/bart-base": 1024, "facebook/bart-large": 1024, "facebook/bart-large-mnli": 1024, "facebook/bart-large-cnn": 1024, "facebook/bart-large-xsum": 1024, "yjernite/bart_eli5": 1024, } @lru_cache() def _lowercase ( ) -> List[Any]: """simple docstring""" __UpperCAmelCase : Dict = ( list(range(ord("!" 
) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) ) ) __UpperCAmelCase : Optional[Any] = bs[:] __UpperCAmelCase : Optional[int] = 0 for b in range(2**8 ): if b not in bs: bs.append(lowerCamelCase__ ) cs.append(2**8 + n ) n += 1 __UpperCAmelCase : Dict = [chr(lowerCamelCase__ ) for n in cs] return dict(zip(lowerCamelCase__ , lowerCamelCase__ ) ) def _lowercase ( lowerCamelCase__ ) -> str: """simple docstring""" __UpperCAmelCase : Dict = set() __UpperCAmelCase : Union[str, Any] = word[0] for char in word[1:]: pairs.add((prev_char, char) ) __UpperCAmelCase : Optional[Any] = char return pairs class __A (__magic_name__ ): snake_case :Optional[int] = VOCAB_FILES_NAMES snake_case :List[Any] = PRETRAINED_VOCAB_FILES_MAP snake_case :Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case :Optional[int] = ["input_ids", "attention_mask"] def __init__( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_="replace" , UpperCamelCase_="<s>" , UpperCamelCase_="</s>" , UpperCamelCase_="</s>" , UpperCamelCase_="<s>" , UpperCamelCase_="<unk>" , UpperCamelCase_="<pad>" , UpperCamelCase_="<mask>" , UpperCamelCase_=False , **UpperCamelCase_ , ): __UpperCAmelCase : str = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else bos_token __UpperCAmelCase : List[str] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else eos_token __UpperCAmelCase : Optional[int] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else sep_token __UpperCAmelCase : int = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else cls_token __UpperCAmelCase : Optional[int] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else unk_token __UpperCAmelCase : Dict = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it __UpperCAmelCase : Union[str, Any] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else mask_token super().__init__( errors=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , **UpperCamelCase_ , ) with open(UpperCamelCase_ , encoding="utf-8" ) as vocab_handle: __UpperCAmelCase : int = json.load(UpperCamelCase_ ) __UpperCAmelCase : Any = {v: k for k, v in self.encoder.items()} __UpperCAmelCase : Any = errors # how to handle errors in decoding __UpperCAmelCase : str = bytes_to_unicode() __UpperCAmelCase : List[str] = {v: k for k, v in self.byte_encoder.items()} with open(UpperCamelCase_ , encoding="utf-8" ) as merges_handle: __UpperCAmelCase : str = merges_handle.read().split("\n" )[1:-1] __UpperCAmelCase : List[str] = [tuple(merge.split() ) for merge in bpe_merges] __UpperCAmelCase : Union[str, Any] = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_ ) ) ) ) __UpperCAmelCase : Optional[int] = {} __UpperCAmelCase : Optional[int] = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions __UpperCAmelCase : Dict = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" ) @property def _snake_case ( self ): return len(self.encoder ) def _snake_case ( self ): return dict(self.encoder , **self.added_tokens_encoder ) def _snake_case ( self , UpperCamelCase_ ): if token in self.cache: return self.cache[token] __UpperCAmelCase : List[str] = tuple(UpperCamelCase_ ) __UpperCAmelCase : str = get_pairs(UpperCamelCase_ ) if not pairs: return token while True: __UpperCAmelCase : str = min(UpperCamelCase_ , key=lambda UpperCamelCase_ : self.bpe_ranks.get(UpperCamelCase_ , float("inf" ) ) ) if bigram not in self.bpe_ranks: break __UpperCAmelCase , __UpperCAmelCase : List[Any] = bigram __UpperCAmelCase : Any = [] __UpperCAmelCase : List[str] = 0 while i < len(UpperCamelCase_ ): try: __UpperCAmelCase : Union[str, Any] = word.index(UpperCamelCase_ , UpperCamelCase_ ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) __UpperCAmelCase : str = j if word[i] == first and i < len(UpperCamelCase_ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 __UpperCAmelCase : Dict = tuple(UpperCamelCase_ ) __UpperCAmelCase : str = new_word if len(UpperCamelCase_ ) == 1: break else: __UpperCAmelCase : int = get_pairs(UpperCamelCase_ ) __UpperCAmelCase : Optional[int] = " ".join(UpperCamelCase_ ) __UpperCAmelCase : Dict = word return word def _snake_case ( self , UpperCamelCase_ ): __UpperCAmelCase : Optional[Any] = [] for token in re.findall(self.pat , UpperCamelCase_ ): __UpperCAmelCase : Any = "".join( self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(UpperCamelCase_ ).split(" " ) ) return bpe_tokens def _snake_case ( self , UpperCamelCase_ ): return self.encoder.get(UpperCamelCase_ , self.encoder.get(self.unk_token ) ) def _snake_case ( self , UpperCamelCase_ ): return self.decoder.get(UpperCamelCase_ ) def _snake_case ( self , UpperCamelCase_ ): __UpperCAmelCase : List[str] = 
"".join(UpperCamelCase_ ) __UpperCAmelCase : Union[str, Any] = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors ) return text def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ): if not os.path.isdir(UpperCamelCase_ ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return __UpperCAmelCase : Any = os.path.join( UpperCamelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) __UpperCAmelCase : Optional[int] = os.path.join( UpperCamelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) with open(UpperCamelCase_ , "w" , encoding="utf-8" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=UpperCamelCase_ , ensure_ascii=UpperCamelCase_ ) + "\n" ) __UpperCAmelCase : str = 0 with open(UpperCamelCase_ , "w" , encoding="utf-8" ) as writer: writer.write("#version: 0.2\n" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda UpperCamelCase_ : kv[1] ): if index != token_index: logger.warning( f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.""" " Please check that the tokenizer is not corrupted!" ) __UpperCAmelCase : str = token_index writer.write(" ".join(UpperCamelCase_ ) + "\n" ) index += 1 return vocab_file, merge_file def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] __UpperCAmelCase : List[Any] = [self.cls_token_id] __UpperCAmelCase : Tuple = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCamelCase_ , token_ids_a=UpperCamelCase_ , already_has_special_tokens=UpperCamelCase_ ) if token_ids_a is None: return [1] + ([0] * len(UpperCamelCase_ )) + [1] return [1] + ([0] * len(UpperCamelCase_ )) + [1, 1] + ([0] * len(UpperCamelCase_ )) + [1] def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ): __UpperCAmelCase : int = [self.sep_token_id] __UpperCAmelCase : List[str] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_=False , **UpperCamelCase_ ): __UpperCAmelCase : List[str] = kwargs.pop("add_prefix_space" , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(UpperCamelCase_ ) > 0 and not text[0].isspace()): __UpperCAmelCase : Tuple = " " + text return (text, kwargs)
10
1
'''simple docstring''' import unittest from transformers import ( MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING, TextGenerationPipeline, logging, pipeline, ) from transformers.testing_utils import ( CaptureLogger, is_pipeline_test, require_accelerate, require_tf, require_torch, require_torch_gpu, require_torch_or_tf, ) from .test_pipelines_common import ANY @is_pipeline_test @require_torch_or_tf class __A (unittest.TestCase ): snake_case :Dict = MODEL_FOR_CAUSAL_LM_MAPPING snake_case :Tuple = TF_MODEL_FOR_CAUSAL_LM_MAPPING @require_torch def _snake_case ( self ): __UpperCAmelCase : str = pipeline(task="text-generation" , model="sshleifer/tiny-ctrl" , framework="pt" ) # Using `do_sample=False` to force deterministic output __UpperCAmelCase : Union[str, Any] = text_generator("This is a test" , do_sample=UpperCamelCase_ ) self.assertEqual( UpperCamelCase_ , [ { "generated_text": ( "This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope." " oscope. FiliFili@@" ) } ] , ) __UpperCAmelCase : List[Any] = text_generator(["This is a test", "This is a second test"] ) self.assertEqual( UpperCamelCase_ , [ [ { "generated_text": ( "This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope." " oscope. FiliFili@@" ) } ], [ { "generated_text": ( "This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy" " oscope. oscope. FiliFili@@" ) } ], ] , ) __UpperCAmelCase : Union[str, Any] = text_generator("This is a test" , do_sample=UpperCamelCase_ , num_return_sequences=2 , return_tensors=UpperCamelCase_ ) self.assertEqual( UpperCamelCase_ , [ {"generated_token_ids": ANY(UpperCamelCase_ )}, {"generated_token_ids": ANY(UpperCamelCase_ )}, ] , ) __UpperCAmelCase : List[Any] = text_generator.model.config.eos_token_id __UpperCAmelCase : Dict = "<pad>" __UpperCAmelCase : int = text_generator( ["This is a test", "This is a second test"] , do_sample=UpperCamelCase_ , num_return_sequences=2 , batch_size=2 , return_tensors=UpperCamelCase_ , ) self.assertEqual( UpperCamelCase_ , [ [ {"generated_token_ids": ANY(UpperCamelCase_ )}, {"generated_token_ids": ANY(UpperCamelCase_ )}, ], [ {"generated_token_ids": ANY(UpperCamelCase_ )}, {"generated_token_ids": ANY(UpperCamelCase_ )}, ], ] , ) @require_tf def _snake_case ( self ): __UpperCAmelCase : List[Any] = pipeline(task="text-generation" , model="sshleifer/tiny-ctrl" , framework="tf" ) # Using `do_sample=False` to force deterministic output __UpperCAmelCase : Optional[Any] = text_generator("This is a test" , do_sample=UpperCamelCase_ ) self.assertEqual( UpperCamelCase_ , [ { "generated_text": ( "This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵" " please," ) } ] , ) __UpperCAmelCase : Union[str, Any] = text_generator(["This is a test", "This is a second test"] , do_sample=UpperCamelCase_ ) self.assertEqual( UpperCamelCase_ , [ [ { "generated_text": ( "This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵" " please," ) } ], [ { "generated_text": ( "This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes" " Cannes 閲閲Cannes Cannes Cannes 攵 please," ) } ], ] , ) def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ): __UpperCAmelCase : Optional[Any] = TextGenerationPipeline(model=UpperCamelCase_ , tokenizer=UpperCamelCase_ ) return text_generator, ["This is a test", "Another test"] def _snake_case ( self ): __UpperCAmelCase : List[Any] = "Hello I 
believe in" __UpperCAmelCase : Dict = pipeline("text-generation" , model="hf-internal-testing/tiny-random-gpt2" ) __UpperCAmelCase : Optional[Any] = text_generator(UpperCamelCase_ ) self.assertEqual( UpperCamelCase_ , [{"generated_text": "Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe"}] , ) __UpperCAmelCase : Union[str, Any] = text_generator(UpperCamelCase_ , stop_sequence=" fe" ) self.assertEqual(UpperCamelCase_ , [{"generated_text": "Hello I believe in fe"}] ) def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ ): __UpperCAmelCase : Optional[int] = text_generator.model __UpperCAmelCase : Dict = text_generator.tokenizer __UpperCAmelCase : List[str] = text_generator("This is a test" ) self.assertEqual(UpperCamelCase_ , [{"generated_text": ANY(UpperCamelCase_ )}] ) self.assertTrue(outputs[0]["generated_text"].startswith("This is a test" ) ) __UpperCAmelCase : Optional[int] = text_generator("This is a test" , return_full_text=UpperCamelCase_ ) self.assertEqual(UpperCamelCase_ , [{"generated_text": ANY(UpperCamelCase_ )}] ) self.assertNotIn("This is a test" , outputs[0]["generated_text"] ) __UpperCAmelCase : Optional[int] = pipeline(task="text-generation" , model=UpperCamelCase_ , tokenizer=UpperCamelCase_ , return_full_text=UpperCamelCase_ ) __UpperCAmelCase : List[Any] = text_generator("This is a test" ) self.assertEqual(UpperCamelCase_ , [{"generated_text": ANY(UpperCamelCase_ )}] ) self.assertNotIn("This is a test" , outputs[0]["generated_text"] ) __UpperCAmelCase : Dict = text_generator("This is a test" , return_full_text=UpperCamelCase_ ) self.assertEqual(UpperCamelCase_ , [{"generated_text": ANY(UpperCamelCase_ )}] ) self.assertTrue(outputs[0]["generated_text"].startswith("This is a test" ) ) __UpperCAmelCase : Optional[int] = text_generator(["This is great !", "Something else"] , num_return_sequences=2 , do_sample=UpperCamelCase_ ) self.assertEqual( UpperCamelCase_ , [ [{"generated_text": ANY(UpperCamelCase_ )}, {"generated_text": ANY(UpperCamelCase_ )}], [{"generated_text": ANY(UpperCamelCase_ )}, {"generated_text": ANY(UpperCamelCase_ )}], ] , ) if text_generator.tokenizer.pad_token is not None: __UpperCAmelCase : Dict = text_generator( ["This is great !", "Something else"] , num_return_sequences=2 , batch_size=2 , do_sample=UpperCamelCase_ ) self.assertEqual( UpperCamelCase_ , [ [{"generated_text": ANY(UpperCamelCase_ )}, {"generated_text": ANY(UpperCamelCase_ )}], [{"generated_text": ANY(UpperCamelCase_ )}, {"generated_text": ANY(UpperCamelCase_ )}], ] , ) with self.assertRaises(UpperCamelCase_ ): __UpperCAmelCase : Tuple = text_generator("test" , return_full_text=UpperCamelCase_ , return_text=UpperCamelCase_ ) with self.assertRaises(UpperCamelCase_ ): __UpperCAmelCase : Optional[Any] = text_generator("test" , return_full_text=UpperCamelCase_ , return_tensors=UpperCamelCase_ ) with self.assertRaises(UpperCamelCase_ ): __UpperCAmelCase : Dict = text_generator("test" , return_text=UpperCamelCase_ , return_tensors=UpperCamelCase_ ) # Empty prompt is slighly special # it requires BOS token to exist. # Special case for Pegasus which will always append EOS so will # work even without BOS. 
if ( text_generator.tokenizer.bos_token_id is not None or "Pegasus" in tokenizer.__class__.__name__ or "Git" in model.__class__.__name__ ): __UpperCAmelCase : Any = text_generator("" ) self.assertEqual(UpperCamelCase_ , [{"generated_text": ANY(UpperCamelCase_ )}] ) else: with self.assertRaises((ValueError, AssertionError) ): __UpperCAmelCase : Optional[Any] = text_generator("" ) if text_generator.framework == "tf": # TF generation does not support max_new_tokens, and it's impossible # to control long generation with only max_length without # fancy calculation, dismissing tests for now. return # We don't care about infinite range models. # They already work. # Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly. __UpperCAmelCase : int = ["RwkvForCausalLM", "XGLMForCausalLM", "GPTNeoXForCausalLM"] if ( tokenizer.model_max_length < 1_00_00 and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS ): # Handling of large generations with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ): text_generator("This is a test" * 5_00 , max_new_tokens=20 ) __UpperCAmelCase : Dict = text_generator("This is a test" * 5_00 , handle_long_generation="hole" , max_new_tokens=20 ) # Hole strategy cannot work with self.assertRaises(UpperCamelCase_ ): text_generator( "This is a test" * 5_00 , handle_long_generation="hole" , max_new_tokens=tokenizer.model_max_length + 10 , ) @require_torch @require_accelerate @require_torch_gpu def _snake_case ( self ): import torch # Classic `model_kwargs` __UpperCAmelCase : str = pipeline( model="hf-internal-testing/tiny-random-bloom" , model_kwargs={"device_map": "auto", "torch_dtype": torch.bfloataa} , ) self.assertEqual(pipe.model.device , torch.device(0 ) ) self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa ) __UpperCAmelCase : Union[str, Any] = pipe("This is a test" ) self.assertEqual( UpperCamelCase_ , [ { "generated_text": ( "This is a test test test test test test test test test test test test test test test test" " test" ) } ] , ) # Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.) 
__UpperCAmelCase : Union[str, Any] = pipeline(model="hf-internal-testing/tiny-random-bloom" , device_map="auto" , torch_dtype=torch.bfloataa ) self.assertEqual(pipe.model.device , torch.device(0 ) ) self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa ) __UpperCAmelCase : Dict = pipe("This is a test" ) self.assertEqual( UpperCamelCase_ , [ { "generated_text": ( "This is a test test test test test test test test test test test test test test test test" " test" ) } ] , ) # torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602 __UpperCAmelCase : Any = pipeline(model="hf-internal-testing/tiny-random-bloom" , device_map="auto" ) self.assertEqual(pipe.model.device , torch.device(0 ) ) self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa ) __UpperCAmelCase : Any = pipe("This is a test" ) self.assertEqual( UpperCamelCase_ , [ { "generated_text": ( "This is a test test test test test test test test test test test test test test test test" " test" ) } ] , ) @require_torch @require_torch_gpu def _snake_case ( self ): import torch __UpperCAmelCase : str = pipeline(model="hf-internal-testing/tiny-random-bloom" , device=0 , torch_dtype=torch.floataa ) pipe("This is a test" ) @require_torch @require_accelerate @require_torch_gpu def _snake_case ( self ): import torch __UpperCAmelCase : List[str] = pipeline(model="hf-internal-testing/tiny-random-bloom" , device_map="auto" , torch_dtype=torch.floataa ) pipe("This is a test" , do_sample=UpperCamelCase_ , top_p=0.5 ) def _snake_case ( self ): __UpperCAmelCase : Dict = "Hello world" __UpperCAmelCase : Tuple = pipeline("text-generation" , model="hf-internal-testing/tiny-random-gpt2" ) if text_generator.model.framework == "tf": __UpperCAmelCase : List[str] = logging.get_logger("transformers.generation.tf_utils" ) else: __UpperCAmelCase : List[Any] = logging.get_logger("transformers.generation.utils" ) __UpperCAmelCase : Optional[int] = "Both `max_new_tokens`" # The beggining of the message to be checked in this test # Both are set by the user -> log warning with CaptureLogger(UpperCamelCase_ ) as cl: __UpperCAmelCase : List[str] = text_generator(UpperCamelCase_ , max_length=10 , max_new_tokens=1 ) self.assertIn(UpperCamelCase_ , cl.out ) # The user only sets one -> no warning with CaptureLogger(UpperCamelCase_ ) as cl: __UpperCAmelCase : str = text_generator(UpperCamelCase_ , max_new_tokens=1 ) self.assertNotIn(UpperCamelCase_ , cl.out ) with CaptureLogger(UpperCamelCase_ ) as cl: __UpperCAmelCase : List[str] = text_generator(UpperCamelCase_ , max_length=10 ) self.assertNotIn(UpperCamelCase_ , cl.out )
10
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging _a : Any = logging.get_logger(__name__) _a : int = { "facebook/s2t-wav2vec2-large-en-de": ( "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json" ), # See all Speech2Text models at https://huggingface.co/models?filter=speech2text2 } class __A (__magic_name__ ): snake_case :Optional[int] = "speech_to_text_2" snake_case :List[Any] = ["past_key_values"] snake_case :str = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"} def __init__( self , UpperCamelCase_=1_00_00 , UpperCamelCase_=6 , UpperCamelCase_=20_48 , UpperCamelCase_=4 , UpperCamelCase_=0.0 , UpperCamelCase_=True , UpperCamelCase_="relu" , UpperCamelCase_=2_56 , UpperCamelCase_=0.1 , UpperCamelCase_=0.0 , UpperCamelCase_=0.0 , UpperCamelCase_=0.0_2 , UpperCamelCase_=2 , UpperCamelCase_=True , UpperCamelCase_=1 , UpperCamelCase_=0 , UpperCamelCase_=2 , UpperCamelCase_=10_24 , **UpperCamelCase_ , ): __UpperCAmelCase : Any = vocab_size __UpperCAmelCase : Optional[int] = d_model __UpperCAmelCase : Tuple = decoder_ffn_dim __UpperCAmelCase : List[str] = decoder_layers __UpperCAmelCase : str = decoder_attention_heads __UpperCAmelCase : Dict = dropout __UpperCAmelCase : Optional[Any] = attention_dropout __UpperCAmelCase : int = activation_dropout __UpperCAmelCase : Dict = activation_function __UpperCAmelCase : Tuple = init_std __UpperCAmelCase : Any = decoder_layerdrop __UpperCAmelCase : str = use_cache __UpperCAmelCase : int = decoder_layers __UpperCAmelCase : Any = scale_embedding # scale factor will be sqrt(d_model) if True __UpperCAmelCase : Union[str, Any] = max_target_positions super().__init__( pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , decoder_start_token_id=UpperCamelCase_ , **UpperCamelCase_ , )
10
1
'''simple docstring'''
import math


def _lowercase ( lowerCamelCase__ ) -> bool:
    """simple docstring"""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5 , int(math.sqrt(lowerCamelCase__ ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def _lowercase ( lowerCamelCase__ = 1_0001 ) -> int:
    """simple docstring"""
    try:
        __UpperCAmelCase : int = int(lowerCamelCase__ )
    except (TypeError, ValueError):
        raise TypeError("Parameter nth must be int or castable to int." ) from None
    if nth <= 0:
        raise ValueError("Parameter nth must be greater than or equal to one." )
    __UpperCAmelCase : list[int] = []
    __UpperCAmelCase : List[str] = 2
    while len(lowerCamelCase__ ) < nth:
        if is_prime(lowerCamelCase__ ):
            primes.append(lowerCamelCase__ )
            num += 1
        else:
            num += 1
    return primes[len(lowerCamelCase__ ) - 1]


if __name__ == "__main__":
    print(f"""{solution() = }""")
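# Why trial division above only probes candidates of the form 6k +/- 1: any integer is
# 6k + r with r in {0, 1, 2, 3, 4, 5}; 6k, 6k + 2 and 6k + 4 are even, and 6k + 3 is
# divisible by 3, so every prime above 3 is 6k + 1 or 6k + 5 (= 6k - 1).
# A minimal, self-contained sketch of the same check with descriptive names
# (the names below are illustrative, not taken from the file above):
import math


def is_prime_6k(number: int) -> bool:
    """Trial division over 6k +/- 1 candidates only."""
    if number in (2, 3):
        return True
    if number < 2 or number % 2 == 0 or number % 3 == 0:
        return False
    for candidate in range(5, int(math.sqrt(number)) + 1, 6):
        if number % candidate == 0 or number % (candidate + 2) == 0:
            return False
    return True


assert [n for n in range(2, 30) if is_prime_6k(n)] == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]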
10
'''simple docstring'''


def _lowercase ( lowerCamelCase__ = 100 ) -> int:
    """simple docstring"""
    __UpperCAmelCase : Optional[Any] = (n * (n + 1) // 2) ** 2
    __UpperCAmelCase : Any = n * (n + 1) * (2 * n + 1) // 6
    return sum_cubes - sum_squares


if __name__ == "__main__":
    print(f"""{solution() = }""")
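# A quick check of the closed forms used above for n = 10: the square of the sum is
# (10 * 11 / 2) ** 2 = 55 ** 2 = 3025, the sum of the squares is 10 * 11 * 21 / 6 = 385,
# and the difference is 3025 - 385 = 2640. A self-contained brute-force cross-check
# (illustrative only):
n = 10
square_of_sum = sum(range(1, n + 1)) ** 2             # 3025
sum_of_squares = sum(i * i for i in range(1, n + 1))  # 385
assert square_of_sum - sum_of_squares == 2640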
10
1
'''simple docstring''' from collections.abc import Generator from math import sin def _lowercase ( lowerCamelCase__ ) -> bytes: """simple docstring""" if len(lowerCamelCase__ ) != 32: raise ValueError("Input must be of length 32" ) __UpperCAmelCase : Dict = b"" for i in [3, 2, 1, 0]: little_endian += string_aa[8 * i : 8 * i + 8] return little_endian def _lowercase ( lowerCamelCase__ ) -> bytes: """simple docstring""" if i < 0: raise ValueError("Input must be non-negative" ) __UpperCAmelCase : Dict = format(lowerCamelCase__ , "08x" )[-8:] __UpperCAmelCase : int = b"" for i in [3, 2, 1, 0]: little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8" ) return little_endian_hex def _lowercase ( lowerCamelCase__ ) -> bytes: """simple docstring""" __UpperCAmelCase : Tuple = b"" for char in message: bit_string += format(lowerCamelCase__ , "08b" ).encode("utf-8" ) __UpperCAmelCase : str = format(len(lowerCamelCase__ ) , "064b" ).encode("utf-8" ) # Pad bit_string to a multiple of 512 chars bit_string += b"1" while len(lowerCamelCase__ ) % 512 != 448: bit_string += b"0" bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] ) return bit_string def _lowercase ( lowerCamelCase__ ) -> Generator[list[int], None, None]: """simple docstring""" if len(lowerCamelCase__ ) % 512 != 0: raise ValueError("Input must have length that's a multiple of 512" ) for pos in range(0 , len(lowerCamelCase__ ) , 512 ): __UpperCAmelCase : Dict = bit_string[pos : pos + 512] __UpperCAmelCase : int = [] for i in range(0 , 512 , 32 ): block_words.append(int(to_little_endian(block[i : i + 32] ) , 2 ) ) yield block_words def _lowercase ( lowerCamelCase__ ) -> int: """simple docstring""" if i < 0: raise ValueError("Input must be non-negative" ) __UpperCAmelCase : Union[str, Any] = format(lowerCamelCase__ , "032b" ) __UpperCAmelCase : Tuple = "" for c in i_str: new_str += "1" if c == "0" else "0" return int(lowerCamelCase__ , 2 ) def _lowercase ( lowerCamelCase__ , lowerCamelCase__ ) -> int: """simple docstring""" return (a + b) % 2**32 def _lowercase ( lowerCamelCase__ , lowerCamelCase__ ) -> int: """simple docstring""" if i < 0: raise ValueError("Input must be non-negative" ) if shift < 0: raise ValueError("Shift must be non-negative" ) return ((i << shift) ^ (i >> (32 - shift))) % 2**32 def _lowercase ( lowerCamelCase__ ) -> bytes: """simple docstring""" __UpperCAmelCase : Any = preprocess(lowerCamelCase__ ) __UpperCAmelCase : List[Any] = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )] # Starting states __UpperCAmelCase : Tuple = 0X6_7_4_5_2_3_0_1 __UpperCAmelCase : str = 0Xe_f_c_d_a_b_8_9 __UpperCAmelCase : Dict = 0X9_8_b_a_d_c_f_e __UpperCAmelCase : str = 0X1_0_3_2_5_4_7_6 __UpperCAmelCase : Optional[Any] = [ 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, ] # Process bit string in chunks, each with 16 32-char words for block_words in get_block_words(lowerCamelCase__ ): __UpperCAmelCase : str = aa __UpperCAmelCase : Optional[int] = ba __UpperCAmelCase : Optional[int] = ca __UpperCAmelCase : Tuple = da # Hash current chunk for i in range(64 ): if i <= 15: # f = (b & c) | (not_32(b) & d) # Alternate definition for f __UpperCAmelCase : Union[str, Any] = d ^ (b & (c ^ d)) __UpperCAmelCase : Tuple = i elif i <= 31: # f = (d & b) | (not_32(d) & c) # Alternate definition for f __UpperCAmelCase : Optional[int] = c ^ 
(d & (b ^ c)) __UpperCAmelCase : Tuple = (5 * i + 1) % 16 elif i <= 47: __UpperCAmelCase : str = b ^ c ^ d __UpperCAmelCase : Union[str, Any] = (3 * i + 5) % 16 else: __UpperCAmelCase : str = c ^ (b | not_aa(lowerCamelCase__ )) __UpperCAmelCase : Tuple = (7 * i) % 16 __UpperCAmelCase : Union[str, Any] = (f + a + added_consts[i] + block_words[g]) % 2**32 __UpperCAmelCase : Tuple = d __UpperCAmelCase : Any = c __UpperCAmelCase : str = b __UpperCAmelCase : Any = sum_aa(lowerCamelCase__ , left_rotate_aa(lowerCamelCase__ , shift_amounts[i] ) ) # Add hashed chunk to running total __UpperCAmelCase : Dict = sum_aa(lowerCamelCase__ , lowerCamelCase__ ) __UpperCAmelCase : Union[str, Any] = sum_aa(lowerCamelCase__ , lowerCamelCase__ ) __UpperCAmelCase : Optional[Any] = sum_aa(lowerCamelCase__ , lowerCamelCase__ ) __UpperCAmelCase : Optional[Any] = sum_aa(lowerCamelCase__ , lowerCamelCase__ ) __UpperCAmelCase : List[str] = reformat_hex(lowerCamelCase__ ) + reformat_hex(lowerCamelCase__ ) + reformat_hex(lowerCamelCase__ ) + reformat_hex(lowerCamelCase__ ) return digest if __name__ == "__main__": import doctest doctest.testmod()
10
'''simple docstring'''


def _lowercase ( lowerCamelCase__ , lowerCamelCase__ ) -> float:
    """simple docstring"""
    if discount_rate < 0:
        raise ValueError("Discount rate cannot be negative" )
    if not cash_flows:
        raise ValueError("Cash flows list cannot be empty" )
    __UpperCAmelCase : Tuple = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(lowerCamelCase__ ) )
    return round(lowerCamelCase__ , ndigits=2 )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
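# Example use of the net-present-value calculation above: with a 10% discount rate,
# an initial outlay of -100 and inflows of 50 and 60 in the next two periods give
# NPV = -100 + 50 / 1.1 + 60 / 1.1 ** 2, about -4.96 (the first flow sits at t = 0,
# so it is not discounted). A self-contained check with descriptive names
# (illustrative, not the names from the file above):
def net_present_value(discount_rate: float, cash_flows: list[float]) -> float:
    return round(sum(cf / (1 + discount_rate) ** i for i, cf in enumerate(cash_flows)), 2)


assert net_present_value(0.1, [-100, 50, 60]) == -4.96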
10
1
'''simple docstring'''
from __future__ import annotations

# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
_a : Union[str, Any] = 10


def _lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> int:
    """simple docstring"""
    for i in range(lowerCamelCase__ , lowerCamelCase__ ):
        if array[i] == target:
            return i
    return -1


def _lowercase ( lowerCamelCase__ , lowerCamelCase__ ) -> int:
    """simple docstring"""
    __UpperCAmelCase : Optional[Any] = 0
    __UpperCAmelCase : Any = len(lowerCamelCase__ )
    while left <= right:
        if right - left < precision:
            return lin_search(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )

        __UpperCAmelCase : List[str] = (left + right) // 3 + 1
        __UpperCAmelCase : str = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            __UpperCAmelCase : str = one_third - 1
        elif array[two_third] < target:
            __UpperCAmelCase : Optional[int] = two_third + 1
        else:
            __UpperCAmelCase : str = one_third + 1
            __UpperCAmelCase : Any = two_third - 1
    else:
        return -1


def _lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> int:
    """simple docstring"""
    if left < right:
        if right - left < precision:
            return lin_search(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
        __UpperCAmelCase : Tuple = (left + right) // 3 + 1
        __UpperCAmelCase : List[Any] = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(lowerCamelCase__ , one_third - 1 , lowerCamelCase__ , lowerCamelCase__ )
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1 , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
        else:
            return rec_ternary_search(one_third + 1 , two_third - 1 , lowerCamelCase__ , lowerCamelCase__ )
    else:
        return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    _a : Any = input("Enter numbers separated by comma:\n").strip()
    _a : Optional[Any] = [int(item.strip()) for item in user_input.split(",")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    _a : int = int(input("Enter the number to be found in the list:\n").strip())
    _a : Dict = ite_ternary_search(collection, target)
    _a : int = rec_ternary_search(0, len(collection) - 1, collection, target)
    if resulta != -1:
        print(f"""Iterative search: {target} found at positions: {resulta}""")
        print(f"""Recursive search: {target} found at positions: {resulta}""")
    else:
        print("Not found")
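# Both searches above shrink the range to roughly a third per step by probing two
# interior points, so the worst case is O(log base-3 of n) probes versus log base-2
# for binary search, at the cost of one extra comparison per step. A compact,
# self-contained recursive variant with descriptive names (illustrative, not the
# file's own functions):
def ternary_search(arr: list[int], target: int, lo: int, hi: int) -> int:
    """Return an index of target in sorted arr[lo:hi + 1], or -1."""
    if lo > hi:
        return -1
    one_third = lo + (hi - lo) // 3
    two_third = hi - (hi - lo) // 3
    if arr[one_third] == target:
        return one_third
    if arr[two_third] == target:
        return two_third
    if target < arr[one_third]:
        return ternary_search(arr, target, lo, one_third - 1)
    if target > arr[two_third]:
        return ternary_search(arr, target, two_third + 1, hi)
    return ternary_search(arr, target, one_third + 1, two_third - 1)


values = [1, 3, 5, 7, 9, 11]
assert ternary_search(values, 7, 0, len(values) - 1) == 3
assert ternary_search(values, 4, 0, len(values) - 1) == -1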
10
'''simple docstring''' import random import torch from huggingface_hub import HfApi from diffusers import UNetaDModel _a : Union[str, Any] = HfApi() _a : int = {} # fmt: off _a : Optional[int] = torch.tensor([ -0.7_515, -1.6_883, 0.2_420, 0.0_300, 0.6_347, 1.3_433, -1.1_743, -3.7_467, 1.2_342, -2.2_485, 0.4_636, 0.8_076, -0.7_991, 0.3_969, 0.8_498, 0.9_189, -1.8_887, -3.3_522, 0.7_639, 0.2_040, 0.6_271, -2.7_148, -1.6_316, 3.0_839, 0.3_186, 0.2_721, -0.9_759, -1.2_461, 2.6_257, 1.3_557 ]) _a : Optional[Any] = torch.tensor([ -2.3_639, -2.5_344, 0.0_054, -0.6_674, 1.5_990, 1.0_158, 0.3_124, -2.1_436, 1.8_795, -2.5_429, -0.1_566, -0.3_973, 1.2_490, 2.6_447, 1.2_283, -0.5_208, -2.8_154, -3.5_119, 2.3_838, 1.2_033, 1.7_201, -2.1_256, -1.4_576, 2.7_948, 2.4_204, -0.9_752, -1.2_546, 0.8_027, 3.2_758, 3.1_365 ]) _a : int = torch.tensor([ -0.6_531, -0.6_891, -0.3_172, -0.5_375, -0.9_140, -0.5_367, -0.1_175, -0.7_869, -0.3_808, -0.4_513, -0.2_098, -0.0_083, 0.3_183, 0.5_140, 0.2_247, -0.1_304, -0.1_302, -0.2_802, -0.2_084, -0.2_025, -0.4_967, -0.4_873, -0.0_861, 0.6_925, 0.0_250, 0.1_290, -0.1_543, 0.6_316, 1.0_460, 1.4_943 ]) _a : str = torch.tensor([ 0.0_911, 0.1_107, 0.0_182, 0.0_435, -0.0_805, -0.0_608, 0.0_381, 0.2_172, -0.0_280, 0.1_327, -0.0_299, -0.0_255, -0.0_050, -0.1_170, -0.1_046, 0.0_309, 0.1_367, 0.1_728, -0.0_533, -0.0_748, -0.0_534, 0.1_624, 0.0_384, -0.1_805, -0.0_707, 0.0_642, 0.0_220, -0.0_134, -0.1_333, -0.1_505 ]) _a : Union[str, Any] = torch.tensor([ 0.1_321, 0.1_337, 0.0_440, 0.0_622, -0.0_591, -0.0_370, 0.0_503, 0.2_133, -0.0_177, 0.1_415, -0.0_116, -0.0_112, 0.0_044, -0.0_980, -0.0_789, 0.0_395, 0.1_502, 0.1_785, -0.0_488, -0.0_514, -0.0_404, 0.1_539, 0.0_454, -0.1_559, -0.0_665, 0.0_659, 0.0_383, -0.0_005, -0.1_266, -0.1_386 ]) _a : Any = torch.tensor([ 0.1_154, 0.1_218, 0.0_307, 0.0_526, -0.0_711, -0.0_541, 0.0_366, 0.2_078, -0.0_267, 0.1_317, -0.0_226, -0.0_193, -0.0_014, -0.1_055, -0.0_902, 0.0_330, 0.1_391, 0.1_709, -0.0_562, -0.0_693, -0.0_560, 0.1_482, 0.0_381, -0.1_683, -0.0_681, 0.0_661, 0.0_331, -0.0_046, -0.1_268, -0.1_431 ]) _a : List[Any] = torch.tensor([ 0.1_192, 0.1_240, 0.0_414, 0.0_606, -0.0_557, -0.0_412, 0.0_430, 0.2_042, -0.0_200, 0.1_385, -0.0_115, -0.0_132, 0.0_017, -0.0_965, -0.0_802, 0.0_398, 0.1_433, 0.1_747, -0.0_458, -0.0_533, -0.0_407, 0.1_545, 0.0_419, -0.1_574, -0.0_645, 0.0_626, 0.0_341, -0.0_010, -0.1_199, -0.1_390 ]) _a : Optional[int] = torch.tensor([ 0.1_075, 0.1_074, 0.0_205, 0.0_431, -0.0_774, -0.0_607, 0.0_298, 0.2_042, -0.0_320, 0.1_267, -0.0_281, -0.0_250, -0.0_064, -0.1_091, -0.0_946, 0.0_290, 0.1_328, 0.1_650, -0.0_580, -0.0_738, -0.0_586, 0.1_440, 0.0_337, -0.1_746, -0.0_712, 0.0_605, 0.0_250, -0.0_099, -0.1_316, -0.1_473 ]) _a : Tuple = torch.tensor([ -1.4_572, -2.0_481, -0.0_414, -0.6_005, 1.4_136, 0.5_848, 0.4_028, -2.7_330, 1.2_212, -2.1_228, 0.2_155, 0.4_039, 0.7_662, 2.0_535, 0.7_477, -0.3_243, -2.1_758, -2.7_648, 1.6_947, 0.7_026, 1.2_338, -1.6_078, -0.8_682, 2.2_810, 1.8_574, -0.5_718, -0.5_586, -0.0_186, 2.3_415, 2.1_251]) _a : List[Any] = torch.tensor([ -1.3_690, -1.9_720, -0.4_090, -0.6_966, 1.4_660, 0.9_938, -0.1_385, -2.7_324, 0.7_736, -1.8_917, 0.2_923, 0.4_293, 0.1_693, 1.4_112, 1.1_887, -0.3_181, -2.2_160, -2.6_381, 1.3_170, 0.8_163, 0.9_240, -1.6_544, -0.6_099, 2.5_259, 1.6_430, -0.9_090, -0.9_392, -0.0_126, 2.4_268, 2.3_266 ]) _a : Optional[Any] = torch.tensor([ -1.3_525, -1.9_628, -0.3_956, -0.6_860, 1.4_664, 1.0_014, -0.1_259, -2.7_212, 0.7_772, -1.8_811, 0.2_996, 0.4_388, 0.1_704, 1.4_029, 1.1_701, -0.3_027, 
-2.2_053, -2.6_287, 1.3_350, 0.8_131, 0.9_274, -1.6_292, -0.6_098, 2.5_131, 1.6_505, -0.8_958, -0.9_298, -0.0_151, 2.4_257, 2.3_355 ]) _a : Union[str, Any] = torch.tensor([ -2.0_585, -2.7_897, -0.2_850, -0.8_940, 1.9_052, 0.5_702, 0.6_345, -3.8_959, 1.5_932, -3.2_319, 0.1_974, 0.0_287, 1.7_566, 2.6_543, 0.8_387, -0.5_351, -3.2_736, -4.3_375, 2.9_029, 1.6_390, 1.4_640, -2.1_701, -1.9_013, 2.9_341, 3.4_981, -0.6_255, -1.1_644, -0.1_591, 3.7_097, 3.2_066 ]) _a : Optional[int] = torch.tensor([ -2.3_139, -2.5_594, -0.0_197, -0.6_785, 1.7_001, 1.1_606, 0.3_075, -2.1_740, 1.8_071, -2.5_630, -0.0_926, -0.3_811, 1.2_116, 2.6_246, 1.2_731, -0.5_398, -2.8_153, -3.6_140, 2.3_893, 1.3_262, 1.6_258, -2.1_856, -1.3_267, 2.8_395, 2.3_779, -1.0_623, -1.2_468, 0.8_959, 3.3_367, 3.2_243 ]) _a : Union[str, Any] = torch.tensor([ -2.0_628, -2.7_667, -0.2_089, -0.8_263, 2.0_539, 0.5_992, 0.6_495, -3.8_336, 1.6_025, -3.2_817, 0.1_721, -0.0_633, 1.7_516, 2.7_039, 0.8_100, -0.5_908, -3.2_113, -4.4_343, 2.9_257, 1.3_632, 1.5_562, -2.1_489, -1.9_894, 3.0_560, 3.3_396, -0.7_328, -1.0_417, 0.0_383, 3.7_093, 3.2_343 ]) _a : str = torch.tensor([ -1.4_574, -2.0_569, -0.0_473, -0.6_117, 1.4_018, 0.5_769, 0.4_129, -2.7_344, 1.2_241, -2.1_397, 0.2_000, 0.3_937, 0.7_616, 2.0_453, 0.7_324, -0.3_391, -2.1_746, -2.7_744, 1.6_963, 0.6_921, 1.2_187, -1.6_172, -0.8_877, 2.2_439, 1.8_471, -0.5_839, -0.5_605, -0.0_464, 2.3_250, 2.1_219 ]) # fmt: on _a : Optional[Any] = api.list_models(filter="diffusers") for mod in models: if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256": _a : List[str] = "/home/patrick/google_checkpoints/" + mod.modelId.split("/")[-1] print(f"""Started running {mod.modelId}!!!""") if mod.modelId.startswith("CompVis"): _a : int = UNetaDModel.from_pretrained(local_checkpoint, subfolder="unet") else: _a : Optional[int] = UNetaDModel.from_pretrained(local_checkpoint) torch.manual_seed(0) random.seed(0) _a : str = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size) _a : str = torch.tensor([10] * noise.shape[0]) with torch.no_grad(): _a : str = model(noise, time_step).sample assert torch.allclose( logits[0, 0, 0, :30], results["_".join("_".join(mod.modelId.split("/")).split("-"))], atol=1e-3 ) print(f"""{mod.modelId} has passed successfully!!!""")
10
1
'''simple docstring''' from sklearn.metrics import matthews_corrcoef import datasets _a : Any = "\nCompute the Matthews correlation coefficient (MCC)\n\nThe Matthews correlation coefficient is used in machine learning as a\nmeasure of the quality of binary and multiclass classifications. It takes\ninto account true and false positives and negatives and is generally\nregarded as a balanced measure which can be used even if the classes are of\nvery different sizes. The MCC is in essence a correlation coefficient value\nbetween -1 and +1. A coefficient of +1 represents a perfect prediction, 0\nan average random prediction and -1 an inverse prediction. The statistic\nis also known as the phi coefficient. [source: Wikipedia]\n" _a : int = "\nArgs:\n predictions (list of int): Predicted labels, as returned by a model.\n references (list of int): Ground truth labels.\n sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.\nReturns:\n matthews_correlation (dict containing float): Matthews correlation.\nExamples:\n Example 1, a basic example with only predictions and references as inputs:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3])\n >>> print(round(results['matthews_correlation'], 2))\n 0.54\n\n Example 2, the same example as above, but also including sample weights:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 3, 1, 1, 1, 2])\n >>> print(round(results['matthews_correlation'], 2))\n 0.1\n\n Example 3, the same example as above, but with sample weights that cause a negative correlation:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 1, 0, 0, 0, 1])\n >>> print(round(results['matthews_correlation'], 2))\n -0.25\n" _a : List[Any] = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __A (datasets.Metric ): def _snake_case ( self ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("int32" ), "references": datasets.Value("int32" ), } ) , reference_urls=[ "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html" ] , ) def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=None ): return { "matthews_correlation": float(matthews_corrcoef(UpperCamelCase_ , UpperCamelCase_ , sample_weight=UpperCamelCase_ ) ), }
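# The metric above is a thin wrapper around scikit-learn, so the same number can be
# obtained directly; a sketch using only sklearn, with the input values taken from
# the first docstring example above:
from sklearn.metrics import matthews_corrcoef

mcc = matthews_corrcoef([1, 3, 2, 0, 3, 2], [1, 2, 2, 0, 3, 3])
print(round(mcc, 2))  # 0.54, matching the docstring example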
10
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging _a : Any = logging.get_logger(__name__) _a : List[Any] = { "microsoft/cvt-13": "https://huggingface.co/microsoft/cvt-13/resolve/main/config.json", # See all Cvt models at https://huggingface.co/models?filter=cvt } class __A (__magic_name__ ): snake_case :Any = "cvt" def __init__( self , UpperCamelCase_=3 , UpperCamelCase_=[7, 3, 3] , UpperCamelCase_=[4, 2, 2] , UpperCamelCase_=[2, 1, 1] , UpperCamelCase_=[64, 1_92, 3_84] , UpperCamelCase_=[1, 3, 6] , UpperCamelCase_=[1, 2, 10] , UpperCamelCase_=[4.0, 4.0, 4.0] , UpperCamelCase_=[0.0, 0.0, 0.0] , UpperCamelCase_=[0.0, 0.0, 0.0] , UpperCamelCase_=[0.0, 0.0, 0.1] , UpperCamelCase_=[True, True, True] , UpperCamelCase_=[False, False, True] , UpperCamelCase_=["dw_bn", "dw_bn", "dw_bn"] , UpperCamelCase_=[3, 3, 3] , UpperCamelCase_=[1, 1, 1] , UpperCamelCase_=[2, 2, 2] , UpperCamelCase_=[1, 1, 1] , UpperCamelCase_=[1, 1, 1] , UpperCamelCase_=0.0_2 , UpperCamelCase_=1E-12 , **UpperCamelCase_ , ): super().__init__(**UpperCamelCase_ ) __UpperCAmelCase : Optional[int] = num_channels __UpperCAmelCase : Optional[Any] = patch_sizes __UpperCAmelCase : List[str] = patch_stride __UpperCAmelCase : Tuple = patch_padding __UpperCAmelCase : int = embed_dim __UpperCAmelCase : str = num_heads __UpperCAmelCase : Any = depth __UpperCAmelCase : List[str] = mlp_ratio __UpperCAmelCase : List[str] = attention_drop_rate __UpperCAmelCase : Dict = drop_rate __UpperCAmelCase : Dict = drop_path_rate __UpperCAmelCase : str = qkv_bias __UpperCAmelCase : Optional[int] = cls_token __UpperCAmelCase : Optional[Any] = qkv_projection_method __UpperCAmelCase : Tuple = kernel_qkv __UpperCAmelCase : Optional[Any] = padding_kv __UpperCAmelCase : Optional[int] = stride_kv __UpperCAmelCase : Any = padding_q __UpperCAmelCase : List[Any] = stride_q __UpperCAmelCase : Union[str, Any] = initializer_range __UpperCAmelCase : Any = layer_norm_eps
10
1
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel from transformers.utils import logging logging.set_verbosity_info() _a : Tuple = logging.get_logger(__name__) def _lowercase ( lowerCamelCase__ , lowerCamelCase__=False ) -> Any: """simple docstring""" __UpperCAmelCase : Union[str, Any] = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""vit.encoder.layer.{i}.layernorm_before.weight""") ) rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""vit.encoder.layer.{i}.layernorm_before.bias""") ) rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""vit.encoder.layer.{i}.attention.output.dense.weight""") ) rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""vit.encoder.layer.{i}.attention.output.dense.bias""") ) rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""vit.encoder.layer.{i}.layernorm_after.weight""") ) rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""vit.encoder.layer.{i}.layernorm_after.bias""") ) rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""vit.encoder.layer.{i}.intermediate.dense.weight""") ) rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""vit.encoder.layer.{i}.intermediate.dense.bias""") ) rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""vit.encoder.layer.{i}.output.dense.weight""") ) rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""vit.encoder.layer.{i}.output.dense.bias""") ) # projection layer + position embeddings rename_keys.extend( [ ("cls_token", "vit.embeddings.cls_token"), ("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"), ("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"), ("pos_embed", "vit.embeddings.position_embeddings"), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ("norm.weight", "layernorm.weight"), ("norm.bias", "layernorm.bias"), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" __UpperCAmelCase : Tuple = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ("norm.weight", "vit.layernorm.weight"), ("norm.bias", "vit.layernorm.bias"), ("head.weight", "classifier.weight"), ("head.bias", "classifier.bias"), ] ) return rename_keys def _lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=False ) -> str: """simple docstring""" for i in range(config.num_hidden_layers ): if base_model: __UpperCAmelCase : Any = "" else: __UpperCAmelCase : List[Any] = "vit." 
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias) __UpperCAmelCase : Union[str, Any] = state_dict.pop(f"""blocks.{i}.attn.qkv.weight""" ) __UpperCAmelCase : int = state_dict.pop(f"""blocks.{i}.attn.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict __UpperCAmelCase : str = in_proj_weight[ : config.hidden_size, : ] __UpperCAmelCase : List[str] = in_proj_bias[: config.hidden_size] __UpperCAmelCase : Optional[int] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] __UpperCAmelCase : Dict = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] __UpperCAmelCase : List[Any] = in_proj_weight[ -config.hidden_size :, : ] __UpperCAmelCase : Tuple = in_proj_bias[-config.hidden_size :] def _lowercase ( lowerCamelCase__ ) -> Optional[Any]: """simple docstring""" __UpperCAmelCase : List[str] = ["head.weight", "head.bias"] for k in ignore_keys: state_dict.pop(lowerCamelCase__ , lowerCamelCase__ ) def _lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Dict: """simple docstring""" __UpperCAmelCase : str = dct.pop(lowerCamelCase__ ) __UpperCAmelCase : Optional[Any] = val def _lowercase ( ) -> Tuple: """simple docstring""" __UpperCAmelCase : Dict = "http://images.cocodataset.org/val2017/000000039769.jpg" __UpperCAmelCase : int = Image.open(requests.get(lowerCamelCase__ , stream=lowerCamelCase__ ).raw ) return im @torch.no_grad() def _lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=True ) -> str: """simple docstring""" __UpperCAmelCase : Dict = ViTConfig() # patch_size if model_name[-1] == "8": __UpperCAmelCase : Union[str, Any] = 8 # set labels if required if not base_model: __UpperCAmelCase : str = 1000 __UpperCAmelCase : str = "huggingface/label-files" __UpperCAmelCase : str = "imagenet-1k-id2label.json" __UpperCAmelCase : Optional[int] = json.load(open(hf_hub_download(lowerCamelCase__ , lowerCamelCase__ , repo_type="dataset" ) , "r" ) ) __UpperCAmelCase : Tuple = {int(lowerCamelCase__ ): v for k, v in idalabel.items()} __UpperCAmelCase : str = idalabel __UpperCAmelCase : Tuple = {v: k for k, v in idalabel.items()} # size of the architecture if model_name in ["dino_vits8", "dino_vits16"]: __UpperCAmelCase : str = 384 __UpperCAmelCase : Optional[int] = 1536 __UpperCAmelCase : Union[str, Any] = 12 __UpperCAmelCase : int = 6 # load original model from torch hub __UpperCAmelCase : List[str] = torch.hub.load("facebookresearch/dino:main" , lowerCamelCase__ ) original_model.eval() # load state_dict of original model, remove and rename some keys __UpperCAmelCase : Tuple = original_model.state_dict() if base_model: remove_classification_head_(lowerCamelCase__ ) __UpperCAmelCase : Union[str, Any] = create_rename_keys(lowerCamelCase__ , base_model=lowerCamelCase__ ) for src, dest in rename_keys: rename_key(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) read_in_q_k_v(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) # load HuggingFace model if base_model: __UpperCAmelCase : List[str] = ViTModel(lowerCamelCase__ , add_pooling_layer=lowerCamelCase__ ).eval() else: __UpperCAmelCase : str = ViTForImageClassification(lowerCamelCase__ ).eval() model.load_state_dict(lowerCamelCase__ ) # Check outputs on an image, prepared by ViTImageProcessor __UpperCAmelCase : List[str] = ViTImageProcessor() __UpperCAmelCase : Dict = image_processor(images=prepare_img() , return_tensors="pt" ) __UpperCAmelCase : Dict = encoding["pixel_values"] __UpperCAmelCase : str = 
model(lowerCamelCase__ ) if base_model: __UpperCAmelCase : List[Any] = original_model(lowerCamelCase__ ) assert torch.allclose(lowerCamelCase__ , outputs.last_hidden_state[:, 0, :] , atol=1e-1 ) else: __UpperCAmelCase : int = original_model(lowerCamelCase__ ) assert logits.shape == outputs.logits.shape assert torch.allclose(lowerCamelCase__ , outputs.logits , atol=1e-3 ) Path(lowerCamelCase__ ).mkdir(exist_ok=lowerCamelCase__ ) print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(lowerCamelCase__ ) print(f"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(lowerCamelCase__ ) if __name__ == "__main__": _a : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default="dino_vitb16", type=str, help="Name of the model trained with DINO you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--base_model", action="store_true", help="Whether to only convert the base model (no projection head weights).", ) parser.set_defaults(base_model=True) _a : str = parser.parse_args() convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
10
'''simple docstring'''
from __future__ import annotations

import numpy as np
from numpy import floataa
from numpy.typing import NDArray


def _lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , ) -> list[float]:
    """simple docstring"""
    __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = coefficient_matrix.shape
    __UpperCAmelCase , __UpperCAmelCase : Any = constant_matrix.shape

    if rowsa != colsa:
        __UpperCAmelCase : str = f"""Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}"""
        raise ValueError(lowerCamelCase__ )

    if colsa != 1:
        __UpperCAmelCase : Optional[Any] = f"""Constant matrix must be nx1 but received {rowsa}x{colsa}"""
        raise ValueError(lowerCamelCase__ )

    if rowsa != rowsa:
        __UpperCAmelCase : Optional[int] = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"""received {rowsa}x{colsa} and {rowsa}x{colsa}"""
        )
        raise ValueError(lowerCamelCase__ )

    if len(lowerCamelCase__ ) != rowsa:
        __UpperCAmelCase : List[str] = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"""matrix but received {len(lowerCamelCase__ )} and {rowsa}"""
        )
        raise ValueError(lowerCamelCase__ )

    if iterations <= 0:
        raise ValueError("Iterations must be at least 1" )

    __UpperCAmelCase : NDArray[floataa] = np.concatenate(
        (coefficient_matrix, constant_matrix) , axis=1 )
    __UpperCAmelCase , __UpperCAmelCase : Tuple = table.shape

    strictly_diagonally_dominant(lowerCamelCase__ )

    # Iterates the whole matrix for given number of times
    for _ in range(lowerCamelCase__ ):
        __UpperCAmelCase : int = []
        for row in range(lowerCamelCase__ ):
            __UpperCAmelCase : List[str] = 0
            for col in range(lowerCamelCase__ ):
                if col == row:
                    __UpperCAmelCase : int = table[row][col]
                elif col == cols - 1:
                    __UpperCAmelCase : Any = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            __UpperCAmelCase : List[Any] = (temp + val) / denom
            new_val.append(lowerCamelCase__ )
        __UpperCAmelCase : str = new_val

    return [float(lowerCamelCase__ ) for i in new_val]


def _lowercase ( lowerCamelCase__ ) -> bool:
    """simple docstring"""
    __UpperCAmelCase , __UpperCAmelCase : Optional[int] = table.shape

    __UpperCAmelCase : str = True

    for i in range(0 , lowerCamelCase__ ):
        __UpperCAmelCase : Union[str, Any] = 0
        for j in range(0 , cols - 1 ):
            if i == j:
                continue
            else:
                total += table[i][j]

        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant" )

    return is_diagonally_dominant


# Test Cases
if __name__ == "__main__":
    import doctest

    doctest.testmod()
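# The solver above implements plain Jacobi iteration: every new x_i is computed only
# from the previous sweep's values, and convergence is guaranteed when the coefficient
# matrix is strictly diagonally dominant (which the file checks). A compact,
# self-contained vectorised sweep in NumPy (illustrative; names are not from the
# file above):
import numpy as np

A = np.array([[4.0, 1.0], [1.0, 3.0]])  # strictly diagonally dominant
b = np.array([1.0, 2.0])
x = np.zeros(2)
D = np.diag(A)           # diagonal entries
R = A - np.diagflat(D)   # off-diagonal remainder
for _ in range(25):
    x = (b - R @ x) / D  # one Jacobi sweep
assert np.allclose(A @ x, b, atol=1e-6)  # converged to x = (1/11, 7/11)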
10
1
'''simple docstring''' from collections.abc import Callable from math import pi, sqrt from random import uniform from statistics import mean def _lowercase ( lowerCamelCase__ ) -> Dict: """simple docstring""" def is_in_circle(lowerCamelCase__ , lowerCamelCase__ ) -> bool: __UpperCAmelCase : List[Any] = sqrt((x**2) + (y**2) ) # Our circle has a radius of 1, so a distance # greater than 1 would land outside the circle. return distance_from_centre <= 1 # The proportion of guesses that landed in the circle __UpperCAmelCase : Dict = mean( int(is_in_circle(uniform(-1.0 , 1.0 ) , uniform(-1.0 , 1.0 ) ) ) for _ in range(lowerCamelCase__ ) ) # The ratio of the area for circle to square is pi/4. __UpperCAmelCase : Optional[Any] = proportion * 4 print(f"""The estimated value of pi is {pi_estimate}""" ) print(f"""The numpy value of pi is {pi}""" ) print(f"""The total error is {abs(pi - pi_estimate )}""" ) def _lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = 0.0 , lowerCamelCase__ = 1.0 , ) -> float: """simple docstring""" return mean( function_to_integrate(uniform(lowerCamelCase__ , lowerCamelCase__ ) ) for _ in range(lowerCamelCase__ ) ) * (max_value - min_value) def _lowercase ( lowerCamelCase__ , lowerCamelCase__ = 0.0 , lowerCamelCase__ = 1.0 ) -> None: """simple docstring""" def identity_function(lowerCamelCase__ ) -> float: return x __UpperCAmelCase : int = area_under_curve_estimator( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) __UpperCAmelCase : int = (max_value * max_value - min_value * min_value) / 2 print("******************" ) print(f"""Estimating area under y=x where x varies from {min_value} to {max_value}""" ) print(f"""Estimated value is {estimated_value}""" ) print(f"""Expected value is {expected_value}""" ) print(f"""Total error is {abs(estimated_value - expected_value )}""" ) print("******************" ) def _lowercase ( lowerCamelCase__ ) -> None: """simple docstring""" def function_to_integrate(lowerCamelCase__ ) -> float: return sqrt(4.0 - x * x ) __UpperCAmelCase : Any = area_under_curve_estimator( lowerCamelCase__ , lowerCamelCase__ , 0.0 , 2.0 ) print("******************" ) print("Estimating pi using area_under_curve_estimator" ) print(f"""Estimated value is {estimated_value}""" ) print(f"""Expected value is {pi}""" ) print(f"""Total error is {abs(estimated_value - pi )}""" ) print("******************" ) if __name__ == "__main__": import doctest doctest.testmod()
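# All the estimators above rely on the same fact: the mean of n i.i.d. samples has a
# standard error proportional to 1 / sqrt(n), so each extra decimal digit of accuracy
# costs roughly 100x more samples. A tiny self-contained pi estimate showing the
# trend (illustrative; output varies run to run):
from random import uniform

for n in (1_000, 100_000):
    hits = sum(uniform(-1, 1) ** 2 + uniform(-1, 1) ** 2 <= 1 for _ in range(n))
    print(n, 4 * hits / n)  # approaches 3.14159... as n grows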
10
'''simple docstring'''
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors


def _lowercase ( lowerCamelCase__ ) -> int:
    """simple docstring"""
    __UpperCAmelCase : Any = prime_factors(lowerCamelCase__ )
    if is_square_free(lowerCamelCase__ ):
        return -1 if len(lowerCamelCase__ ) % 2 else 1
    return 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
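# The Mobius function implemented above: mu(n) is 0 when n has a squared prime factor,
# otherwise (-1)**k for k distinct prime factors. So mu(10) = mu(2 * 5) = 1,
# mu(30) = mu(2 * 3 * 5) = -1, and mu(24) = 0 because 24 = 2**3 * 3.
# A self-contained cross-check by direct factor counting (illustrative names):
def mobius(n: int) -> int:
    result, p = 1, 2
    while p * p <= n:
        if n % p == 0:
            n //= p
            if n % p == 0:
                return 0  # squared prime factor
            result = -result
        p += 1
    return -result if n > 1 else result


assert [mobius(n) for n in (10, 24, 30)] == [1, 0, -1]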
10
1
'''simple docstring'''
from __future__ import annotations

from collections import Counter
from random import random


class __A :
    def __init__( self ):
        __UpperCAmelCase : List[str] = {}

    def _snake_case ( self , UpperCamelCase_ ):
        __UpperCAmelCase : List[str] = {}

    def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
        if nodea not in self.connections:
            self.add_node(UpperCamelCase_ )
        if nodea not in self.connections:
            self.add_node(UpperCamelCase_ )
        __UpperCAmelCase : List[Any] = probability

    def _snake_case ( self ):
        return list(self.connections )

    def _snake_case ( self , UpperCamelCase_ ):
        __UpperCAmelCase : Optional[int] = 0
        __UpperCAmelCase : List[str] = random()

        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""


def _lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> dict[str, int]:
    """simple docstring"""
    __UpperCAmelCase : Dict = MarkovChainGraphUndirectedUnweighted()

    for nodea, nodea, probability in transitions:
        graph.add_transition_probability(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )

    __UpperCAmelCase : Union[str, Any] = Counter(graph.get_nodes() )
    __UpperCAmelCase : Tuple = start

    for _ in range(lowerCamelCase__ ):
        __UpperCAmelCase : str = graph.transition(lowerCamelCase__ )
        visited[node] += 1

    return visited


if __name__ == "__main__":
    import doctest

    doctest.testmod()
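# The helper above takes (source, destination, probability) triples, walks the chain
# for a fixed number of steps, and counts visits; with enough steps the counts
# approximate the chain's stationary distribution. A self-contained two-state sketch
# (illustrative; names are not from the file above):
from collections import Counter
from random import random

transitions = {"a": [("a", 0.9), ("b", 0.1)], "b": [("a", 0.5), ("b", 0.5)]}
state, visits = "a", Counter()
for _ in range(10_000):
    r, acc = random(), 0.0
    for dest, p in transitions[state]:
        acc += p
        if r < acc:
            state = dest
            break
    visits[state] += 1
print(visits)  # roughly 5:1 in favour of "a" (stationary distribution 5/6, 1/6)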
10
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) _a : Dict = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a : Dict = ["ReformerTokenizer"] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a : List[Any] = ["ReformerTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a : int = [ "REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "ReformerAttention", "ReformerForMaskedLM", "ReformerForQuestionAnswering", "ReformerForSequenceClassification", "ReformerLayer", "ReformerModel", "ReformerModelWithLMHead", "ReformerPreTrainedModel", ] if TYPE_CHECKING: from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_reformer import ReformerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_reformer_fast import ReformerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_reformer import ( REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ReformerAttention, ReformerForMaskedLM, ReformerForQuestionAnswering, ReformerForSequenceClassification, ReformerLayer, ReformerModel, ReformerModelWithLMHead, ReformerPreTrainedModel, ) else: import sys _a : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
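# The _LazyModule pattern above defers heavy imports (torch, tokenizers, ...) until an
# attribute is actually accessed from the package. The core mechanism is module-level
# __getattr__ (PEP 562); a minimal self-contained sketch of the idea for a package's
# __init__.py (illustrative only, not the actual transformers implementation):
import importlib

_LAZY_ATTRS = {"sqrt": "math", "dumps": "json"}  # attribute -> providing module


def __getattr__(name):
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name])  # imported on first use
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")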
10
1
'''simple docstring'''
from ...utils import logging
from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel
from .configuration_mta import MTaConfig


_a : str = logging.get_logger(__name__)

_a : Tuple = "T5Config"


class __A (__magic_name__ ):
    snake_case :Optional[int] = "mt5"
    snake_case :List[Any] = MTaConfig


class __A (__magic_name__ ):
    snake_case :str = "mt5"
    snake_case :List[Any] = MTaConfig


class __A (__magic_name__ ):
    snake_case :str = "mt5"
    snake_case :Tuple = MTaConfig
10
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _a : List[str] = logging.get_logger(__name__) _a : Any = { "kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json", "kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json", "kssteven/ibert-roberta-large-mnli": ( "https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json" ), } class __A (__magic_name__ ): snake_case :Union[str, Any] = "ibert" def __init__( self , UpperCamelCase_=3_05_22 , UpperCamelCase_=7_68 , UpperCamelCase_=12 , UpperCamelCase_=12 , UpperCamelCase_=30_72 , UpperCamelCase_="gelu" , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=5_12 , UpperCamelCase_=2 , UpperCamelCase_=0.0_2 , UpperCamelCase_=1E-12 , UpperCamelCase_=1 , UpperCamelCase_=0 , UpperCamelCase_=2 , UpperCamelCase_="absolute" , UpperCamelCase_=False , UpperCamelCase_="none" , **UpperCamelCase_ , ): super().__init__(pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ ) __UpperCAmelCase : List[Any] = vocab_size __UpperCAmelCase : Optional[Any] = hidden_size __UpperCAmelCase : List[Any] = num_hidden_layers __UpperCAmelCase : Any = num_attention_heads __UpperCAmelCase : List[str] = hidden_act __UpperCAmelCase : List[str] = intermediate_size __UpperCAmelCase : Optional[int] = hidden_dropout_prob __UpperCAmelCase : Union[str, Any] = attention_probs_dropout_prob __UpperCAmelCase : str = max_position_embeddings __UpperCAmelCase : List[str] = type_vocab_size __UpperCAmelCase : Dict = initializer_range __UpperCAmelCase : Optional[int] = layer_norm_eps __UpperCAmelCase : Any = position_embedding_type __UpperCAmelCase : Tuple = quant_mode __UpperCAmelCase : Union[str, Any] = force_dequant class __A (__magic_name__ ): @property def _snake_case ( self ): if self.task == "multiple-choice": __UpperCAmelCase : Optional[int] = {0: "batch", 1: "choice", 2: "sequence"} else: __UpperCAmelCase : Optional[int] = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] )
10
1
'''simple docstring''' import json import os from datetime import date from pathlib import Path from tabulate import DataRow, TableFormat, tabulate _a : Dict = TableFormat( lineabove=None, linebelowheader=None, linebetweenrows=None, linebelow=None, headerrow=DataRow("", "|", "|"), datarow=DataRow("", "|", "|"), padding=1, with_header_hide=None, ) _a : Union[str, Any] = [] _a : str = [] _a : List[Any] = {"type": "section", "text": {"type": "plain_text", "text": "No failed tests! 🤗", "emoji": True}} _a : int = [ { "type": "header", "text": { "type": "plain_text", "text": f"""🤗 Accelerate nightly {os.environ.get('TEST_TYPE', '')} test results""", "emoji": True, }, } ] _a : str = 0 for log in Path().glob("*.log"): _a : Union[str, Any] = 0 with open(log, "r") as f: for line in f: _a : Tuple = json.loads(line) if line.get("nodeid", "") != "": _a : str = line["nodeid"] if line.get("duration", None) is not None: _a : Tuple = f"""{line['duration']:.4f}""" if line.get("outcome", "") == "failed": section_num_failed += 1 failed.append([test, duration, log.name.split("_")[0]]) total_num_failed += 1 group_info.append([str(log), section_num_failed, failed]) _a : Optional[int] = [] log.unlink() _a : Tuple = "" _a : str = [] if total_num_failed > 0: for name, num_failed, failed_tests in group_info: if num_failed > 0: if num_failed == 1: message += f"*{name[1:]}: {num_failed} failed test*\n" else: message += f"*{name[1:]}: {num_failed} failed tests*\n" _a : Tuple = [] _a : Dict = {} for test in failed_tests: _a : Union[str, Any] = test[0].split("::") _a : Any = data[0].split("/")[-1] if data[0] not in filesafailed: _a : Optional[Any] = [data[1:]] else: filesafailed[data[0]] += [data[1:]] failed_table.append(data) _a : Optional[Any] = [test[0] for test in failed_table] _a : str = list(set(files)) # Count number of instances in failed_tests _a : int = [] for file in individual_files: table.append([file, len(filesafailed[file])]) _a : int = tabulate( table, headers=["Test Location", "Num Failed"], tablefmt=hf_table_format, stralign="right", ) message += f"\n```\n{failed_table}\n```" all_filesafailed.append(filesafailed) if len(message) > 3000: _a : Tuple = "Too many failed tests, please see the full report in the Action results." _a : Dict = len(err) + 10 _a : Optional[Any] = message[: 3000 - offset] + f"""\n...\n```\n{err}""" print(f"""### {message}""") else: _a : Optional[Any] = "No failed tests! 🤗" print(f"""## {message}""") payload.append(no_error_payload) if os.environ.get("TEST_TYPE", "") != "": from slack_sdk import WebClient _a : str = WebClient(token=os.environ["SLACK_API_TOKEN"]) if message != "No failed tests! 
🤗": _a : Union[str, Any] = { "type": "section", "text": { "type": "mrkdwn", "text": message, }, } payload.append(md_report) _a : Dict = { "type": "section", "text": { "type": "mrkdwn", "text": "*For more details:*", }, "accessory": { "type": "button", "text": { "type": "plain_text", "text": "Check Action results", "emoji": True, }, "url": f"""https://github.com/{os.environ['GITHUB_REPOSITORY']}/actions/runs/{os.environ['GITHUB_RUN_ID']}""", }, } payload.append(action_button) _a : Tuple = { "type": "context", "elements": [ { "type": "plain_text", "text": f"""Nightly {os.environ.get('TEST_TYPE')} test results for {date.today()}""", } ], } payload.append(date_report) _a : Optional[Any] = client.chat_postMessage(channel="#accelerate-ci-daily", text=message, blocks=payload) _a : int = response.data["ts"] for failed_file in all_filesafailed: for test_location, test_failures in failed_file.items(): # Keep only the first instance of the test name _a : List[Any] = "" for i, row in enumerate(test_failures): if row[0] != test_class: _a : Union[str, Any] = row[0] else: _a : List[Any] = "" _a : Union[str, Any] = { "type": "section", "text": { "type": "mrkdwn", "text": f"""Test location: {test_location}\n```\n{tabulate(test_failures, headers=['Class', 'Test'], tablefmt=hf_table_format, stralign='right')}\n```""", }, } client.chat_postMessage( channel="#accelerate-ci-daily", thread_ts=ts, blocks=[payload], )
"""Benchmarking the library on inference and training in TensorFlow."""
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments


def main() -> None:
    """Run the TensorFlow benchmark, translating deprecated `--no_*` flags into a helpful error."""
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    benchmark.run()


if __name__ == "__main__":
    main()
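# Example invocation (illustrative; the full flag set lives on TensorFlowBenchmarkArguments):
#   python run_benchmark_tf.py --models bert-base-uncased --batch_sizes 8 --sequence_lengths 128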
'''simple docstring''' import gc import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer import diffusers from diffusers import ( AutoencoderKL, EulerDiscreteScheduler, StableDiffusionLatentUpscalePipeline, StableDiffusionPipeline, UNetaDConditionModel, ) from diffusers.schedulers import KarrasDiffusionSchedulers from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() def _lowercase ( lowerCamelCase__ ) -> Union[str, Any]: """simple docstring""" __UpperCAmelCase : Dict = [tensor.shape for tensor in tensor_list] return all(shape == shapes[0] for shape in shapes[1:] ) class __A (__magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ): snake_case :Union[str, Any] = StableDiffusionLatentUpscalePipeline snake_case :Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - { "height", "width", "cross_attention_kwargs", "negative_prompt_embeds", "prompt_embeds", } snake_case :List[str] = PipelineTesterMixin.required_optional_params - {"num_images_per_prompt"} snake_case :Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS snake_case :Optional[Any] = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess snake_case :Any = frozenset([] ) snake_case :Optional[int] = True @property def _snake_case ( self ): __UpperCAmelCase : Optional[int] = 1 __UpperCAmelCase : Dict = 4 __UpperCAmelCase : List[str] = (16, 16) __UpperCAmelCase : Dict = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(UpperCamelCase_ ) return image def _snake_case ( self ): torch.manual_seed(0 ) __UpperCAmelCase : List[str] = UNetaDConditionModel( act_fn="gelu" , attention_head_dim=8 , norm_num_groups=UpperCamelCase_ , block_out_channels=[32, 32, 64, 64] , time_cond_proj_dim=1_60 , conv_in_kernel=1 , conv_out_kernel=1 , cross_attention_dim=32 , down_block_types=( "KDownBlock2D", "KCrossAttnDownBlock2D", "KCrossAttnDownBlock2D", "KCrossAttnDownBlock2D", ) , in_channels=8 , mid_block_type=UpperCamelCase_ , only_cross_attention=UpperCamelCase_ , out_channels=5 , resnet_time_scale_shift="scale_shift" , time_embedding_type="fourier" , timestep_post_act="gelu" , up_block_types=("KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KUpBlock2D") , ) __UpperCAmelCase : int = AutoencoderKL( block_out_channels=[32, 32, 64, 64] , in_channels=3 , out_channels=3 , down_block_types=[ "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", ] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , ) __UpperCAmelCase : Optional[int] = EulerDiscreteScheduler(prediction_type="sample" ) __UpperCAmelCase : int = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="quick_gelu" , projection_dim=5_12 , ) __UpperCAmelCase : List[str] = CLIPTextModel(UpperCamelCase_ ) __UpperCAmelCase : Tuple = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) 
__UpperCAmelCase : Union[str, Any] = { "unet": model.eval(), "vae": vae.eval(), "scheduler": scheduler, "text_encoder": text_encoder, "tokenizer": tokenizer, } return components def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_=0 ): if str(UpperCamelCase_ ).startswith("mps" ): __UpperCAmelCase : str = torch.manual_seed(UpperCamelCase_ ) else: __UpperCAmelCase : Optional[int] = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ ) __UpperCAmelCase : Any = { "prompt": "A painting of a squirrel eating a burger", "image": self.dummy_image.cpu(), "generator": generator, "num_inference_steps": 2, "output_type": "numpy", } return inputs def _snake_case ( self ): __UpperCAmelCase : List[str] = "cpu" __UpperCAmelCase : List[str] = self.get_dummy_components() __UpperCAmelCase : Tuple = self.pipeline_class(**UpperCamelCase_ ) pipe.to(UpperCamelCase_ ) pipe.set_progress_bar_config(disable=UpperCamelCase_ ) __UpperCAmelCase : Any = self.get_dummy_inputs(UpperCamelCase_ ) __UpperCAmelCase : int = pipe(**UpperCamelCase_ ).images __UpperCAmelCase : Any = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 2_56, 2_56, 3) ) __UpperCAmelCase : Tuple = np.array( [0.4_7_2_2_2_4_1_2, 0.4_1_9_2_1_6_3_3, 0.4_4_7_1_7_4_3_4, 0.4_6_8_7_4_1_9_2, 0.4_2_5_8_8_2_5_8, 0.4_6_1_5_0_7_2_6, 0.4_6_7_7_5_3_4, 0.4_5_5_8_3_8_3_2, 0.4_8_5_7_9_0_5_5] ) __UpperCAmelCase : List[str] = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(UpperCamelCase_ , 1E-3 ) def _snake_case ( self ): super().test_attention_slicing_forward_pass(expected_max_diff=7E-3 ) def _snake_case ( self ): super().test_cpu_offload_forward_pass(expected_max_diff=3E-3 ) def _snake_case ( self ): super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 ) def _snake_case ( self ): super().test_inference_batch_single_identical(expected_max_diff=7E-3 ) def _snake_case ( self ): super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3E-3 ) def _snake_case ( self ): super().test_save_load_local(expected_max_difference=3E-3 ) def _snake_case ( self ): super().test_save_load_optional_components(expected_max_difference=3E-3 ) def _snake_case ( self ): __UpperCAmelCase : Dict = [ "DDIMScheduler", "DDPMScheduler", "PNDMScheduler", "HeunDiscreteScheduler", "EulerAncestralDiscreteScheduler", "KDPM2DiscreteScheduler", "KDPM2AncestralDiscreteScheduler", "DPMSolverSDEScheduler", ] __UpperCAmelCase : Tuple = self.get_dummy_components() __UpperCAmelCase : Union[str, Any] = self.pipeline_class(**UpperCamelCase_ ) # make sure that PNDM does not need warm-up pipe.scheduler.register_to_config(skip_prk_steps=UpperCamelCase_ ) pipe.to(UpperCamelCase_ ) pipe.set_progress_bar_config(disable=UpperCamelCase_ ) __UpperCAmelCase : Tuple = self.get_dummy_inputs(UpperCamelCase_ ) __UpperCAmelCase : List[str] = 2 __UpperCAmelCase : List[str] = [] for scheduler_enum in KarrasDiffusionSchedulers: if scheduler_enum.name in skip_schedulers: # no sigma schedulers are not supported # no schedulers continue __UpperCAmelCase : Optional[int] = getattr(UpperCamelCase_ , scheduler_enum.name ) __UpperCAmelCase : List[str] = scheduler_cls.from_config(pipe.scheduler.config ) __UpperCAmelCase : Optional[int] = pipe(**UpperCamelCase_ )[0] outputs.append(UpperCamelCase_ ) assert check_same_shape(UpperCamelCase_ ) @require_torch_gpu @slow class __A (unittest.TestCase ): def _snake_case ( self ): super().tearDown() gc.collect() torch.cuda.empty_cache() def _snake_case ( self ): __UpperCAmelCase : Optional[int] = torch.manual_seed(33 ) 
__UpperCAmelCase : str = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4" , torch_dtype=torch.floataa ) pipe.to("cuda" ) __UpperCAmelCase : Union[str, Any] = StableDiffusionLatentUpscalePipeline.from_pretrained( "stabilityai/sd-x2-latent-upscaler" , torch_dtype=torch.floataa ) upscaler.to("cuda" ) __UpperCAmelCase : Optional[int] = "a photo of an astronaut high resolution, unreal engine, ultra realistic" __UpperCAmelCase : Any = pipe(UpperCamelCase_ , generator=UpperCamelCase_ , output_type="latent" ).images __UpperCAmelCase : int = upscaler( prompt=UpperCamelCase_ , image=UpperCamelCase_ , num_inference_steps=20 , guidance_scale=0 , generator=UpperCamelCase_ , output_type="np" , ).images[0] __UpperCAmelCase : Optional[Any] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy" ) assert np.abs((expected_image - image).mean() ) < 5E-2 def _snake_case ( self ): __UpperCAmelCase : List[Any] = torch.manual_seed(33 ) __UpperCAmelCase : Union[str, Any] = StableDiffusionLatentUpscalePipeline.from_pretrained( "stabilityai/sd-x2-latent-upscaler" , torch_dtype=torch.floataa ) upscaler.to("cuda" ) __UpperCAmelCase : Optional[Any] = "the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas" __UpperCAmelCase : str = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png" ) __UpperCAmelCase : Dict = upscaler( prompt=UpperCamelCase_ , image=UpperCamelCase_ , num_inference_steps=20 , guidance_scale=0 , generator=UpperCamelCase_ , output_type="np" , ).images[0] __UpperCAmelCase : Tuple = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy" ) assert np.abs((expected_image - image).max() ) < 5E-2
"""Project Euler: find odd composites that cannot be written as a prime plus twice a square."""
from __future__ import annotations

import math


def is_prime(number: int) -> bool:
    """Check whether a number is prime in O(sqrt(n)) time."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


odd_composites = [num for num in range(3, 100001, 2) if not is_prime(num)]


def compute_nums(n: int) -> list[int]:
    """Return the first n odd composites that are not a prime plus twice a square."""
    if not isinstance(n, int):
        raise ValueError("n must be an integer")
    if n <= 0:
        raise ValueError("n must be >= 0")

    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])
            if len(list_nums) == n:
                return list_nums
    return []


def solution() -> int:
    """Return the solution to the problem."""
    return compute_nums(1)[0]


if __name__ == "__main__":
    print(f"{solution() = }")
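# Sanity check (illustrative): 5777 is the first odd composite with no
# prime + 2*k^2 decomposition, which is exactly what solution() returns.
assert compute_nums(1) == [5777]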
'''simple docstring''' # Lint as: python3 import sys from collections.abc import Mapping from typing import TYPE_CHECKING import numpy as np import pyarrow as pa from .. import config from ..utils.py_utils import map_nested from .formatting import TensorFormatter if TYPE_CHECKING: import torch class __A (TensorFormatter[Mapping, "torch.Tensor", Mapping] ): def __init__( self , UpperCamelCase_=None , **UpperCamelCase_ ): super().__init__(features=UpperCamelCase_ ) __UpperCAmelCase : Union[str, Any] = torch_tensor_kwargs import torch # noqa import torch at initialization def _snake_case ( self , UpperCamelCase_ ): import torch if isinstance(UpperCamelCase_ , UpperCamelCase_ ) and column: if all( isinstance(UpperCamelCase_ , torch.Tensor ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ): return torch.stack(UpperCamelCase_ ) return column def _snake_case ( self , UpperCamelCase_ ): import torch if isinstance(UpperCamelCase_ , (str, bytes, type(UpperCamelCase_ )) ): return value elif isinstance(UpperCamelCase_ , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ): return value.tolist() __UpperCAmelCase : int = {} if isinstance(UpperCamelCase_ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ): __UpperCAmelCase : Optional[int] = {"dtype": torch.intaa} elif isinstance(UpperCamelCase_ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ): __UpperCAmelCase : str = {"dtype": torch.floataa} elif config.PIL_AVAILABLE and "PIL" in sys.modules: import PIL.Image if isinstance(UpperCamelCase_ , PIL.Image.Image ): __UpperCAmelCase : str = np.asarray(UpperCamelCase_ ) return torch.tensor(UpperCamelCase_ , **{**default_dtype, **self.torch_tensor_kwargs} ) def _snake_case ( self , UpperCamelCase_ ): import torch # support for torch, tf, jax etc. 
if hasattr(UpperCamelCase_ , "__array__" ) and not isinstance(UpperCamelCase_ , torch.Tensor ): __UpperCAmelCase : Dict = data_struct.__array__() # support for nested types like struct of list of struct if isinstance(UpperCamelCase_ , np.ndarray ): if data_struct.dtype == object: # torch tensors cannot be instantied from an array of objects return self._consolidate([self.recursive_tensorize(UpperCamelCase_ ) for substruct in data_struct] ) elif isinstance(UpperCamelCase_ , (list, tuple) ): return self._consolidate([self.recursive_tensorize(UpperCamelCase_ ) for substruct in data_struct] ) return self._tensorize(UpperCamelCase_ ) def _snake_case ( self , UpperCamelCase_ ): return map_nested(self._recursive_tensorize , UpperCamelCase_ , map_list=UpperCamelCase_ ) def _snake_case ( self , UpperCamelCase_ ): __UpperCAmelCase : List[str] = self.numpy_arrow_extractor().extract_row(UpperCamelCase_ ) __UpperCAmelCase : Union[str, Any] = self.python_features_decoder.decode_row(UpperCamelCase_ ) return self.recursive_tensorize(UpperCamelCase_ ) def _snake_case ( self , UpperCamelCase_ ): __UpperCAmelCase : Union[str, Any] = self.numpy_arrow_extractor().extract_column(UpperCamelCase_ ) __UpperCAmelCase : Optional[Any] = self.python_features_decoder.decode_column(UpperCamelCase_ , pa_table.column_names[0] ) __UpperCAmelCase : List[Any] = self.recursive_tensorize(UpperCamelCase_ ) __UpperCAmelCase : List[str] = self._consolidate(UpperCamelCase_ ) return column def _snake_case ( self , UpperCamelCase_ ): __UpperCAmelCase : int = self.numpy_arrow_extractor().extract_batch(UpperCamelCase_ ) __UpperCAmelCase : Any = self.python_features_decoder.decode_batch(UpperCamelCase_ ) __UpperCAmelCase : Optional[int] = self.recursive_tensorize(UpperCamelCase_ ) for column_name in batch: __UpperCAmelCase : Tuple = self._consolidate(batch[column_name] ) return batch
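# Illustrative sketch (not part of the original file): the formatter above is what
# backs `Dataset.with_format("torch")` in the `datasets` library; extra keyword
# arguments flow into `torch_tensor_kwargs` and override the inferred dtype.
import torch
from datasets import Dataset

ds = Dataset.from_dict({"x": [[1.0, 2.0], [3.0, 4.0]]}).with_format("torch", dtype=torch.float16)
print(ds[0]["x"].dtype)  # torch.float16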
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) _a : Optional[Any] = { "configuration_rembert": ["REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RemBertConfig", "RemBertOnnxConfig"] } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a : Optional[Any] = ["RemBertTokenizer"] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a : List[str] = ["RemBertTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a : int = [ "REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "RemBertForCausalLM", "RemBertForMaskedLM", "RemBertForMultipleChoice", "RemBertForQuestionAnswering", "RemBertForSequenceClassification", "RemBertForTokenClassification", "RemBertLayer", "RemBertModel", "RemBertPreTrainedModel", "load_tf_weights_in_rembert", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a : Any = [ "TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "TFRemBertForCausalLM", "TFRemBertForMaskedLM", "TFRemBertForMultipleChoice", "TFRemBertForQuestionAnswering", "TFRemBertForSequenceClassification", "TFRemBertForTokenClassification", "TFRemBertLayer", "TFRemBertModel", "TFRemBertPreTrainedModel", ] if TYPE_CHECKING: from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_rembert import RemBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_rembert_fast import RemBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_rembert import ( REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST, RemBertForCausalLM, RemBertForMaskedLM, RemBertForMultipleChoice, RemBertForQuestionAnswering, RemBertForSequenceClassification, RemBertForTokenClassification, RemBertLayer, RemBertModel, RemBertPreTrainedModel, load_tf_weights_in_rembert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_rembert import ( TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFRemBertForCausalLM, TFRemBertForMaskedLM, TFRemBertForMultipleChoice, TFRemBertForQuestionAnswering, TFRemBertForSequenceClassification, TFRemBertForTokenClassification, TFRemBertLayer, TFRemBertModel, TFRemBertPreTrainedModel, ) else: import sys _a : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""Graph coloring with backtracking."""
def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
    """Return True if no already-colored neighbour uses the proposed color."""
    return not any(
        neighbour == 1 and colored_vertices[i] == color for i, neighbour in enumerate(neighbours)
    )


def util_color(graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int) -> bool:
    """Recursively try to color vertex `index` onward, backtracking on conflicts."""
    # Base Case
    if index == len(graph):
        return True

    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph: list[list[int]], max_colors: int) -> list[int]:
    """Return a valid coloring as a list of color indices, or an empty list if none exists."""
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
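# Illustrative run (assumed input format: dense 0/1 adjacency matrix) on a
# 5-vertex graph with 3 colors available.
graph = [
    [0, 1, 0, 0, 0],
    [1, 0, 1, 0, 1],
    [0, 1, 0, 1, 0],
    [0, 0, 1, 0, 1],
    [0, 1, 0, 1, 0],
]
print(color(graph, 3))  # [0, 1, 0, 1, 0]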
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) _a : Dict = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a : Dict = ["ReformerTokenizer"] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a : List[Any] = ["ReformerTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a : int = [ "REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "ReformerAttention", "ReformerForMaskedLM", "ReformerForQuestionAnswering", "ReformerForSequenceClassification", "ReformerLayer", "ReformerModel", "ReformerModelWithLMHead", "ReformerPreTrainedModel", ] if TYPE_CHECKING: from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_reformer import ReformerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_reformer_fast import ReformerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_reformer import ( REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ReformerAttention, ReformerForMaskedLM, ReformerForQuestionAnswering, ReformerForSequenceClassification, ReformerLayer, ReformerModel, ReformerModelWithLMHead, ReformerPreTrainedModel, ) else: import sys _a : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""Single-bit manipulation operations."""
def set_bit(number: int, position: int) -> int:
    """Set the bit at `position` of `number` to 1."""
    return number | (1 << position)


def clear_bit(number: int, position: int) -> int:
    """Set the bit at `position` of `number` to 0."""
    return number & ~(1 << position)


def flip_bit(number: int, position: int) -> int:
    """Flip the bit at `position` of `number`."""
    return number ^ (1 << position)


def is_bit_set(number: int, position: int) -> bool:
    """Return True if the bit at `position` of `number` is 1."""
    return ((number >> position) & 1) == 1


def get_bit(number: int, position: int) -> int:
    """Return the bit (0 or 1) at `position` of `number`."""
    return int((number & (1 << position)) != 0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
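# Worked example (illustrative): manipulating bits of 0b1010 (decimal 10).
print(set_bit(0b1010, 3))     # 10 -> bit 3 already set, value unchanged
print(clear_bit(0b1010, 3))   # 2  -> 0b0010
print(flip_bit(0b1010, 1))    # 8  -> 0b1000
print(is_bit_set(0b1010, 3))  # True
print(get_bit(0b1010, 0))     # 0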
'''simple docstring''' import argparse from typing import Dict import tensorflow as tf import torch from tqdm import tqdm from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration _a : str = [ # tf -> hf ("/", "."), ("layer_", "layers."), ("kernel", "weight"), ("beta", "bias"), ("gamma", "weight"), ("pegasus", "model"), ] _a : List[Any] = [ (".output.dense", ".fc2"), ("intermediate.LayerNorm", "final_layer_norm"), ("intermediate.dense", "fc1"), ] _a : int = ( INIT_COMMON + [ ("attention.self.LayerNorm", "self_attn_layer_norm"), ("attention.output.dense", "self_attn.out_proj"), ("attention.self", "self_attn"), ("attention.encdec.LayerNorm", "encoder_attn_layer_norm"), ("attention.encdec_output.dense", "encoder_attn.out_proj"), ("attention.encdec", "encoder_attn"), ("key", "k_proj"), ("value", "v_proj"), ("query", "q_proj"), ("decoder.LayerNorm", "decoder.layernorm_embedding"), ] + END_COMMON ) _a : str = ( INIT_COMMON + [ ("embeddings.word_embeddings", "shared.weight"), ("embeddings.position_embeddings", "embed_positions.weight"), ("attention.self.LayerNorm", "self_attn_layer_norm"), ("attention.output.dense", "self_attn.output"), ("attention.self", "self_attn.self"), ("encoder.LayerNorm", "encoder.layernorm_embedding"), ] + END_COMMON ) _a : List[str] = [ "encdec/key/bias", "encdec/query/bias", "encdec/value/bias", "self/key/bias", "self/query/bias", "self/value/bias", "encdec_output/dense/bias", "attention/output/dense/bias", ] def _lowercase ( lowerCamelCase__ , lowerCamelCase__ ) -> Any: """simple docstring""" for tf_name, hf_name in patterns: __UpperCAmelCase : Union[str, Any] = k.replace(lowerCamelCase__ , lowerCamelCase__ ) return k def _lowercase ( lowerCamelCase__ , lowerCamelCase__ ) -> BigBirdPegasusForConditionalGeneration: """simple docstring""" __UpperCAmelCase : List[Any] = BigBirdPegasusConfig(**lowerCamelCase__ ) __UpperCAmelCase : List[Any] = BigBirdPegasusForConditionalGeneration(lowerCamelCase__ ) __UpperCAmelCase : Union[str, Any] = torch_model.state_dict() __UpperCAmelCase : Dict = {} # separating decoder weights __UpperCAmelCase : Optional[Any] = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder" )} __UpperCAmelCase : Any = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder" )} for k, v in tqdm(decoder_weights.items() , "tf -> hf conversion" ): __UpperCAmelCase : Union[str, Any] = [k.endswith(lowerCamelCase__ ) for ending in KEYS_TO_IGNORE] if any(lowerCamelCase__ ): continue __UpperCAmelCase : List[str] = DECODER_PATTERNS __UpperCAmelCase : Optional[Any] = rename_state_dict_key(lowerCamelCase__ , lowerCamelCase__ ) if new_k not in state_dict: raise ValueError(f"""could not find new key {new_k} in state dict. (converted from {k})""" ) if any(True if i in k else False for i in ["dense", "query", "key", "value"] ): __UpperCAmelCase : List[str] = v.T __UpperCAmelCase : int = torch.from_numpy(lowerCamelCase__ ) assert v.shape == state_dict[new_k].shape, f"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}""" for k, v in tqdm(remaining_weights.items() , "tf -> hf conversion" ): __UpperCAmelCase : Optional[int] = [k.endswith(lowerCamelCase__ ) for ending in KEYS_TO_IGNORE] if any(lowerCamelCase__ ): continue __UpperCAmelCase : Optional[Any] = REMAINING_PATTERNS __UpperCAmelCase : Any = rename_state_dict_key(lowerCamelCase__ , lowerCamelCase__ ) if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings": raise ValueError(f"""could not find new key {new_k} in state dict. 
(converted from {k})""" ) if any(True if i in k else False for i in ["dense", "query", "key", "value"] ): __UpperCAmelCase : Optional[Any] = v.T __UpperCAmelCase : int = torch.from_numpy(lowerCamelCase__ ) if k != "pegasus/embeddings/position_embeddings": assert v.shape == state_dict[new_k].shape, f"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}""" __UpperCAmelCase : List[str] = mapping["model.embed_positions.weight"] __UpperCAmelCase : Optional[Any] = mapping.pop("model.embed_positions.weight" ) __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = torch_model.load_state_dict(lowerCamelCase__ , strict=lowerCamelCase__ ) __UpperCAmelCase : str = [ k for k in missing if k not in [ "final_logits_bias", "model.encoder.embed_tokens.weight", "model.decoder.embed_tokens.weight", "lm_head.weight", ] ] assert unexpected_missing == [], f"""no matches found for the following torch keys {unexpected_missing}""" assert extra == [], f"""no matches found for the following tf keys {extra}""" return torch_model def _lowercase ( lowerCamelCase__ ) -> Dict: """simple docstring""" __UpperCAmelCase : Tuple = tf.train.list_variables(lowerCamelCase__ ) __UpperCAmelCase : Tuple = {} __UpperCAmelCase : Tuple = ["global_step"] for name, shape in tqdm(lowerCamelCase__ , desc="converting tf checkpoint to dict" ): __UpperCAmelCase : Any = any(pat in name for pat in ignore_name ) if skip_key: continue __UpperCAmelCase : str = tf.train.load_variable(lowerCamelCase__ , lowerCamelCase__ ) __UpperCAmelCase : Tuple = array return tf_weights def _lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Any: """simple docstring""" __UpperCAmelCase : Optional[Any] = get_tf_weights_as_numpy(lowerCamelCase__ ) __UpperCAmelCase : Tuple = convert_bigbird_pegasus(lowerCamelCase__ , lowerCamelCase__ ) torch_model.save_pretrained(lowerCamelCase__ ) if __name__ == "__main__": _a : Optional[int] = argparse.ArgumentParser() parser.add_argument("--tf_ckpt_path", type=str, help="passed to tf.train.list_variables") parser.add_argument("--save_dir", default=None, type=str, help="Path to the output PyTorch model.") _a : List[Any] = parser.parse_args() _a : Union[str, Any] = {} convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
"""k-nearest-neighbours classification on the iris dataset."""
from collections import Counter

import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split

data = datasets.load_iris()

X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]

X_train, X_test, y_train, y_test = train_test_split(X, y)


def euclidean_distance(a, b) -> float:
    """Give the Euclidean distance between two points."""
    return np.linalg.norm(np.array(a) - np.array(b))


def classifier(train_data, train_target, classes, point, k=5) -> str:
    """Classify `point` by majority vote among its k nearest training points."""
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]


if __name__ == "__main__":
    print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
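# Illustrative extra query (assumes the train/test split above has already run):
# a point deep in the virginica region of feature space.
print(classifier(X_train, y_train, classes, [6.7, 3.0, 5.2, 2.3]))  # typically 'virginica'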
'''simple docstring''' from __future__ import annotations import math import random from typing import Any class __A : def __init__( self ): __UpperCAmelCase : list[Any] = [] __UpperCAmelCase : int = 0 __UpperCAmelCase : int = 0 def _snake_case ( self ): return self.head == self.tail def _snake_case ( self , UpperCamelCase_ ): self.data.append(UpperCamelCase_ ) __UpperCAmelCase : str = self.tail + 1 def _snake_case ( self ): __UpperCAmelCase : str = self.data[self.head] __UpperCAmelCase : Tuple = self.head + 1 return ret def _snake_case ( self ): return self.tail - self.head def _snake_case ( self ): print(self.data ) print("**************" ) print(self.data[self.head : self.tail] ) class __A : def __init__( self , UpperCamelCase_ ): __UpperCAmelCase : int = data __UpperCAmelCase : MyNode | None = None __UpperCAmelCase : MyNode | None = None __UpperCAmelCase : int = 1 def _snake_case ( self ): return self.data def _snake_case ( self ): return self.left def _snake_case ( self ): return self.right def _snake_case ( self ): return self.height def _snake_case ( self , UpperCamelCase_ ): __UpperCAmelCase : Tuple = data def _snake_case ( self , UpperCamelCase_ ): __UpperCAmelCase : List[Any] = node def _snake_case ( self , UpperCamelCase_ ): __UpperCAmelCase : int = node def _snake_case ( self , UpperCamelCase_ ): __UpperCAmelCase : int = height def _lowercase ( lowerCamelCase__ ) -> int: """simple docstring""" if node is None: return 0 return node.get_height() def _lowercase ( lowerCamelCase__ , lowerCamelCase__ ) -> int: """simple docstring""" if a > b: return a return b def _lowercase ( lowerCamelCase__ ) -> MyNode: """simple docstring""" print("left rotation node:" , node.get_data() ) __UpperCAmelCase : Union[str, Any] = node.get_left() assert ret is not None node.set_left(ret.get_right() ) ret.set_right(lowerCamelCase__ ) __UpperCAmelCase : Optional[int] = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1 node.set_height(lowerCamelCase__ ) __UpperCAmelCase : Optional[int] = my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1 ret.set_height(lowerCamelCase__ ) return ret def _lowercase ( lowerCamelCase__ ) -> MyNode: """simple docstring""" print("right rotation node:" , node.get_data() ) __UpperCAmelCase : Union[str, Any] = node.get_right() assert ret is not None node.set_right(ret.get_left() ) ret.set_left(lowerCamelCase__ ) __UpperCAmelCase : Tuple = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1 node.set_height(lowerCamelCase__ ) __UpperCAmelCase : Optional[Any] = my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1 ret.set_height(lowerCamelCase__ ) return ret def _lowercase ( lowerCamelCase__ ) -> MyNode: """simple docstring""" __UpperCAmelCase : Dict = node.get_left() assert left_child is not None node.set_left(left_rotation(lowerCamelCase__ ) ) return right_rotation(lowerCamelCase__ ) def _lowercase ( lowerCamelCase__ ) -> MyNode: """simple docstring""" __UpperCAmelCase : List[str] = node.get_right() assert right_child is not None node.set_right(right_rotation(lowerCamelCase__ ) ) return left_rotation(lowerCamelCase__ ) def _lowercase ( lowerCamelCase__ , lowerCamelCase__ ) -> MyNode | None: """simple docstring""" if node is None: return MyNode(lowerCamelCase__ ) if data < node.get_data(): node.set_left(insert_node(node.get_left() , lowerCamelCase__ ) ) if ( get_height(node.get_left() ) - get_height(node.get_right() ) == 2 ): # an unbalance detected __UpperCAmelCase : Optional[Any] = 
node.get_left() assert left_child is not None if ( data < left_child.get_data() ): # new node is the left child of the left child __UpperCAmelCase : Dict = right_rotation(lowerCamelCase__ ) else: __UpperCAmelCase : Union[str, Any] = lr_rotation(lowerCamelCase__ ) else: node.set_right(insert_node(node.get_right() , lowerCamelCase__ ) ) if get_height(node.get_right() ) - get_height(node.get_left() ) == 2: __UpperCAmelCase : Any = node.get_right() assert right_child is not None if data < right_child.get_data(): __UpperCAmelCase : Union[str, Any] = rl_rotation(lowerCamelCase__ ) else: __UpperCAmelCase : Any = left_rotation(lowerCamelCase__ ) __UpperCAmelCase : int = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1 node.set_height(lowerCamelCase__ ) return node def _lowercase ( lowerCamelCase__ ) -> Any: """simple docstring""" while True: __UpperCAmelCase : Tuple = root.get_right() if right_child is None: break __UpperCAmelCase : List[Any] = right_child return root.get_data() def _lowercase ( lowerCamelCase__ ) -> Any: """simple docstring""" while True: __UpperCAmelCase : Optional[int] = root.get_left() if left_child is None: break __UpperCAmelCase : List[Any] = left_child return root.get_data() def _lowercase ( lowerCamelCase__ , lowerCamelCase__ ) -> MyNode | None: """simple docstring""" __UpperCAmelCase : int = root.get_left() __UpperCAmelCase : Tuple = root.get_right() if root.get_data() == data: if left_child is not None and right_child is not None: __UpperCAmelCase : List[Any] = get_left_most(lowerCamelCase__ ) root.set_data(lowerCamelCase__ ) root.set_right(del_node(lowerCamelCase__ , lowerCamelCase__ ) ) elif left_child is not None: __UpperCAmelCase : Tuple = left_child elif right_child is not None: __UpperCAmelCase : Optional[int] = right_child else: return None elif root.get_data() > data: if left_child is None: print("No such data" ) return root else: root.set_left(del_node(lowerCamelCase__ , lowerCamelCase__ ) ) else: # root.get_data() < data if right_child is None: return root else: root.set_right(del_node(lowerCamelCase__ , lowerCamelCase__ ) ) if get_height(lowerCamelCase__ ) - get_height(lowerCamelCase__ ) == 2: assert right_child is not None if get_height(right_child.get_right() ) > get_height(right_child.get_left() ): __UpperCAmelCase : int = left_rotation(lowerCamelCase__ ) else: __UpperCAmelCase : Dict = rl_rotation(lowerCamelCase__ ) elif get_height(lowerCamelCase__ ) - get_height(lowerCamelCase__ ) == -2: assert left_child is not None if get_height(left_child.get_left() ) > get_height(left_child.get_right() ): __UpperCAmelCase : Optional[int] = right_rotation(lowerCamelCase__ ) else: __UpperCAmelCase : int = lr_rotation(lowerCamelCase__ ) __UpperCAmelCase : int = my_max(get_height(root.get_right() ) , get_height(root.get_left() ) ) + 1 root.set_height(lowerCamelCase__ ) return root class __A : def __init__( self ): __UpperCAmelCase : MyNode | None = None def _snake_case ( self ): return get_height(self.root ) def _snake_case ( self , UpperCamelCase_ ): print("insert:" + str(UpperCamelCase_ ) ) __UpperCAmelCase : List[Any] = insert_node(self.root , UpperCamelCase_ ) def _snake_case ( self , UpperCamelCase_ ): print("delete:" + str(UpperCamelCase_ ) ) if self.root is None: print("Tree is empty!" 
) return __UpperCAmelCase : List[Any] = del_node(self.root , UpperCamelCase_ ) def __str__( self , ): # a level traversale, gives a more intuitive look on the tree __UpperCAmelCase : List[str] = "" __UpperCAmelCase : int = MyQueue() q.push(self.root ) __UpperCAmelCase : Optional[Any] = self.get_height() if layer == 0: return output __UpperCAmelCase : Union[str, Any] = 0 while not q.is_empty(): __UpperCAmelCase : List[Any] = q.pop() __UpperCAmelCase : Optional[Any] = " " * int(math.pow(2 , layer - 1 ) ) output += space if node is None: output += "*" q.push(UpperCamelCase_ ) q.push(UpperCamelCase_ ) else: output += str(node.get_data() ) q.push(node.get_left() ) q.push(node.get_right() ) output += space __UpperCAmelCase : List[Any] = cnt + 1 for i in range(1_00 ): if cnt == math.pow(2 , UpperCamelCase_ ) - 1: __UpperCAmelCase : Dict = layer - 1 if layer == 0: output += "\n*************************************" return output output += "\n" break output += "\n*************************************" return output def _lowercase ( ) -> None: """simple docstring""" import doctest doctest.testmod() if __name__ == "__main__": _test() _a : Dict = AVLtree() _a : List[str] = list(range(10)) random.shuffle(lst) for i in lst: t.insert(i) print(str(t)) random.shuffle(lst) for i in lst: t.del_node(i) print(str(t))
"""Union-by-rank disjoint set that tracks the size of the largest set."""
class DisjointSet:
    def __init__(self, set_counts: list) -> None:
        """Initialize with a list giving the number of elements in each set."""
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))

    def merge(self, src: int, dst: int) -> bool:
        """Merge two sets by rank; return True if a merge actually happened."""
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)

        if src_parent == dst_parent:
            return False

        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]

        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set: int) -> int:
        """Find the representative of `disj_set`, compressing the path as we go."""
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
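# Illustrative usage: three singleton sets of sizes 1, 2 and 3; merging the
# first and last produces a set of total size 4, which becomes the new max_set.
ds = DisjointSet([1, 2, 3])
ds.merge(0, 2)
print(ds.max_set)  # 4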
'''simple docstring'''
from torch import nn


def get_activation(act_fn: str) -> nn.Module:
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(f"Unsupported activation function: {act_fn}")
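# Minimal check (illustrative): each supported name maps to the matching torch module.
assert isinstance(get_activation("swish"), nn.SiLU)
assert isinstance(get_activation("gelu"), nn.GELU)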
"""Numerical integration with the composite trapezoidal rule."""
def method_1(boundary: list[float], steps: float) -> float:
    """Approximate the integral of f over `boundary` using `steps` trapezoids."""
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        # print(i)
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y


def make_points(a: float, b: float, h: float):
    """Yield the interior sample points a+h, a+2h, ..., b-h."""
    x = a + h
    # Half-step tolerance so float accumulation never drops the final point b-h.
    while x < (b - h) + h / 2:
        yield x
        x = x + h


def f(x: float) -> float:  # enter your function here
    y = (x - 0) * (x - 0)
    return y


def main() -> None:
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_1(boundary, steps)
    print(f"y = {y}")


if __name__ == "__main__":
    main()
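# Convergence check (illustrative): the exact integral of x^2 on [0, 1] is 1/3;
# doubling the resolution roughly quarters the trapezoidal error.
print(abs(method_1([0.0, 1.0], 10.0) - 1 / 3))  # ~1.7e-3
print(abs(method_1([0.0, 1.0], 20.0) - 1 / 3))  # ~4.2e-4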
'''simple docstring''' import logging import os import sys from dataclasses import dataclass, field from importlib import import_module from typing import Dict, List, Optional, Tuple import numpy as np from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score from torch import nn from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask import transformers from transformers import ( AutoConfig, AutoModelForTokenClassification, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process _a : List[Any] = logging.getLogger(__name__) @dataclass class __A : snake_case :str = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) snake_case :Optional[str] = field( default=__magic_name__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} ) snake_case :Optional[str] = field( default="NER" , metadata={"help": "Task type to fine tune in training (e.g. NER, POS, etc)"} ) snake_case :Optional[str] = field( default=__magic_name__ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) snake_case :bool = field(default=__magic_name__ , metadata={"help": "Set this flag to use fast tokenization."} ) # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script, # or just modify its tokenizer_config.json. snake_case :Optional[str] = field( default=__magic_name__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , ) @dataclass class __A : snake_case :str = field( metadata={"help": "The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."} ) snake_case :Optional[str] = field( default=__magic_name__ , metadata={"help": "Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."} , ) snake_case :int = field( default=128 , metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) snake_case :bool = field( default=__magic_name__ , metadata={"help": "Overwrite the cached training and evaluation sets"} ) def _lowercase ( ) -> str: """simple docstring""" __UpperCAmelCase : int = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : str = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : List[Any] = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use""" " --overwrite_output_dir to overcome." 
) __UpperCAmelCase : List[str] = import_module("tasks" ) try: __UpperCAmelCase : Optional[int] = getattr(lowerCamelCase__ , model_args.task_type ) __UpperCAmelCase : TokenClassificationTask = token_classification_task_clazz() except AttributeError: raise ValueError( f"""Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. """ f"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""" ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info("Training/evaluation parameters %s" , lowerCamelCase__ ) # Set seed set_seed(training_args.seed ) # Prepare CONLL-2003 task __UpperCAmelCase : Any = token_classification_task.get_labels(data_args.labels ) __UpperCAmelCase : Dict[int, str] = dict(enumerate(lowerCamelCase__ ) ) __UpperCAmelCase : Any = len(lowerCamelCase__ ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. __UpperCAmelCase : List[str] = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=lowerCamelCase__ , idalabel=lowerCamelCase__ , labelaid={label: i for i, label in enumerate(lowerCamelCase__ )} , cache_dir=model_args.cache_dir , ) __UpperCAmelCase : List[Any] = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , ) __UpperCAmelCase : Union[str, Any] = AutoModelForTokenClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=lowerCamelCase__ , cache_dir=model_args.cache_dir , ) # Get datasets __UpperCAmelCase : Tuple = ( TokenClassificationDataset( token_classification_task=lowerCamelCase__ , data_dir=data_args.data_dir , tokenizer=lowerCamelCase__ , labels=lowerCamelCase__ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , ) if training_args.do_train else None ) __UpperCAmelCase : int = ( TokenClassificationDataset( token_classification_task=lowerCamelCase__ , data_dir=data_args.data_dir , tokenizer=lowerCamelCase__ , labels=lowerCamelCase__ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , ) if training_args.do_eval else None ) def align_predictions(lowerCamelCase__ , lowerCamelCase__ ) -> Tuple[List[int], List[int]]: __UpperCAmelCase : Tuple = np.argmax(lowerCamelCase__ , axis=2 ) __UpperCAmelCase , __UpperCAmelCase : str = preds.shape __UpperCAmelCase : List[Any] = [[] for _ in range(lowerCamelCase__ )] __UpperCAmelCase : Union[str, Any] = [[] for _ in range(lowerCamelCase__ 
)] for i in range(lowerCamelCase__ ): for j in range(lowerCamelCase__ ): if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index: out_label_list[i].append(label_map[label_ids[i][j]] ) preds_list[i].append(label_map[preds[i][j]] ) return preds_list, out_label_list def compute_metrics(lowerCamelCase__ ) -> Dict: __UpperCAmelCase , __UpperCAmelCase : Any = align_predictions(p.predictions , p.label_ids ) return { "accuracy_score": accuracy_score(lowerCamelCase__ , lowerCamelCase__ ), "precision": precision_score(lowerCamelCase__ , lowerCamelCase__ ), "recall": recall_score(lowerCamelCase__ , lowerCamelCase__ ), "f1": fa_score(lowerCamelCase__ , lowerCamelCase__ ), } # Data collator __UpperCAmelCase : Dict = DataCollatorWithPadding(lowerCamelCase__ , pad_to_multiple_of=8 ) if training_args.fpaa else None # Initialize our Trainer __UpperCAmelCase : List[str] = Trainer( model=lowerCamelCase__ , args=lowerCamelCase__ , train_dataset=lowerCamelCase__ , eval_dataset=lowerCamelCase__ , compute_metrics=lowerCamelCase__ , data_collator=lowerCamelCase__ , ) # Training if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_process_zero(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation __UpperCAmelCase : List[str] = {} if training_args.do_eval: logger.info("*** Evaluate ***" ) __UpperCAmelCase : Tuple = trainer.evaluate() __UpperCAmelCase : List[str] = os.path.join(training_args.output_dir , "eval_results.txt" ) if trainer.is_world_process_zero(): with open(lowerCamelCase__ , "w" ) as writer: logger.info("***** Eval results *****" ) for key, value in result.items(): logger.info(" %s = %s" , lowerCamelCase__ , lowerCamelCase__ ) writer.write("%s = %s\n" % (key, value) ) results.update(lowerCamelCase__ ) # Predict if training_args.do_predict: __UpperCAmelCase : List[Any] = TokenClassificationDataset( token_classification_task=lowerCamelCase__ , data_dir=data_args.data_dir , tokenizer=lowerCamelCase__ , labels=lowerCamelCase__ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , ) __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : List[Any] = trainer.predict(lowerCamelCase__ ) __UpperCAmelCase , __UpperCAmelCase : int = align_predictions(lowerCamelCase__ , lowerCamelCase__ ) __UpperCAmelCase : str = os.path.join(training_args.output_dir , "test_results.txt" ) if trainer.is_world_process_zero(): with open(lowerCamelCase__ , "w" ) as writer: for key, value in metrics.items(): logger.info(" %s = %s" , lowerCamelCase__ , lowerCamelCase__ ) writer.write("%s = %s\n" % (key, value) ) # Save predictions __UpperCAmelCase : Any = os.path.join(training_args.output_dir , "test_predictions.txt" ) if trainer.is_world_process_zero(): with open(lowerCamelCase__ , "w" ) as writer: with open(os.path.join(data_args.data_dir , "test.txt" ) , "r" ) as f: token_classification_task.write_predictions_to_file(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) return results def _lowercase ( lowerCamelCase__ ) -> Optional[int]: """simple docstring""" main() if __name__ == "__main__": main()
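# A minimal, runnable sketch of the prediction-alignment step used above, kept
# to plain numpy. Assumptions (not taken from the original): logits of shape
# (batch, seq_len, num_labels), gold ids padded with -100 (the default
# ignore_index of torch.nn.CrossEntropyLoss), and an illustrative `label_map`
# from label id to label string.
import numpy as np

IGNORE_INDEX = -100


def align_predictions(predictions, label_ids, label_map):
    preds = np.argmax(predictions, axis=2)  # (batch, seq_len)
    batch_size, seq_len = preds.shape
    out_label_list = [[] for _ in range(batch_size)]
    preds_list = [[] for _ in range(batch_size)]
    for i in range(batch_size):
        for j in range(seq_len):
            if label_ids[i, j] != IGNORE_INDEX:  # skip padding / special tokens
                out_label_list[i].append(label_map[label_ids[i][j]])
                preds_list[i].append(label_map[preds[i][j]])
    return preds_list, out_label_list


# Toy check: one sentence, four positions (one of them padding), two labels.
logits = np.array([[[0.9, 0.1], [0.2, 0.8], [0.7, 0.3], [0.6, 0.4]]])
gold = np.array([[0, 1, -100, 0]])
print(align_predictions(logits, gold, {0: "O", 1: "B-PER"}))
# -> ([['O', 'B-PER', 'O']], [['O', 'B-PER', 'O']])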
10
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, is_vision_available, ) _a : str = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a : str = ["ViTFeatureExtractor"] _a : Dict = ["ViTImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a : int = [ "VIT_PRETRAINED_MODEL_ARCHIVE_LIST", "ViTForImageClassification", "ViTForMaskedImageModeling", "ViTModel", "ViTPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a : List[str] = [ "TFViTForImageClassification", "TFViTModel", "TFViTPreTrainedModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a : Dict = [ "FlaxViTForImageClassification", "FlaxViTModel", "FlaxViTPreTrainedModel", ] if TYPE_CHECKING: from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_vit import ViTFeatureExtractor from .image_processing_vit import ViTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vit import ( VIT_PRETRAINED_MODEL_ARCHIVE_LIST, ViTForImageClassification, ViTForMaskedImageModeling, ViTModel, ViTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel else: import sys _a : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
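# Hedged sketch of the lazy-import behaviour that `_LazyModule` supplies in the
# __init__ above (and in the similar __init__ below): attribute lookups resolve
# and cache symbols on first access, so optional backends are only touched when
# used. The "submodule imports" are faked with factory lambdas so this snippet
# runs standalone; `fake_pkg` and `MyConfig` are illustrative names, not the
# real transformers API.
import sys
from types import ModuleType


class LazyModule(ModuleType):
    def __init__(self, name, symbol_factories):
        super().__init__(name)
        self._symbol_factories = dict(symbol_factories)

    def __getattr__(self, name):
        # only reached when normal attribute lookup fails (PEP 562 style)
        if name in self._symbol_factories:
            value = self._symbol_factories[name]()  # "import" on first use
            setattr(self, name, value)  # cache so later lookups are direct
            return value
        raise AttributeError(f"module {self.__name__!r} has no attribute {name!r}")


sys.modules["fake_pkg"] = LazyModule("fake_pkg", {"MyConfig": lambda: type("MyConfig", (), {})})

import fake_pkg

print(fake_pkg.MyConfig)  # resolved lazily, only at this line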
10
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) _a : Union[str, Any] = { "configuration_blenderbot_small": [ "BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP", "BlenderbotSmallConfig", "BlenderbotSmallOnnxConfig", ], "tokenization_blenderbot_small": ["BlenderbotSmallTokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a : Tuple = ["BlenderbotSmallTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a : Dict = [ "BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST", "BlenderbotSmallForCausalLM", "BlenderbotSmallForConditionalGeneration", "BlenderbotSmallModel", "BlenderbotSmallPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a : Optional[Any] = [ "TFBlenderbotSmallForConditionalGeneration", "TFBlenderbotSmallModel", "TFBlenderbotSmallPreTrainedModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a : Optional[int] = [ "FlaxBlenderbotSmallForConditionalGeneration", "FlaxBlenderbotSmallModel", "FlaxBlenderbotSmallPreTrainedModel", ] if TYPE_CHECKING: from .configuration_blenderbot_small import ( BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP, BlenderbotSmallConfig, BlenderbotSmallOnnxConfig, ) from .tokenization_blenderbot_small import BlenderbotSmallTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blenderbot_small import ( BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST, BlenderbotSmallForCausalLM, BlenderbotSmallForConditionalGeneration, BlenderbotSmallModel, BlenderbotSmallPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_blenderbot_small import ( TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel, TFBlenderbotSmallPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_blenderbot_small import ( FlaxBlenderbotSmallForConditionalGeneration, FlaxBlenderbotSmallModel, FlaxBlenderbotSmallPreTrainedModel, ) else: import sys _a : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
10
'''simple docstring''' import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging _a : str = logging.get_logger(__name__) _a : Tuple = "▁" _a : Optional[int] = {"vocab_file": "sentencepiece.bpe.model"} _a : Tuple = { "vocab_file": { "xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model", "xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model", "xlm-roberta-large-finetuned-conll02-dutch": ( "https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model" ), "xlm-roberta-large-finetuned-conll02-spanish": ( "https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model" ), "xlm-roberta-large-finetuned-conll03-english": ( "https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model" ), "xlm-roberta-large-finetuned-conll03-german": ( "https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model" ), } } _a : Optional[Any] = { "xlm-roberta-base": 512, "xlm-roberta-large": 512, "xlm-roberta-large-finetuned-conll02-dutch": 512, "xlm-roberta-large-finetuned-conll02-spanish": 512, "xlm-roberta-large-finetuned-conll03-english": 512, "xlm-roberta-large-finetuned-conll03-german": 512, } class __A (__magic_name__ ): snake_case :Union[str, Any] = VOCAB_FILES_NAMES snake_case :Any = PRETRAINED_VOCAB_FILES_MAP snake_case :Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case :Optional[int] = ["input_ids", "attention_mask"] def __init__( self , UpperCamelCase_ , UpperCamelCase_="<s>" , UpperCamelCase_="</s>" , UpperCamelCase_="</s>" , UpperCamelCase_="<s>" , UpperCamelCase_="<unk>" , UpperCamelCase_="<pad>" , UpperCamelCase_="<mask>" , UpperCamelCase_ = None , **UpperCamelCase_ , ): # Mask token behave like a normal word, i.e. include the space before it __UpperCAmelCase : Optional[int] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else mask_token __UpperCAmelCase : int = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase_ , ) __UpperCAmelCase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(UpperCamelCase_ ) ) __UpperCAmelCase : Union[str, Any] = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # Mimic fairseq token-to-id alignment for the first 4 token __UpperCAmelCase : Optional[Any] = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab __UpperCAmelCase : List[Any] = 1 __UpperCAmelCase : Optional[Any] = len(self.sp_model ) + self.fairseq_offset __UpperCAmelCase : str = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self ): __UpperCAmelCase : List[str] = self.__dict__.copy() __UpperCAmelCase : str = None __UpperCAmelCase : str = self.sp_model.serialized_model_proto() return state def __setstate__( self , UpperCamelCase_ ): __UpperCAmelCase : Union[str, Any] = d # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): __UpperCAmelCase : Tuple = {} __UpperCAmelCase : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] __UpperCAmelCase : List[Any] = [self.cls_token_id] __UpperCAmelCase : Union[str, Any] = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCamelCase_ , token_ids_a=UpperCamelCase_ , already_has_special_tokens=UpperCamelCase_ ) if token_ids_a is None: return [1] + ([0] * len(UpperCamelCase_ )) + [1] return [1] + ([0] * len(UpperCamelCase_ )) + [1, 1] + ([0] * len(UpperCamelCase_ )) + [1] def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ): __UpperCAmelCase : Dict = [self.sep_token_id] __UpperCAmelCase : List[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def _snake_case ( self ): return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token def _snake_case ( self ): __UpperCAmelCase : Union[str, Any] = {self.convert_ids_to_tokens(UpperCamelCase_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def _snake_case ( self , UpperCamelCase_ ): return self.sp_model.encode(UpperCamelCase_ , out_type=UpperCamelCase_ ) def _snake_case ( self , UpperCamelCase_ ): if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] __UpperCAmelCase : Optional[int] = self.sp_model.PieceToId(UpperCamelCase_ ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def _snake_case ( self , UpperCamelCase_ ): if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def _snake_case ( self , UpperCamelCase_ ): __UpperCAmelCase : Tuple = "".join(UpperCamelCase_ ).replace(UpperCamelCase_ , " " ).strip() return out_string def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ): if not os.path.isdir(UpperCamelCase_ ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return __UpperCAmelCase : List[str] = os.path.join( UpperCamelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase_ ) 
and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , UpperCamelCase_ ) elif not os.path.isfile(self.vocab_file ): with open(UpperCamelCase_ , "wb" ) as fi: __UpperCAmelCase : Optional[int] = self.sp_model.serialized_model_proto() fi.write(UpperCamelCase_ ) return (out_vocab_file,)
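# Hedged, self-contained sketch of the fairseq/SentencePiece id alignment used
# by the tokenizer above: the first four fairseq ids are pinned specials, every
# SentencePiece id is shifted by a fixed offset, and spm id 0 (its <unk>) must
# map back to the fairseq unk id. The toy `spm_piece_to_id` dict stands in for
# a real SentencePiece model (which would use sp_model.PieceToId).
FAIRSEQ_TOKENS_TO_IDS = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
FAIRSEQ_OFFSET = 1  # fairseq has one extra token (<pad>) before the spm vocab

spm_piece_to_id = {"<unk>": 0, "<s>": 1, "</s>": 2, ",": 3, ".": 4}


def token_to_id(token):
    if token in FAIRSEQ_TOKENS_TO_IDS:
        return FAIRSEQ_TOKENS_TO_IDS[token]
    spm_id = spm_piece_to_id.get(token, 0)
    # spm returns 0 for unknown pieces; keep that as the fairseq unk id
    return spm_id + FAIRSEQ_OFFSET if spm_id else FAIRSEQ_TOKENS_TO_IDS["<unk>"]


print(token_to_id(","))      # 4: spm id 3 shifted by the offset
print(token_to_id("<pad>"))  # 1: pinned fairseq special
print(token_to_id("xyz"))    # 3: unknown piece falls back to <unk>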
10
1
'''simple docstring''' import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging _a : List[Any] = logging.get_logger(__name__) _a : Tuple = { "microsoft/unispeech-sat-base-100h-libri-ft": ( "https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json" ), # See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat } class __A (__magic_name__ ): snake_case :Optional[int] = "unispeech-sat" def __init__( self , UpperCamelCase_=32 , UpperCamelCase_=7_68 , UpperCamelCase_=12 , UpperCamelCase_=12 , UpperCamelCase_=30_72 , UpperCamelCase_="gelu" , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=0.0 , UpperCamelCase_=0.0 , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=0.0_2 , UpperCamelCase_=1E-5 , UpperCamelCase_="group" , UpperCamelCase_="gelu" , UpperCamelCase_=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , UpperCamelCase_=(5, 2, 2, 2, 2, 2, 2) , UpperCamelCase_=(10, 3, 3, 3, 3, 2, 2) , UpperCamelCase_=False , UpperCamelCase_=1_28 , UpperCamelCase_=16 , UpperCamelCase_=False , UpperCamelCase_=True , UpperCamelCase_=0.0_5 , UpperCamelCase_=10 , UpperCamelCase_=2 , UpperCamelCase_=0.0 , UpperCamelCase_=10 , UpperCamelCase_=0 , UpperCamelCase_=3_20 , UpperCamelCase_=2 , UpperCamelCase_=0.1 , UpperCamelCase_=1_00 , UpperCamelCase_=2_56 , UpperCamelCase_=2_56 , UpperCamelCase_=0.1 , UpperCamelCase_="mean" , UpperCamelCase_=False , UpperCamelCase_=False , UpperCamelCase_=2_56 , UpperCamelCase_=(5_12, 5_12, 5_12, 5_12, 15_00) , UpperCamelCase_=(5, 3, 3, 1, 1) , UpperCamelCase_=(1, 2, 3, 1, 1) , UpperCamelCase_=5_12 , UpperCamelCase_=0 , UpperCamelCase_=1 , UpperCamelCase_=2 , UpperCamelCase_=5_04 , **UpperCamelCase_ , ): super().__init__(**UpperCamelCase_ , pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ ) __UpperCAmelCase : Tuple = hidden_size __UpperCAmelCase : Union[str, Any] = feat_extract_norm __UpperCAmelCase : Union[str, Any] = feat_extract_activation __UpperCAmelCase : List[Any] = list(UpperCamelCase_ ) __UpperCAmelCase : Optional[int] = list(UpperCamelCase_ ) __UpperCAmelCase : Optional[Any] = list(UpperCamelCase_ ) __UpperCAmelCase : Tuple = conv_bias __UpperCAmelCase : Optional[int] = num_conv_pos_embeddings __UpperCAmelCase : int = num_conv_pos_embedding_groups __UpperCAmelCase : List[str] = len(self.conv_dim ) __UpperCAmelCase : Optional[Any] = num_hidden_layers __UpperCAmelCase : str = intermediate_size __UpperCAmelCase : Dict = hidden_act __UpperCAmelCase : List[Any] = num_attention_heads __UpperCAmelCase : List[str] = hidden_dropout __UpperCAmelCase : Dict = attention_dropout __UpperCAmelCase : Optional[Any] = activation_dropout __UpperCAmelCase : str = feat_proj_dropout __UpperCAmelCase : Dict = final_dropout __UpperCAmelCase : List[str] = layerdrop __UpperCAmelCase : Union[str, Any] = layer_norm_eps __UpperCAmelCase : Optional[int] = initializer_range __UpperCAmelCase : Optional[Any] = vocab_size __UpperCAmelCase : Dict = num_clusters __UpperCAmelCase : List[Any] = do_stable_layer_norm __UpperCAmelCase : List[str] = use_weighted_layer_sum if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( "Configuration for convolutional layers is incorrect. 
It is required that `len(config.conv_dim)` ==" " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =" f""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,""" f""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 __UpperCAmelCase : Union[str, Any] = apply_spec_augment __UpperCAmelCase : Optional[Any] = mask_time_prob __UpperCAmelCase : Optional[Any] = mask_time_length __UpperCAmelCase : Any = mask_time_min_masks __UpperCAmelCase : Optional[int] = mask_feature_prob __UpperCAmelCase : List[str] = mask_feature_length __UpperCAmelCase : int = mask_feature_min_masks # parameters for pretraining with codevector quantized representations __UpperCAmelCase : Union[str, Any] = num_codevectors_per_group __UpperCAmelCase : List[Any] = num_codevector_groups __UpperCAmelCase : Tuple = contrastive_logits_temperature __UpperCAmelCase : Optional[int] = feat_quantizer_dropout __UpperCAmelCase : Optional[int] = num_negatives __UpperCAmelCase : Tuple = codevector_dim __UpperCAmelCase : Any = proj_codevector_dim __UpperCAmelCase : List[str] = diversity_loss_weight # ctc loss __UpperCAmelCase : str = ctc_loss_reduction __UpperCAmelCase : Optional[Any] = ctc_zero_infinity # SequenceClassification-specific parameter. Feel free to ignore for other classes. __UpperCAmelCase : str = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. __UpperCAmelCase : List[str] = list(UpperCamelCase_ ) __UpperCAmelCase : Union[str, Any] = list(UpperCamelCase_ ) __UpperCAmelCase : Dict = list(UpperCamelCase_ ) __UpperCAmelCase : List[str] = xvector_output_dim @property def _snake_case ( self ): return functools.reduce(operator.mul , self.conv_stride , 1 )
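# Runnable sketch of the two config utilities above: the parallel-list length
# check for the convolutional feature extractor, and the final property, which
# computes the total input-samples-per-output-frame stride as the product of
# the per-layer strides. `inputs_to_logits_ratio` is an illustrative name; the
# values are the config defaults shown above.
import functools
import operator

conv_dim = (512, 512, 512, 512, 512, 512, 512)
conv_stride = (5, 2, 2, 2, 2, 2, 2)
conv_kernel = (10, 3, 3, 3, 3, 2, 2)

if not (len(conv_dim) == len(conv_stride) == len(conv_kernel)):
    raise ValueError("conv_dim, conv_stride and conv_kernel must have equal length")

# one output frame advances by the product of all strides, in input samples
inputs_to_logits_ratio = functools.reduce(operator.mul, conv_stride, 1)
print(inputs_to_logits_ratio)  # 320 samples per frame (20 ms at 16 kHz)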
10
'''simple docstring''' import time import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers.generation import ( MaxLengthCriteria, MaxNewTokensCriteria, MaxTimeCriteria, StoppingCriteriaList, validate_stopping_criteria, ) @require_torch class __A (unittest.TestCase ): def _snake_case ( self , UpperCamelCase_ ): __UpperCAmelCase : List[str] = 3 __UpperCAmelCase : Tuple = 2_50 __UpperCAmelCase : str = ids_tensor((batch_size, length) , UpperCamelCase_ ) __UpperCAmelCase : Any = torch.ones((batch_size, length) , device=UpperCamelCase_ , dtype=torch.float ) / length return input_ids, scores def _snake_case ( self ): __UpperCAmelCase , __UpperCAmelCase : Tuple = self._get_tensors(5 ) __UpperCAmelCase : Tuple = StoppingCriteriaList( [ MaxLengthCriteria(max_length=10 ), MaxTimeCriteria(max_time=0.1 ), ] ) self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) __UpperCAmelCase , __UpperCAmelCase : int = self._get_tensors(9 ) self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) __UpperCAmelCase , __UpperCAmelCase : Optional[int] = self._get_tensors(10 ) self.assertTrue(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) def _snake_case ( self ): __UpperCAmelCase : int = MaxLengthCriteria(max_length=10 ) __UpperCAmelCase , __UpperCAmelCase : Tuple = self._get_tensors(5 ) self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) __UpperCAmelCase , __UpperCAmelCase : Dict = self._get_tensors(9 ) self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) __UpperCAmelCase , __UpperCAmelCase : Optional[int] = self._get_tensors(10 ) self.assertTrue(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) def _snake_case ( self ): __UpperCAmelCase : Optional[Any] = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 ) __UpperCAmelCase , __UpperCAmelCase : List[str] = self._get_tensors(5 ) self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) __UpperCAmelCase , __UpperCAmelCase : Dict = self._get_tensors(9 ) self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) __UpperCAmelCase , __UpperCAmelCase : Optional[Any] = self._get_tensors(10 ) self.assertTrue(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) __UpperCAmelCase : Union[str, Any] = StoppingCriteriaList([criteria] ) self.assertEqual(criteria_list.max_length , 10 ) def _snake_case ( self ): __UpperCAmelCase , __UpperCAmelCase : Optional[Any] = self._get_tensors(5 ) __UpperCAmelCase : str = MaxTimeCriteria(max_time=0.1 ) self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) __UpperCAmelCase : str = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 ) self.assertTrue(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) def _snake_case ( self ): validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 10 ) with self.assertWarns(UpperCamelCase_ ): validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 11 ) __UpperCAmelCase : Optional[int] = validate_stopping_criteria(StoppingCriteriaList() , 11 ) self.assertEqual(len(UpperCamelCase_ ) , 1 )
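# Hedged usage sketch of the criteria exercised by the tests above (requires
# torch and transformers; the exact return type of a criteria call varies
# across transformers versions - older releases return a bool, newer ones a
# per-batch tensor - so treat the printed values as illustrative).
import torch
from transformers.generation import MaxLengthCriteria, StoppingCriteriaList

criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=10)])

short_ids = torch.ones((3, 5), dtype=torch.long)   # 5 generated tokens so far
long_ids = torch.ones((3, 10), dtype=torch.long)   # 10 generated tokens so far
scores = torch.ones((3, 5))                        # ignored by MaxLengthCriteria

print(criteria(short_ids, scores))  # below max_length -> falsy, keep generating
print(criteria(long_ids, scores))   # reached max_length -> truthy, stop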
10
1
'''simple docstring'''
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors


def _lowercase ( lowerCamelCase__ ) -> int:
    """simple docstring"""
    __UpperCAmelCase : Any = prime_factors(lowerCamelCase__ )
    if is_square_free(__UpperCAmelCase ):
        return -1 if len(__UpperCAmelCase ) % 2 else 1
    return 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
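# Self-contained sketch of the Möbius function above, with the two helper
# imports inlined so it runs standalone (`prime_factors` and `mobius` are
# illustrative reimplementations, not the `maths` package versions).
def prime_factors(n):
    factors, d = [], 2
    while d * d <= n:
        while n % d == 0:
            factors.append(d)
            n //= d
        d += 1
    if n > 1:
        factors.append(n)
    return factors


def mobius(n):
    factors = prime_factors(n)
    if len(set(factors)) == len(factors):  # square-free: no repeated prime
        return -1 if len(factors) % 2 else 1
    return 0


print([mobius(n) for n in range(1, 11)])  # [1, -1, -1, 0, -1, 1, -1, 0, 0, 1]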
10
'''simple docstring''' import json import re from typing import TYPE_CHECKING, List, Optional, Tuple, Union import numpy as np from ...utils import is_tf_available, is_torch_available, logging if TYPE_CHECKING: if is_torch_available(): import torch if is_tf_available(): import tensorflow as tf from tokenizers import pre_tokenizers from ...tokenization_utils_base import BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from .tokenization_codegen import CodeGenTokenizer _a : Union[str, Any] = logging.get_logger(__name__) _a : Any = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"} _a : Tuple = { "vocab_file": { "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json", }, "merges_file": { "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt", }, "tokenizer_file": { "Salesforce/codegen-350M-mono": ( "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json" ), }, } _a : Dict = { "Salesforce/codegen-350M-mono": 2048, } class __A (__magic_name__ ): snake_case :Optional[Any] = VOCAB_FILES_NAMES snake_case :str = PRETRAINED_VOCAB_FILES_MAP snake_case :Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case :Tuple = ["input_ids", "attention_mask"] snake_case :Dict = CodeGenTokenizer def __init__( self , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_="<|endoftext|>" , UpperCamelCase_="<|endoftext|>" , UpperCamelCase_="<|endoftext|>" , UpperCamelCase_=False , **UpperCamelCase_ , ): super().__init__( UpperCamelCase_ , UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , unk_token=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , **UpperCamelCase_ , ) if kwargs.pop("add_bos_token" , UpperCamelCase_ ): __UpperCAmelCase : int = kwargs.pop("name_or_path" , "" ) raise ValueError( "Currenty GPT2's fast tokenizer does NOT support adding a BOS token." "Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n" f"""`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n""" f"""`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n""" "This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005." " so that the fast tokenizer works correctly." ) __UpperCAmelCase : Any = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("add_prefix_space" , UpperCamelCase_ ) != add_prefix_space: __UpperCAmelCase : str = getattr(UpperCamelCase_ , pre_tok_state.pop("type" ) ) __UpperCAmelCase : Optional[int] = add_prefix_space __UpperCAmelCase : Tuple = pre_tok_class(**UpperCamelCase_ ) __UpperCAmelCase : Tuple = add_prefix_space def _snake_case ( self , *UpperCamelCase_ , **UpperCamelCase_ ): __UpperCAmelCase : Optional[Any] = kwargs.get("is_split_into_words" , UpperCamelCase_ ) assert self.add_prefix_space or not is_split_into_words, ( f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ "to use it with pretokenized inputs." 
) return super()._batch_encode_plus(*UpperCamelCase_ , **UpperCamelCase_ ) def _snake_case ( self , *UpperCamelCase_ , **UpperCamelCase_ ): __UpperCAmelCase : Any = kwargs.get("is_split_into_words" , UpperCamelCase_ ) assert self.add_prefix_space or not is_split_into_words, ( f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ "to use it with pretokenized inputs." ) return super()._encode_plus(*UpperCamelCase_ , **UpperCamelCase_ ) def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ): __UpperCAmelCase : int = self._tokenizer.model.save(UpperCamelCase_ , name=UpperCamelCase_ ) return tuple(UpperCamelCase_ ) def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = False , UpperCamelCase_ = None , UpperCamelCase_ = None , **UpperCamelCase_ , ): __UpperCAmelCase : str = super().decode( token_ids=UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ , clean_up_tokenization_spaces=UpperCamelCase_ , **UpperCamelCase_ , ) if truncate_before_pattern is not None and len(UpperCamelCase_ ) > 0: __UpperCAmelCase : Union[str, Any] = self.truncate(UpperCamelCase_ , UpperCamelCase_ ) return decoded_text def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ ): def find_re(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ): __UpperCAmelCase : Dict = pattern.search(UpperCamelCase_ , UpperCamelCase_ ) return m.start() if m else -1 __UpperCAmelCase : List[str] = [re.compile(UpperCamelCase_ , re.MULTILINE ) for pattern in truncate_before_pattern] __UpperCAmelCase : Optional[Any] = list(re.finditer("^print" , UpperCamelCase_ , re.MULTILINE ) ) if len(UpperCamelCase_ ) > 1: __UpperCAmelCase : List[Any] = completion[: prints[1].start()] __UpperCAmelCase : Tuple = list(re.finditer("^def" , UpperCamelCase_ , re.MULTILINE ) ) if len(UpperCamelCase_ ) > 1: __UpperCAmelCase : Union[str, Any] = completion[: defs[1].start()] __UpperCAmelCase : Dict = 0 __UpperCAmelCase : Dict = [ pos for pos in [find_re(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) for terminal in terminals] if pos != -1 ] if len(UpperCamelCase_ ) > 0: return completion[: min(UpperCamelCase_ )] else: return completion
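# Hedged sketch of the completion-truncation core in the `truncate` method
# above: cut the decoded text at the earliest match of any "stop" regex,
# searched in MULTILINE mode. This keeps only the min-start truncation; the
# real method additionally clips at a second top-level ^print/^def found via
# re.finditer and resumes searching after earlier cuts.
import re


def truncate(completion, truncate_before_patterns):
    starts = []
    for pattern in truncate_before_patterns:
        match = re.search(pattern, completion, re.MULTILINE)
        if match is not None:
            starts.append(match.start())
    return completion[: min(starts)] if starts else completion


text = "def add(a, b):\n    return a + b\n\ndef spam():\n    pass\n"
# stop as soon as a second top-level function begins
print(truncate(text, [r"\ndef "]))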
10
1
'''simple docstring''' import unittest from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin _a : Optional[Any] = get_tests_dir("fixtures/spiece.model") @require_sentencepiece @require_tokenizers class __A (__magic_name__ , unittest.TestCase ): snake_case :Optional[Any] = DebertaVaTokenizer snake_case :Dict = DebertaVaTokenizerFast snake_case :str = True snake_case :List[str] = True def _snake_case ( self ): super().setUp() # We have a SentencePiece fixture for testing __UpperCAmelCase : List[Any] = DebertaVaTokenizer(UpperCamelCase_ , unk_token="<unk>" ) tokenizer.save_pretrained(self.tmpdirname ) def _snake_case ( self , UpperCamelCase_ ): __UpperCAmelCase : int = "this is a test" __UpperCAmelCase : Tuple = "this is a test" return input_text, output_text def _snake_case ( self ): __UpperCAmelCase : Tuple = "<pad>" __UpperCAmelCase : int = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase_ ) , UpperCamelCase_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase_ ) , UpperCamelCase_ ) def _snake_case ( self ): __UpperCAmelCase : Optional[Any] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<pad>" ) self.assertEqual(vocab_keys[1] , "<unk>" ) self.assertEqual(vocab_keys[-1] , "[PAD]" ) self.assertEqual(len(UpperCamelCase_ ) , 3_00_01 ) def _snake_case ( self ): self.assertEqual(self.get_tokenizer().vocab_size , 3_00_00 ) def _snake_case ( self ): # fmt: off __UpperCAmelCase : List[str] = " \tHeLLo!how \n Are yoU? " __UpperCAmelCase : Tuple = ["▁hello", "!", "how", "▁are", "▁you", "?"] # fmt: on __UpperCAmelCase : List[Any] = DebertaVaTokenizer(UpperCamelCase_ , do_lower_case=UpperCamelCase_ ) __UpperCAmelCase : str = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) __UpperCAmelCase : List[str] = DebertaVaTokenizerFast(UpperCamelCase_ , do_lower_case=UpperCamelCase_ ) __UpperCAmelCase : int = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." ) def _snake_case ( self ): pass @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." ) def _snake_case ( self ): pass def _snake_case ( self ): # fmt: off __UpperCAmelCase : List[Any] = "I was born in 92000, and this is falsé." 
__UpperCAmelCase : Optional[int] = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ] # fmt: on __UpperCAmelCase : Optional[Any] = DebertaVaTokenizer(UpperCamelCase_ , split_by_punct=UpperCamelCase_ ) __UpperCAmelCase : Dict = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) __UpperCAmelCase : Dict = DebertaVaTokenizerFast(UpperCamelCase_ , split_by_punct=UpperCamelCase_ ) __UpperCAmelCase : Union[str, Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) def _snake_case ( self ): # fmt: off __UpperCAmelCase : List[str] = "I was born in 92000, and this is falsé." __UpperCAmelCase : Optional[int] = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ] # fmt: on __UpperCAmelCase : str = DebertaVaTokenizer(UpperCamelCase_ , do_lower_case=UpperCamelCase_ , split_by_punct=UpperCamelCase_ ) __UpperCAmelCase : Dict = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) __UpperCAmelCase : Union[str, Any] = DebertaVaTokenizerFast(UpperCamelCase_ , do_lower_case=UpperCamelCase_ , split_by_punct=UpperCamelCase_ ) __UpperCAmelCase : int = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) def _snake_case ( self ): # fmt: off __UpperCAmelCase : Optional[int] = "I was born in 92000, and this is falsé." __UpperCAmelCase : str = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ] # fmt: on __UpperCAmelCase : List[str] = DebertaVaTokenizer(UpperCamelCase_ , do_lower_case=UpperCamelCase_ , split_by_punct=UpperCamelCase_ ) __UpperCAmelCase : Optional[Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) __UpperCAmelCase : List[str] = DebertaVaTokenizerFast(UpperCamelCase_ , do_lower_case=UpperCamelCase_ , split_by_punct=UpperCamelCase_ ) __UpperCAmelCase : Optional[int] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) def _snake_case ( self ): # fmt: off __UpperCAmelCase : Union[str, Any] = "I was born in 92000, and this is falsé." 
__UpperCAmelCase : Any = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ] # fmt: on __UpperCAmelCase : List[Any] = DebertaVaTokenizer(UpperCamelCase_ , do_lower_case=UpperCamelCase_ , split_by_punct=UpperCamelCase_ ) __UpperCAmelCase : Dict = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) __UpperCAmelCase : int = DebertaVaTokenizerFast(UpperCamelCase_ , do_lower_case=UpperCamelCase_ , split_by_punct=UpperCamelCase_ ) __UpperCAmelCase : Optional[Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) def _snake_case ( self ): # fmt: off __UpperCAmelCase : Optional[int] = " \tHeLLo!how \n Are yoU? " __UpperCAmelCase : int = ["▁", "<unk>", "e", "<unk>", "o", "!", "how", "▁", "<unk>", "re", "▁yo", "<unk>", "?"] # fmt: on __UpperCAmelCase : int = DebertaVaTokenizer(UpperCamelCase_ , do_lower_case=UpperCamelCase_ , split_by_punct=UpperCamelCase_ ) __UpperCAmelCase : Dict = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) __UpperCAmelCase : Tuple = DebertaVaTokenizerFast(UpperCamelCase_ , do_lower_case=UpperCamelCase_ , split_by_punct=UpperCamelCase_ ) __UpperCAmelCase : Union[str, Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) def _snake_case ( self ): __UpperCAmelCase : List[Any] = self.get_tokenizer() __UpperCAmelCase : Union[str, Any] = self.get_rust_tokenizer() __UpperCAmelCase : Union[str, Any] = "I was born in 92000, and this is falsé." 
__UpperCAmelCase : str = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) ) __UpperCAmelCase : Union[str, Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) __UpperCAmelCase : Optional[int] = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) __UpperCAmelCase : Optional[int] = rust_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) __UpperCAmelCase : Optional[int] = self.get_rust_tokenizer() __UpperCAmelCase : int = tokenizer.encode(UpperCamelCase_ ) __UpperCAmelCase : Any = rust_tokenizer.encode(UpperCamelCase_ ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) def _snake_case ( self ): __UpperCAmelCase : str = "This is a test" __UpperCAmelCase : List[Any] = [13, 1, 43_98, 25, 21, 12_89] __UpperCAmelCase : Optional[int] = ["▁", "T", "his", "▁is", "▁a", "▁test"] __UpperCAmelCase : Dict = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"] __UpperCAmelCase : int = DebertaVaTokenizer(UpperCamelCase_ , keep_accents=UpperCamelCase_ ) __UpperCAmelCase : List[Any] = DebertaVaTokenizerFast(UpperCamelCase_ , keep_accents=UpperCamelCase_ ) __UpperCAmelCase : Tuple = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) __UpperCAmelCase : Tuple = tokenizer.tokenize(UpperCamelCase_ ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) __UpperCAmelCase : Dict = tokenizer.convert_ids_to_tokens(UpperCamelCase_ ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) __UpperCAmelCase : str = rust_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) __UpperCAmelCase : Optional[int] = rust_tokenizer.tokenize(UpperCamelCase_ ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) __UpperCAmelCase : str = rust_tokenizer.convert_ids_to_tokens(UpperCamelCase_ ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) # fmt: off __UpperCAmelCase : Dict = "I was born in 92000, and this is falsé." 
__UpperCAmelCase : Dict = [13, 1, 23, 3_86, 19, 5_61, 30_50, 15, 17, 48, 25, 82_56, 18, 1, 9] __UpperCAmelCase : Dict = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ] __UpperCAmelCase : Any = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ] # fmt: on __UpperCAmelCase : List[Any] = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) __UpperCAmelCase : Dict = tokenizer.tokenize(UpperCamelCase_ ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) __UpperCAmelCase : int = tokenizer.convert_ids_to_tokens(UpperCamelCase_ ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) __UpperCAmelCase : Dict = rust_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) __UpperCAmelCase : Union[str, Any] = rust_tokenizer.tokenize(UpperCamelCase_ ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) __UpperCAmelCase : Union[str, Any] = rust_tokenizer.convert_ids_to_tokens(UpperCamelCase_ ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) def _snake_case ( self ): __UpperCAmelCase : List[str] = DebertaVaTokenizer(UpperCamelCase_ ) __UpperCAmelCase : Optional[Any] = tokenizer.encode("sequence builders" ) __UpperCAmelCase : int = tokenizer.encode("multi-sequence build" ) __UpperCAmelCase : Tuple = tokenizer.build_inputs_with_special_tokens(UpperCamelCase_ ) __UpperCAmelCase : int = tokenizer.build_inputs_with_special_tokens(UpperCamelCase_ , UpperCamelCase_ ) self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , UpperCamelCase_ ) self.assertEqual( [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , UpperCamelCase_ , ) @slow def _snake_case ( self ): # fmt: off __UpperCAmelCase : int = {"input_ids": [[1, 3_98_67, 36, 1_93_90, 4_86, 27, 3_50_52, 8_14_36, 18, 6_06_85, 12_25, 7, 3_50_52, 8_14_36, 18, 93_67, 1_68_99, 18, 1_59_37, 53, 5_94, 7_73, 18, 1_62_87, 3_04_65, 36, 1_59_37, 6, 4_11_39, 38, 3_69_79, 6_07_63, 1_91, 6, 3_41_32, 99, 6, 5_05_38, 3_90, 4_32_30, 6, 3_41_32, 27_79, 2_08_50, 14, 6_99, 10_72, 11_94, 36, 3_82, 1_09_01, 53, 7, 6_99, 10_72, 20_84, 36, 2_04_22, 6_30, 53, 19, 1_05, 30_49, 18_96, 10_53, 1_68_99, 15_06, 11, 3_79_78, 42_43, 7, 12_37, 3_18_69, 2_00, 1_65_66, 6_54, 6, 3_50_52, 8_14_36, 7, 5_56_30, 1_35_93, 4, 2], [1, 26, 1_50_11, 13, 6_67, 8, 10_53, 18, 2_36_11, 12_37, 7_23_56, 1_28_20, 34, 10_41_34, 12_09, 35, 1_33_13, 66_27, 21, 2_02, 3_47, 7, 1_64, 23_99, 11, 46, 44_85, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 12_32, 28_64, 1_57_85, 1_49_51, 1_05, 5, 85_81, 12_50, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=UpperCamelCase_ , model_name="microsoft/deberta-v2-xlarge" , revision="ad6e42c1532ddf3a15c39246b63f5559d558b670" , )
10
'''simple docstring''' import json import os from functools import lru_cache from typing import List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging _a : Optional[Any] = logging.get_logger(__name__) _a : int = {"vocab_file": "vocab.json", "merges_file": "merges.txt"} # See all BART models at https://huggingface.co/models?filter=bart _a : Tuple = { "vocab_file": { "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json", "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json", "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json", "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json", "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json", "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json", }, "merges_file": { "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt", "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt", "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt", "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt", "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt", "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt", }, } _a : List[Any] = { "facebook/bart-base": 1024, "facebook/bart-large": 1024, "facebook/bart-large-mnli": 1024, "facebook/bart-large-cnn": 1024, "facebook/bart-large-xsum": 1024, "yjernite/bart_eli5": 1024, } @lru_cache() def _lowercase ( ) -> List[Any]: """simple docstring""" __UpperCAmelCase : Dict = ( list(range(ord("!" 
) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) ) ) __UpperCAmelCase : Optional[Any] = bs[:] __UpperCAmelCase : Optional[int] = 0 for b in range(2**8 ): if b not in bs: bs.append(lowerCamelCase__ ) cs.append(2**8 + n ) n += 1 __UpperCAmelCase : Dict = [chr(lowerCamelCase__ ) for n in cs] return dict(zip(lowerCamelCase__ , lowerCamelCase__ ) ) def _lowercase ( lowerCamelCase__ ) -> str: """simple docstring""" __UpperCAmelCase : Dict = set() __UpperCAmelCase : Union[str, Any] = word[0] for char in word[1:]: pairs.add((prev_char, char) ) __UpperCAmelCase : Optional[Any] = char return pairs class __A (__magic_name__ ): snake_case :Optional[int] = VOCAB_FILES_NAMES snake_case :List[Any] = PRETRAINED_VOCAB_FILES_MAP snake_case :Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case :Optional[int] = ["input_ids", "attention_mask"] def __init__( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_="replace" , UpperCamelCase_="<s>" , UpperCamelCase_="</s>" , UpperCamelCase_="</s>" , UpperCamelCase_="<s>" , UpperCamelCase_="<unk>" , UpperCamelCase_="<pad>" , UpperCamelCase_="<mask>" , UpperCamelCase_=False , **UpperCamelCase_ , ): __UpperCAmelCase : str = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else bos_token __UpperCAmelCase : List[str] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else eos_token __UpperCAmelCase : Optional[int] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else sep_token __UpperCAmelCase : int = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else cls_token __UpperCAmelCase : Optional[int] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else unk_token __UpperCAmelCase : Dict = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it __UpperCAmelCase : Union[str, Any] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else mask_token super().__init__( errors=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , **UpperCamelCase_ , ) with open(UpperCamelCase_ , encoding="utf-8" ) as vocab_handle: __UpperCAmelCase : int = json.load(UpperCamelCase_ ) __UpperCAmelCase : Any = {v: k for k, v in self.encoder.items()} __UpperCAmelCase : Any = errors # how to handle errors in decoding __UpperCAmelCase : str = bytes_to_unicode() __UpperCAmelCase : List[str] = {v: k for k, v in self.byte_encoder.items()} with open(UpperCamelCase_ , encoding="utf-8" ) as merges_handle: __UpperCAmelCase : str = merges_handle.read().split("\n" )[1:-1] __UpperCAmelCase : List[str] = [tuple(merge.split() ) for merge in bpe_merges] __UpperCAmelCase : Union[str, Any] = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_ ) ) ) ) __UpperCAmelCase : Optional[int] = {} __UpperCAmelCase : Optional[int] = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions __UpperCAmelCase : Dict = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" ) @property def _snake_case ( self ): return len(self.encoder ) def _snake_case ( self ): return dict(self.encoder , **self.added_tokens_encoder ) def _snake_case ( self , UpperCamelCase_ ): if token in self.cache: return self.cache[token] __UpperCAmelCase : List[str] = tuple(UpperCamelCase_ ) __UpperCAmelCase : str = get_pairs(UpperCamelCase_ ) if not pairs: return token while True: __UpperCAmelCase : str = min(UpperCamelCase_ , key=lambda UpperCamelCase_ : self.bpe_ranks.get(UpperCamelCase_ , float("inf" ) ) ) if bigram not in self.bpe_ranks: break __UpperCAmelCase , __UpperCAmelCase : List[Any] = bigram __UpperCAmelCase : Any = [] __UpperCAmelCase : List[str] = 0 while i < len(UpperCamelCase_ ): try: __UpperCAmelCase : Union[str, Any] = word.index(UpperCamelCase_ , UpperCamelCase_ ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) __UpperCAmelCase : str = j if word[i] == first and i < len(UpperCamelCase_ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 __UpperCAmelCase : Dict = tuple(UpperCamelCase_ ) __UpperCAmelCase : str = new_word if len(UpperCamelCase_ ) == 1: break else: __UpperCAmelCase : int = get_pairs(UpperCamelCase_ ) __UpperCAmelCase : Optional[int] = " ".join(UpperCamelCase_ ) __UpperCAmelCase : Dict = word return word def _snake_case ( self , UpperCamelCase_ ): __UpperCAmelCase : Optional[Any] = [] for token in re.findall(self.pat , UpperCamelCase_ ): __UpperCAmelCase : Any = "".join( self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(UpperCamelCase_ ).split(" " ) ) return bpe_tokens def _snake_case ( self , UpperCamelCase_ ): return self.encoder.get(UpperCamelCase_ , self.encoder.get(self.unk_token ) ) def _snake_case ( self , UpperCamelCase_ ): return self.decoder.get(UpperCamelCase_ ) def _snake_case ( self , UpperCamelCase_ ): __UpperCAmelCase : List[str] = 
"".join(UpperCamelCase_ ) __UpperCAmelCase : Union[str, Any] = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors ) return text def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ): if not os.path.isdir(UpperCamelCase_ ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return __UpperCAmelCase : Any = os.path.join( UpperCamelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) __UpperCAmelCase : Optional[int] = os.path.join( UpperCamelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) with open(UpperCamelCase_ , "w" , encoding="utf-8" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=UpperCamelCase_ , ensure_ascii=UpperCamelCase_ ) + "\n" ) __UpperCAmelCase : str = 0 with open(UpperCamelCase_ , "w" , encoding="utf-8" ) as writer: writer.write("#version: 0.2\n" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda UpperCamelCase_ : kv[1] ): if index != token_index: logger.warning( f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.""" " Please check that the tokenizer is not corrupted!" ) __UpperCAmelCase : str = token_index writer.write(" ".join(UpperCamelCase_ ) + "\n" ) index += 1 return vocab_file, merge_file def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] __UpperCAmelCase : List[Any] = [self.cls_token_id] __UpperCAmelCase : Tuple = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCamelCase_ , token_ids_a=UpperCamelCase_ , already_has_special_tokens=UpperCamelCase_ ) if token_ids_a is None: return [1] + ([0] * len(UpperCamelCase_ )) + [1] return [1] + ([0] * len(UpperCamelCase_ )) + [1, 1] + ([0] * len(UpperCamelCase_ )) + [1] def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ): __UpperCAmelCase : int = [self.sep_token_id] __UpperCAmelCase : List[str] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_=False , **UpperCamelCase_ ): __UpperCAmelCase : List[str] = kwargs.pop("add_prefix_space" , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(UpperCamelCase_ ) > 0 and not text[0].isspace()): __UpperCAmelCase : Tuple = " " + text return (text, kwargs)
10
1
'''simple docstring''' import os import shutil from pathlib import Path from typing import Optional, Union import numpy as np from huggingface_hub import hf_hub_download from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging if is_onnx_available(): import onnxruntime as ort _a : Tuple = logging.get_logger(__name__) _a : Optional[Any] = { "tensor(bool)": np.bool_, "tensor(int8)": np.inta, "tensor(uint8)": np.uinta, "tensor(int16)": np.intaa, "tensor(uint16)": np.uintaa, "tensor(int32)": np.intaa, "tensor(uint32)": np.uintaa, "tensor(int64)": np.intaa, "tensor(uint64)": np.uintaa, "tensor(float16)": np.floataa, "tensor(float)": np.floataa, "tensor(double)": np.floataa, } class __A : def __init__( self , UpperCamelCase_=None , **UpperCamelCase_ ): logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future." ) __UpperCAmelCase : List[str] = model __UpperCAmelCase : Tuple = kwargs.get("model_save_dir" , UpperCamelCase_ ) __UpperCAmelCase : int = kwargs.get("latest_model_name" , UpperCamelCase_ ) def __call__( self , **UpperCamelCase_ ): __UpperCAmelCase : List[Any] = {k: np.array(UpperCamelCase_ ) for k, v in kwargs.items()} return self.model.run(UpperCamelCase_ , UpperCamelCase_ ) @staticmethod def _snake_case ( UpperCamelCase_ , UpperCamelCase_=None , UpperCamelCase_=None ): if provider is None: logger.info("No onnxruntime provider specified, using CPUExecutionProvider" ) __UpperCAmelCase : Dict = "CPUExecutionProvider" return ort.InferenceSession(UpperCamelCase_ , providers=[provider] , sess_options=UpperCamelCase_ ) def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None , **UpperCamelCase_ ): __UpperCAmelCase : Union[str, Any] = file_name if file_name is not None else ONNX_WEIGHTS_NAME __UpperCAmelCase : Optional[int] = self.model_save_dir.joinpath(self.latest_model_name ) __UpperCAmelCase : str = Path(UpperCamelCase_ ).joinpath(UpperCamelCase_ ) try: shutil.copyfile(UpperCamelCase_ , UpperCamelCase_ ) except shutil.SameFileError: pass # copy external weights (for models >2GB) __UpperCAmelCase : Optional[int] = self.model_save_dir.joinpath(UpperCamelCase_ ) if src_path.exists(): __UpperCAmelCase : int = Path(UpperCamelCase_ ).joinpath(UpperCamelCase_ ) try: shutil.copyfile(UpperCamelCase_ , UpperCamelCase_ ) except shutil.SameFileError: pass def _snake_case ( self , UpperCamelCase_ , **UpperCamelCase_ , ): if os.path.isfile(UpperCamelCase_ ): logger.error(f"""Provided path ({save_directory}) should be a directory, not a file""" ) return os.makedirs(UpperCamelCase_ , exist_ok=UpperCamelCase_ ) # saving model weights/files self._save_pretrained(UpperCamelCase_ , **UpperCamelCase_ ) @classmethod def _snake_case ( cls , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = False , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , **UpperCamelCase_ , ): __UpperCAmelCase : Tuple = file_name if file_name is not None else ONNX_WEIGHTS_NAME # load model from local directory if os.path.isdir(UpperCamelCase_ ): __UpperCAmelCase : Optional[int] = OnnxRuntimeModel.load_model( os.path.join(UpperCamelCase_ , UpperCamelCase_ ) , provider=UpperCamelCase_ , sess_options=UpperCamelCase_ ) __UpperCAmelCase : Tuple = Path(UpperCamelCase_ ) # load model from hub else: # download model __UpperCAmelCase : Tuple = hf_hub_download( repo_id=UpperCamelCase_ , filename=UpperCamelCase_ , use_auth_token=UpperCamelCase_ , revision=UpperCamelCase_ , 
cache_dir=UpperCamelCase_ , force_download=UpperCamelCase_ , ) __UpperCAmelCase : Optional[Any] = Path(UpperCamelCase_ ).parent __UpperCAmelCase : int = Path(UpperCamelCase_ ).name __UpperCAmelCase : Optional[Any] = OnnxRuntimeModel.load_model(UpperCamelCase_ , provider=UpperCamelCase_ , sess_options=UpperCamelCase_ ) return cls(model=UpperCamelCase_ , **UpperCamelCase_ ) @classmethod def _snake_case ( cls , UpperCamelCase_ , UpperCamelCase_ = True , UpperCamelCase_ = None , UpperCamelCase_ = None , **UpperCamelCase_ , ): __UpperCAmelCase : Dict = None if len(str(UpperCamelCase_ ).split("@" ) ) == 2: __UpperCAmelCase , __UpperCAmelCase : int = model_id.split("@" ) return cls._from_pretrained( model_id=UpperCamelCase_ , revision=UpperCamelCase_ , cache_dir=UpperCamelCase_ , force_download=UpperCamelCase_ , use_auth_token=UpperCamelCase_ , **UpperCamelCase_ , )
10
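The wrapper row above is a thin convenience layer over onnxruntime. A hedged sketch of the bare calls it builds on; "model.onnx" is a placeholder path, and the zero-filled input assumes the graph has a single input with fully static dimensions:

import numpy as np
import onnxruntime as ort

# CPUExecutionProvider is the same fallback the wrapper above defaults to.
session = ort.InferenceSession("model.onnx", providers=["CPUExecutionProvider"])

# Inputs are fed as a dict of numpy arrays keyed by the graph's input names,
# mirroring the wrapper's __call__.
inp = session.get_inputs()[0]
feed = {inp.name: np.zeros(inp.shape, dtype=np.float32)}  # assumes static dims
outputs = session.run(None, feed)  # None -> return all graph outputs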
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging _a : Any = logging.get_logger(__name__) _a : int = { "facebook/s2t-wav2vec2-large-en-de": ( "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json" ), # See all Speech2Text models at https://huggingface.co/models?filter=speech2text2 } class __A (__magic_name__ ): snake_case :Optional[int] = "speech_to_text_2" snake_case :List[Any] = ["past_key_values"] snake_case :str = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"} def __init__( self , UpperCamelCase_=1_00_00 , UpperCamelCase_=6 , UpperCamelCase_=20_48 , UpperCamelCase_=4 , UpperCamelCase_=0.0 , UpperCamelCase_=True , UpperCamelCase_="relu" , UpperCamelCase_=2_56 , UpperCamelCase_=0.1 , UpperCamelCase_=0.0 , UpperCamelCase_=0.0 , UpperCamelCase_=0.0_2 , UpperCamelCase_=2 , UpperCamelCase_=True , UpperCamelCase_=1 , UpperCamelCase_=0 , UpperCamelCase_=2 , UpperCamelCase_=10_24 , **UpperCamelCase_ , ): __UpperCAmelCase : Any = vocab_size __UpperCAmelCase : Optional[int] = d_model __UpperCAmelCase : Tuple = decoder_ffn_dim __UpperCAmelCase : List[str] = decoder_layers __UpperCAmelCase : str = decoder_attention_heads __UpperCAmelCase : Dict = dropout __UpperCAmelCase : Optional[Any] = attention_dropout __UpperCAmelCase : int = activation_dropout __UpperCAmelCase : Dict = activation_function __UpperCAmelCase : Tuple = init_std __UpperCAmelCase : Any = decoder_layerdrop __UpperCAmelCase : str = use_cache __UpperCAmelCase : int = decoder_layers __UpperCAmelCase : Any = scale_embedding # scale factor will be sqrt(d_model) if True __UpperCAmelCase : Union[str, Any] = max_target_positions super().__init__( pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , decoder_start_token_id=UpperCamelCase_ , **UpperCamelCase_ , )
10
1
'''simple docstring''' import dataclasses import json import warnings from dataclasses import dataclass, field from time import time from typing import List from ..utils import logging _a : List[Any] = logging.get_logger(__name__) def _lowercase ( lowerCamelCase__=None , lowerCamelCase__=None ) -> int: """simple docstring""" return field(default_factory=lambda: default , metadata=lowerCamelCase__ ) @dataclass class __A : snake_case :List[str] = list_field( default=[] , metadata={ "help": ( "Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version" " of all available models" ) } , ) snake_case :List[int] = list_field( default=[8] , metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"} ) snake_case :List[int] = list_field( default=[8, 32, 128, 512] , metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"} , ) snake_case :bool = field( default=__magic_name__ , metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."} , ) snake_case :bool = field( default=__magic_name__ , metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."} , ) snake_case :bool = field( default=__magic_name__ , metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."} ) snake_case :bool = field(default=__magic_name__ , metadata={"help": "Use FP16 to accelerate inference."} ) snake_case :bool = field(default=__magic_name__ , metadata={"help": "Benchmark training of model"} ) snake_case :bool = field(default=__magic_name__ , metadata={"help": "Verbose memory tracing"} ) snake_case :bool = field( default=__magic_name__ , metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."} , ) snake_case :bool = field( default=__magic_name__ , metadata={ "help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory" } , ) snake_case :bool = field(default=__magic_name__ , metadata={"help": "Trace memory line by line"} ) snake_case :bool = field(default=__magic_name__ , metadata={"help": "Save result to a CSV file"} ) snake_case :bool = field(default=__magic_name__ , metadata={"help": "Save all print statements in a log file"} ) snake_case :bool = field(default=__magic_name__ , metadata={"help": "Whether to print environment information"} ) snake_case :bool = field( default=__magic_name__ , metadata={ "help": ( "Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use" " multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled" " for debugging / testing and on TPU." 
) } , ) snake_case :str = field( default=f"inference_time_{round(time() )}.csv" , metadata={"help": "CSV filename used if saving time results to csv."} , ) snake_case :str = field( default=f"inference_memory_{round(time() )}.csv" , metadata={"help": "CSV filename used if saving memory results to csv."} , ) snake_case :str = field( default=f"train_time_{round(time() )}.csv" , metadata={"help": "CSV filename used if saving time results to csv for training."} , ) snake_case :str = field( default=f"train_memory_{round(time() )}.csv" , metadata={"help": "CSV filename used if saving memory results to csv for training."} , ) snake_case :str = field( default=f"env_info_{round(time() )}.csv" , metadata={"help": "CSV filename used if saving environment information."} , ) snake_case :str = field( default=f"log_{round(time() )}.csv" , metadata={"help": "Log filename used if print statements are saved in log."} , ) snake_case :int = field(default=3 , metadata={"help": "Times an experiment will be run."} ) snake_case :bool = field( default=__magic_name__ , metadata={ "help": ( "Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain" " model weights." ) } , ) def _snake_case ( self ): warnings.warn( f"""The class {self.__class__} is deprecated. Hugging Face Benchmarking utils""" " are deprecated in general and it is advised to use external Benchmarking libraries " " to benchmark Transformer models." , UpperCamelCase_ , ) def _snake_case ( self ): return json.dumps(dataclasses.asdict(self ) , indent=2 ) @property def _snake_case ( self ): if len(self.models ) <= 0: raise ValueError( "Please make sure you provide at least one model name / model identifier, *e.g.* `--models" " bert-base-cased` or `args.models = ['bert-base-cased']." ) return self.models @property def _snake_case ( self ): if not self.multi_process: return False elif self.is_tpu: logger.info("Multiprocessing is currently not possible on TPU." ) return False else: return True
10
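The list_field helper in the row above exists because dataclasses reject mutable defaults; wrapping the list in a default_factory closure sidesteps that. A minimal reproduction of the pattern:

from dataclasses import dataclass, field

def list_field(default=None, metadata=None):
    # field(default=[8]) would raise "mutable default ... is not allowed";
    # the factory defers creation (note all instances then share `default`).
    return field(default_factory=lambda: default, metadata=metadata)

@dataclass
class Args:
    batch_sizes: list = list_field(default=[8], metadata={"help": "batch sizes"})

print(Args().batch_sizes)  # [8]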
'''simple docstring'''


def _lowercase ( lowerCamelCase__ = 100 ) -> int:
    """simple docstring"""
    __UpperCAmelCase : Optional[Any] = (n * (n + 1) // 2) ** 2
    __UpperCAmelCase : Any = n * (n + 1) * (2 * n + 1) // 6
    return sum_cubes - sum_squares


if __name__ == "__main__":
    print(f"""{solution() = }""")
10
1
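The row above computes Project Euler problem 6 (difference between the square of the sum and the sum of the squares) via the closed forms (n(n+1)/2)^2 and n(n+1)(2n+1)/6. A brute-force cross-check for small n:

def brute_force(n: int) -> int:
    return sum(range(1, n + 1)) ** 2 - sum(i * i for i in range(1, n + 1))

assert brute_force(10) == 2640        # worked example from the problem statement
assert brute_force(100) == 25164150   # the published answer for n = 100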
'''simple docstring''' import json import os from typing import Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging _a : Dict = logging.get_logger(__name__) _a : Optional[int] = {"vocab_file": "vocab.json"} _a : str = { "vocab_file": { "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json", } } _a : Union[str, Any] = {"mgp-str": 27} class __A (__magic_name__ ): snake_case :str = VOCAB_FILES_NAMES snake_case :str = PRETRAINED_VOCAB_FILES_MAP snake_case :Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self , UpperCamelCase_ , UpperCamelCase_="[GO]" , UpperCamelCase_="[GO]" , UpperCamelCase_="[s]" , UpperCamelCase_="[GO]" , **UpperCamelCase_ ): super().__init__( unk_token=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , **UpperCamelCase_ , ) with open(UpperCamelCase_ , encoding="utf-8" ) as vocab_handle: __UpperCAmelCase : int = json.load(UpperCamelCase_ ) __UpperCAmelCase : Any = {v: k for k, v in self.vocab.items()} @property def _snake_case ( self ): return len(self.vocab ) def _snake_case ( self ): return dict(self.vocab , **self.added_tokens_encoder ) def _snake_case ( self , UpperCamelCase_ ): __UpperCAmelCase : List[str] = [] for s in text: char_tokens.extend(UpperCamelCase_ ) return char_tokens def _snake_case ( self , UpperCamelCase_ ): return self.vocab.get(UpperCamelCase_ , self.vocab.get(self.unk_token ) ) def _snake_case ( self , UpperCamelCase_ ): return self.decoder.get(UpperCamelCase_ ) def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ): if not os.path.isdir(UpperCamelCase_ ): logger.error("Vocabulary path ({}) should be a directory".format(UpperCamelCase_ ) ) return __UpperCAmelCase : Union[str, Any] = os.path.join( UpperCamelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) with open(UpperCamelCase_ , "w" , encoding="utf-8" ) as f: f.write(json.dumps(self.vocab , indent=2 , sort_keys=UpperCamelCase_ , ensure_ascii=UpperCamelCase_ ) + "\n" ) return (vocab_file,)
10
'''simple docstring'''


def _lowercase ( lowerCamelCase__ , lowerCamelCase__ ) -> float:
    """simple docstring"""
    if discount_rate < 0:
        raise ValueError("Discount rate cannot be negative" )
    if not cash_flows:
        raise ValueError("Cash flows list cannot be empty" )
    __UpperCAmelCase : Tuple = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(lowerCamelCase__ ) )
    return round(lowerCamelCase__ , ndigits=2 )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
10
1
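The NPV row above discounts each cash flow by (1 + r)^i with the period index starting at 0, so the first entry acts as an undiscounted outlay at time zero. A worked example with invented figures:

# An initial outlay of 1000 followed by three inflows of 400, discounted at 10%.
cash_flows = [-1000.0, 400.0, 400.0, 400.0]
rate = 0.10
npv = sum(cf / (1 + rate) ** i for i, cf in enumerate(cash_flows))
print(round(npv, 2))  # -5.26 -- the project narrowly destroys value at this rate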
'''simple docstring''' import argparse import glob import importlib.util import os import re import black from doc_builder.style_doc import style_docstrings_in_code # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_copies.py _a : str = "src/diffusers" _a : Dict = "." # This is to make sure the diffusers module imported is the one in the repo. _a : Any = importlib.util.spec_from_file_location( "diffusers", os.path.join(DIFFUSERS_PATH, "__init__.py"), submodule_search_locations=[DIFFUSERS_PATH], ) _a : Optional[Any] = spec.loader.load_module() def _lowercase ( lowerCamelCase__ , lowerCamelCase__ ) -> Union[str, Any]: """simple docstring""" return line.startswith(lowerCamelCase__ ) or len(lowerCamelCase__ ) <= 1 or re.search(R"^\s*\)(\s*->.*:|:)\s*$" , lowerCamelCase__ ) is not None def _lowercase ( lowerCamelCase__ ) -> str: """simple docstring""" __UpperCAmelCase : Any = object_name.split("." ) __UpperCAmelCase : List[Any] = 0 # First let's find the module where our object lives. __UpperCAmelCase : List[Any] = parts[i] while i < len(lowerCamelCase__ ) and not os.path.isfile(os.path.join(lowerCamelCase__ , f"""{module}.py""" ) ): i += 1 if i < len(lowerCamelCase__ ): __UpperCAmelCase : Union[str, Any] = os.path.join(lowerCamelCase__ , parts[i] ) if i >= len(lowerCamelCase__ ): raise ValueError(f"""`object_name` should begin with the name of a module of diffusers but got {object_name}.""" ) with open(os.path.join(lowerCamelCase__ , f"""{module}.py""" ) , "r" , encoding="utf-8" , newline="\n" ) as f: __UpperCAmelCase : List[str] = f.readlines() # Now let's find the class / func in the code! __UpperCAmelCase : Optional[Any] = "" __UpperCAmelCase : Optional[Any] = 0 for name in parts[i + 1 :]: while ( line_index < len(lowerCamelCase__ ) and re.search(Rf"""^{indent}(class|def)\s+{name}(\(|\:)""" , lines[line_index] ) is None ): line_index += 1 indent += " " line_index += 1 if line_index >= len(lowerCamelCase__ ): raise ValueError(f""" {object_name} does not match any function or class in {module}.""" ) # We found the beginning of the class / func, now let's find the end (when the indent diminishes). __UpperCAmelCase : Any = line_index while line_index < len(lowerCamelCase__ ) and _should_continue(lines[line_index] , lowerCamelCase__ ): line_index += 1 # Clean up empty lines at the end (if any). 
while len(lines[line_index - 1] ) <= 1: line_index -= 1 __UpperCAmelCase : Any = lines[start_index:line_index] return "".join(lowerCamelCase__ ) _a : List[Any] = re.compile(R"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)") _a : Tuple = re.compile(R"^\s*(\S+)->(\S+)(\s+.*|$)") _a : Tuple = re.compile(R"<FILL\s+[^>]*>") def _lowercase ( lowerCamelCase__ ) -> Dict: """simple docstring""" __UpperCAmelCase : Optional[int] = code.split("\n" ) __UpperCAmelCase : Any = 0 while idx < len(lowerCamelCase__ ) and len(lines[idx] ) == 0: idx += 1 if idx < len(lowerCamelCase__ ): return re.search(R"^(\s*)\S" , lines[idx] ).groups()[0] return "" def _lowercase ( lowerCamelCase__ ) -> Optional[Any]: """simple docstring""" __UpperCAmelCase : Dict = len(get_indent(lowerCamelCase__ ) ) > 0 if has_indent: __UpperCAmelCase : Tuple = f"""class Bla:\n{code}""" __UpperCAmelCase : int = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 , preview=lowerCamelCase__ ) __UpperCAmelCase : Union[str, Any] = black.format_str(lowerCamelCase__ , mode=lowerCamelCase__ ) __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = style_docstrings_in_code(lowerCamelCase__ ) return result[len("class Bla:\n" ) :] if has_indent else result def _lowercase ( lowerCamelCase__ , lowerCamelCase__=False ) -> Any: """simple docstring""" with open(lowerCamelCase__ , "r" , encoding="utf-8" , newline="\n" ) as f: __UpperCAmelCase : Optional[Any] = f.readlines() __UpperCAmelCase : List[str] = [] __UpperCAmelCase : Dict = 0 # Not a for loop cause `lines` is going to change (if `overwrite=True`). while line_index < len(lowerCamelCase__ ): __UpperCAmelCase : Dict = _re_copy_warning.search(lines[line_index] ) if search is None: line_index += 1 continue # There is some copied code here, let's retrieve the original. __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Any = search.groups() __UpperCAmelCase : List[Any] = find_code_in_diffusers(lowerCamelCase__ ) __UpperCAmelCase : Optional[Any] = get_indent(lowerCamelCase__ ) __UpperCAmelCase : Tuple = line_index + 1 if indent == theoretical_indent else line_index + 2 __UpperCAmelCase : str = theoretical_indent __UpperCAmelCase : Any = start_index # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment. __UpperCAmelCase : Any = True while line_index < len(lowerCamelCase__ ) and should_continue: line_index += 1 if line_index >= len(lowerCamelCase__ ): break __UpperCAmelCase : Optional[int] = lines[line_index] __UpperCAmelCase : int = _should_continue(lowerCamelCase__ , lowerCamelCase__ ) and re.search(f"""^{indent}# End copy""" , lowerCamelCase__ ) is None # Clean up empty lines at the end (if any). while len(lines[line_index - 1] ) <= 1: line_index -= 1 __UpperCAmelCase : Dict = lines[start_index:line_index] __UpperCAmelCase : List[Any] = "".join(lowerCamelCase__ ) # Remove any nested `Copied from` comments to avoid circular copies __UpperCAmelCase : str = [line for line in theoretical_code.split("\n" ) if _re_copy_warning.search(lowerCamelCase__ ) is None] __UpperCAmelCase : int = "\n".join(lowerCamelCase__ ) # Before comparing, use the `replace_pattern` on the original code. 
if len(lowerCamelCase__ ) > 0: __UpperCAmelCase : Tuple = replace_pattern.replace("with" , "" ).split("," ) __UpperCAmelCase : int = [_re_replace_pattern.search(lowerCamelCase__ ) for p in patterns] for pattern in patterns: if pattern is None: continue __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Dict = pattern.groups() __UpperCAmelCase : Union[str, Any] = re.sub(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) if option.strip() == "all-casing": __UpperCAmelCase : Optional[int] = re.sub(obja.lower() , obja.lower() , lowerCamelCase__ ) __UpperCAmelCase : str = re.sub(obja.upper() , obja.upper() , lowerCamelCase__ ) # Blackify after replacement. To be able to do that, we need the header (class or function definition) # from the previous line __UpperCAmelCase : List[Any] = blackify(lines[start_index - 1] + theoretical_code ) __UpperCAmelCase : Any = theoretical_code[len(lines[start_index - 1] ) :] # Test for a diff and act accordingly. if observed_code != theoretical_code: diffs.append([object_name, start_index] ) if overwrite: __UpperCAmelCase : List[Any] = lines[:start_index] + [theoretical_code] + lines[line_index:] __UpperCAmelCase : Optional[Any] = start_index + 1 if overwrite and len(lowerCamelCase__ ) > 0: # Warn the user a file has been modified. print(f"""Detected changes, rewriting {filename}.""" ) with open(lowerCamelCase__ , "w" , encoding="utf-8" , newline="\n" ) as f: f.writelines(lowerCamelCase__ ) return diffs def _lowercase ( lowerCamelCase__ = False ) -> Optional[int]: """simple docstring""" __UpperCAmelCase : Optional[int] = glob.glob(os.path.join(lowerCamelCase__ , "**/*.py" ) , recursive=lowerCamelCase__ ) __UpperCAmelCase : Union[str, Any] = [] for filename in all_files: __UpperCAmelCase : Tuple = is_copy_consistent(lowerCamelCase__ , lowerCamelCase__ ) diffs += [f"""- {filename}: copy does not match {d[0]} at line {d[1]}""" for d in new_diffs] if not overwrite and len(lowerCamelCase__ ) > 0: __UpperCAmelCase : Dict = "\n".join(lowerCamelCase__ ) raise Exception( "Found the following copy inconsistencies:\n" + diff + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them." ) if __name__ == "__main__": _a : Union[str, Any] = argparse.ArgumentParser() parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.") _a : Any = parser.parse_args() check_copies(args.fix_and_overwrite)
10
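The consistency checker in the rows above keys everything off a `# Copied from diffusers.<path>` comment; `_re_copy_warning` captures the indentation, the fully qualified object, and an optional rename directive. Shown standalone on a made-up line:

import re

_re_copy_warning = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")

line = "    # Copied from diffusers.models.attention.BasicTransformerBlock with Basic->Fancy"
indent, object_name, replace_pattern = _re_copy_warning.search(line).groups()
print(repr(indent))      # '    '  -> expected indentation of the copied block
print(object_name)       # models.attention.BasicTransformerBlock
print(replace_pattern)   # with Basic->Fancy  -> optional renames applied before diffing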
'''simple docstring''' import random import torch from huggingface_hub import HfApi from diffusers import UNetaDModel _a : Union[str, Any] = HfApi() _a : int = {} # fmt: off _a : Optional[int] = torch.tensor([ -0.7_515, -1.6_883, 0.2_420, 0.0_300, 0.6_347, 1.3_433, -1.1_743, -3.7_467, 1.2_342, -2.2_485, 0.4_636, 0.8_076, -0.7_991, 0.3_969, 0.8_498, 0.9_189, -1.8_887, -3.3_522, 0.7_639, 0.2_040, 0.6_271, -2.7_148, -1.6_316, 3.0_839, 0.3_186, 0.2_721, -0.9_759, -1.2_461, 2.6_257, 1.3_557 ]) _a : Optional[Any] = torch.tensor([ -2.3_639, -2.5_344, 0.0_054, -0.6_674, 1.5_990, 1.0_158, 0.3_124, -2.1_436, 1.8_795, -2.5_429, -0.1_566, -0.3_973, 1.2_490, 2.6_447, 1.2_283, -0.5_208, -2.8_154, -3.5_119, 2.3_838, 1.2_033, 1.7_201, -2.1_256, -1.4_576, 2.7_948, 2.4_204, -0.9_752, -1.2_546, 0.8_027, 3.2_758, 3.1_365 ]) _a : int = torch.tensor([ -0.6_531, -0.6_891, -0.3_172, -0.5_375, -0.9_140, -0.5_367, -0.1_175, -0.7_869, -0.3_808, -0.4_513, -0.2_098, -0.0_083, 0.3_183, 0.5_140, 0.2_247, -0.1_304, -0.1_302, -0.2_802, -0.2_084, -0.2_025, -0.4_967, -0.4_873, -0.0_861, 0.6_925, 0.0_250, 0.1_290, -0.1_543, 0.6_316, 1.0_460, 1.4_943 ]) _a : str = torch.tensor([ 0.0_911, 0.1_107, 0.0_182, 0.0_435, -0.0_805, -0.0_608, 0.0_381, 0.2_172, -0.0_280, 0.1_327, -0.0_299, -0.0_255, -0.0_050, -0.1_170, -0.1_046, 0.0_309, 0.1_367, 0.1_728, -0.0_533, -0.0_748, -0.0_534, 0.1_624, 0.0_384, -0.1_805, -0.0_707, 0.0_642, 0.0_220, -0.0_134, -0.1_333, -0.1_505 ]) _a : Union[str, Any] = torch.tensor([ 0.1_321, 0.1_337, 0.0_440, 0.0_622, -0.0_591, -0.0_370, 0.0_503, 0.2_133, -0.0_177, 0.1_415, -0.0_116, -0.0_112, 0.0_044, -0.0_980, -0.0_789, 0.0_395, 0.1_502, 0.1_785, -0.0_488, -0.0_514, -0.0_404, 0.1_539, 0.0_454, -0.1_559, -0.0_665, 0.0_659, 0.0_383, -0.0_005, -0.1_266, -0.1_386 ]) _a : Any = torch.tensor([ 0.1_154, 0.1_218, 0.0_307, 0.0_526, -0.0_711, -0.0_541, 0.0_366, 0.2_078, -0.0_267, 0.1_317, -0.0_226, -0.0_193, -0.0_014, -0.1_055, -0.0_902, 0.0_330, 0.1_391, 0.1_709, -0.0_562, -0.0_693, -0.0_560, 0.1_482, 0.0_381, -0.1_683, -0.0_681, 0.0_661, 0.0_331, -0.0_046, -0.1_268, -0.1_431 ]) _a : List[Any] = torch.tensor([ 0.1_192, 0.1_240, 0.0_414, 0.0_606, -0.0_557, -0.0_412, 0.0_430, 0.2_042, -0.0_200, 0.1_385, -0.0_115, -0.0_132, 0.0_017, -0.0_965, -0.0_802, 0.0_398, 0.1_433, 0.1_747, -0.0_458, -0.0_533, -0.0_407, 0.1_545, 0.0_419, -0.1_574, -0.0_645, 0.0_626, 0.0_341, -0.0_010, -0.1_199, -0.1_390 ]) _a : Optional[int] = torch.tensor([ 0.1_075, 0.1_074, 0.0_205, 0.0_431, -0.0_774, -0.0_607, 0.0_298, 0.2_042, -0.0_320, 0.1_267, -0.0_281, -0.0_250, -0.0_064, -0.1_091, -0.0_946, 0.0_290, 0.1_328, 0.1_650, -0.0_580, -0.0_738, -0.0_586, 0.1_440, 0.0_337, -0.1_746, -0.0_712, 0.0_605, 0.0_250, -0.0_099, -0.1_316, -0.1_473 ]) _a : Tuple = torch.tensor([ -1.4_572, -2.0_481, -0.0_414, -0.6_005, 1.4_136, 0.5_848, 0.4_028, -2.7_330, 1.2_212, -2.1_228, 0.2_155, 0.4_039, 0.7_662, 2.0_535, 0.7_477, -0.3_243, -2.1_758, -2.7_648, 1.6_947, 0.7_026, 1.2_338, -1.6_078, -0.8_682, 2.2_810, 1.8_574, -0.5_718, -0.5_586, -0.0_186, 2.3_415, 2.1_251]) _a : List[Any] = torch.tensor([ -1.3_690, -1.9_720, -0.4_090, -0.6_966, 1.4_660, 0.9_938, -0.1_385, -2.7_324, 0.7_736, -1.8_917, 0.2_923, 0.4_293, 0.1_693, 1.4_112, 1.1_887, -0.3_181, -2.2_160, -2.6_381, 1.3_170, 0.8_163, 0.9_240, -1.6_544, -0.6_099, 2.5_259, 1.6_430, -0.9_090, -0.9_392, -0.0_126, 2.4_268, 2.3_266 ]) _a : Optional[Any] = torch.tensor([ -1.3_525, -1.9_628, -0.3_956, -0.6_860, 1.4_664, 1.0_014, -0.1_259, -2.7_212, 0.7_772, -1.8_811, 0.2_996, 0.4_388, 0.1_704, 1.4_029, 1.1_701, -0.3_027, 
-2.2_053, -2.6_287, 1.3_350, 0.8_131, 0.9_274, -1.6_292, -0.6_098, 2.5_131, 1.6_505, -0.8_958, -0.9_298, -0.0_151, 2.4_257, 2.3_355 ]) _a : Union[str, Any] = torch.tensor([ -2.0_585, -2.7_897, -0.2_850, -0.8_940, 1.9_052, 0.5_702, 0.6_345, -3.8_959, 1.5_932, -3.2_319, 0.1_974, 0.0_287, 1.7_566, 2.6_543, 0.8_387, -0.5_351, -3.2_736, -4.3_375, 2.9_029, 1.6_390, 1.4_640, -2.1_701, -1.9_013, 2.9_341, 3.4_981, -0.6_255, -1.1_644, -0.1_591, 3.7_097, 3.2_066 ]) _a : Optional[int] = torch.tensor([ -2.3_139, -2.5_594, -0.0_197, -0.6_785, 1.7_001, 1.1_606, 0.3_075, -2.1_740, 1.8_071, -2.5_630, -0.0_926, -0.3_811, 1.2_116, 2.6_246, 1.2_731, -0.5_398, -2.8_153, -3.6_140, 2.3_893, 1.3_262, 1.6_258, -2.1_856, -1.3_267, 2.8_395, 2.3_779, -1.0_623, -1.2_468, 0.8_959, 3.3_367, 3.2_243 ]) _a : Union[str, Any] = torch.tensor([ -2.0_628, -2.7_667, -0.2_089, -0.8_263, 2.0_539, 0.5_992, 0.6_495, -3.8_336, 1.6_025, -3.2_817, 0.1_721, -0.0_633, 1.7_516, 2.7_039, 0.8_100, -0.5_908, -3.2_113, -4.4_343, 2.9_257, 1.3_632, 1.5_562, -2.1_489, -1.9_894, 3.0_560, 3.3_396, -0.7_328, -1.0_417, 0.0_383, 3.7_093, 3.2_343 ]) _a : str = torch.tensor([ -1.4_574, -2.0_569, -0.0_473, -0.6_117, 1.4_018, 0.5_769, 0.4_129, -2.7_344, 1.2_241, -2.1_397, 0.2_000, 0.3_937, 0.7_616, 2.0_453, 0.7_324, -0.3_391, -2.1_746, -2.7_744, 1.6_963, 0.6_921, 1.2_187, -1.6_172, -0.8_877, 2.2_439, 1.8_471, -0.5_839, -0.5_605, -0.0_464, 2.3_250, 2.1_219 ]) # fmt: on _a : Optional[Any] = api.list_models(filter="diffusers") for mod in models: if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256": _a : List[str] = "/home/patrick/google_checkpoints/" + mod.modelId.split("/")[-1] print(f"""Started running {mod.modelId}!!!""") if mod.modelId.startswith("CompVis"): _a : int = UNetaDModel.from_pretrained(local_checkpoint, subfolder="unet") else: _a : Optional[int] = UNetaDModel.from_pretrained(local_checkpoint) torch.manual_seed(0) random.seed(0) _a : str = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size) _a : str = torch.tensor([10] * noise.shape[0]) with torch.no_grad(): _a : str = model(noise, time_step).sample assert torch.allclose( logits[0, 0, 0, :30], results["_".join("_".join(mod.modelId.split("/")).split("-"))], atol=1e-3 ) print(f"""{mod.modelId} has passed successfully!!!""")
10
1
'''simple docstring'''
from collections import Counter

import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split

_a : str = datasets.load_iris()

_a : List[Any] = np.array(data["data"])
_a : Optional[Any] = np.array(data["target"])
_a : Dict = data["target_names"]

_a , _a , _a , _a : Any = train_test_split(X, y)


def _lowercase ( lowerCamelCase__ , lowerCamelCase__ ) -> Tuple:
    """simple docstring"""
    return np.linalg.norm(np.array(lowerCamelCase__ ) - np.array(lowerCamelCase__ ) )


def _lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=5 ) -> int:
    """simple docstring"""
    __UpperCAmelCase : List[Any] = zip(lowerCamelCase__ , lowerCamelCase__ )
    # List of distances of all points from the point to be classified
    __UpperCAmelCase : int = []
    for data_point in data:
        __UpperCAmelCase : Optional[Any] = euclidean_distance(data_point[0] , lowerCamelCase__ )
        distances.append((distance, data_point[1]) )
    # Choosing 'k' points with the least distances.
    __UpperCAmelCase : Union[str, Any] = [i[1] for i in sorted(lowerCamelCase__ )[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    __UpperCAmelCase : Dict = Counter(lowerCamelCase__ ).most_common(1 )[0][0]
    return classes[result]


if __name__ == "__main__":
    print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
10
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging _a : Any = logging.get_logger(__name__) _a : List[Any] = { "microsoft/cvt-13": "https://huggingface.co/microsoft/cvt-13/resolve/main/config.json", # See all Cvt models at https://huggingface.co/models?filter=cvt } class __A (__magic_name__ ): snake_case :Any = "cvt" def __init__( self , UpperCamelCase_=3 , UpperCamelCase_=[7, 3, 3] , UpperCamelCase_=[4, 2, 2] , UpperCamelCase_=[2, 1, 1] , UpperCamelCase_=[64, 1_92, 3_84] , UpperCamelCase_=[1, 3, 6] , UpperCamelCase_=[1, 2, 10] , UpperCamelCase_=[4.0, 4.0, 4.0] , UpperCamelCase_=[0.0, 0.0, 0.0] , UpperCamelCase_=[0.0, 0.0, 0.0] , UpperCamelCase_=[0.0, 0.0, 0.1] , UpperCamelCase_=[True, True, True] , UpperCamelCase_=[False, False, True] , UpperCamelCase_=["dw_bn", "dw_bn", "dw_bn"] , UpperCamelCase_=[3, 3, 3] , UpperCamelCase_=[1, 1, 1] , UpperCamelCase_=[2, 2, 2] , UpperCamelCase_=[1, 1, 1] , UpperCamelCase_=[1, 1, 1] , UpperCamelCase_=0.0_2 , UpperCamelCase_=1E-12 , **UpperCamelCase_ , ): super().__init__(**UpperCamelCase_ ) __UpperCAmelCase : Optional[int] = num_channels __UpperCAmelCase : Optional[Any] = patch_sizes __UpperCAmelCase : List[str] = patch_stride __UpperCAmelCase : Tuple = patch_padding __UpperCAmelCase : int = embed_dim __UpperCAmelCase : str = num_heads __UpperCAmelCase : Any = depth __UpperCAmelCase : List[str] = mlp_ratio __UpperCAmelCase : List[str] = attention_drop_rate __UpperCAmelCase : Dict = drop_rate __UpperCAmelCase : Dict = drop_path_rate __UpperCAmelCase : str = qkv_bias __UpperCAmelCase : Optional[int] = cls_token __UpperCAmelCase : Optional[Any] = qkv_projection_method __UpperCAmelCase : Tuple = kernel_qkv __UpperCAmelCase : Optional[Any] = padding_kv __UpperCAmelCase : Optional[int] = stride_kv __UpperCAmelCase : Any = padding_q __UpperCAmelCase : List[Any] = stride_q __UpperCAmelCase : Union[str, Any] = initializer_range __UpperCAmelCase : Any = layer_norm_eps
10
1
'''simple docstring''' from typing import List from .keymap import KEYMAP, get_character def _lowercase ( lowerCamelCase__ ) -> Optional[int]: """simple docstring""" def decorator(lowerCamelCase__ ): __UpperCAmelCase : Dict = getattr(lowerCamelCase__ , "handle_key" , [] ) handle += [key] setattr(lowerCamelCase__ , "handle_key" , lowerCamelCase__ ) return func return decorator def _lowercase ( *lowerCamelCase__ ) -> str: """simple docstring""" def decorator(lowerCamelCase__ ): __UpperCAmelCase : Dict = getattr(lowerCamelCase__ , "handle_key" , [] ) handle += keys setattr(lowerCamelCase__ , "handle_key" , lowerCamelCase__ ) return func return decorator class __A (__magic_name__ ): def __new__( cls , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ): __UpperCAmelCase : Union[str, Any] = super().__new__(cls , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) if not hasattr(UpperCamelCase_ , "key_handler" ): setattr(UpperCamelCase_ , "key_handler" , {} ) setattr(UpperCamelCase_ , "handle_input" , KeyHandler.handle_input ) for value in attrs.values(): __UpperCAmelCase : List[Any] = getattr(UpperCamelCase_ , "handle_key" , [] ) for key in handled_keys: __UpperCAmelCase : Dict = value return new_cls @staticmethod def _snake_case ( cls ): __UpperCAmelCase : str = get_character() if char != KEYMAP["undefined"]: __UpperCAmelCase : Tuple = ord(UpperCamelCase_ ) __UpperCAmelCase : str = cls.key_handler.get(UpperCamelCase_ ) if handler: __UpperCAmelCase : List[str] = char return handler(cls ) else: return None def _lowercase ( cls ) -> str: """simple docstring""" return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
10
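The decorators in the row above work by tagging functions with a handle_key attribute, which the metaclass later sweeps into a key -> handler map. The tagging trick, reduced to its core:

def mark_for(*keys):
    def decorator(func):
        # Accumulate rather than overwrite, so decorators can stack.
        func.handle_key = getattr(func, "handle_key", []) + list(keys)
        return func
    return decorator

@mark_for("q", "Q")
def on_quit():
    return "quit"

# What the metaclass does for every attribute of the class body:
key_handler = {}
for obj in (on_quit,):
    for key in getattr(obj, "handle_key", []):
        key_handler[key] = obj

print(key_handler["q"]())  # quit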
'''simple docstring''' from __future__ import annotations import numpy as np from numpy import floataa from numpy.typing import NDArray def _lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , ) -> list[float]: """simple docstring""" __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = coefficient_matrix.shape __UpperCAmelCase , __UpperCAmelCase : Any = constant_matrix.shape if rowsa != colsa: __UpperCAmelCase : str = f"""Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}""" raise ValueError(lowerCamelCase__ ) if colsa != 1: __UpperCAmelCase : Optional[Any] = f"""Constant matrix must be nx1 but received {rowsa}x{colsa}""" raise ValueError(lowerCamelCase__ ) if rowsa != rowsa: __UpperCAmelCase : Optional[int] = ( "Coefficient and constant matrices dimensions must be nxn and nx1 but " f"""received {rowsa}x{colsa} and {rowsa}x{colsa}""" ) raise ValueError(lowerCamelCase__ ) if len(lowerCamelCase__ ) != rowsa: __UpperCAmelCase : List[str] = ( "Number of initial values must be equal to number of rows in coefficient " f"""matrix but received {len(lowerCamelCase__ )} and {rowsa}""" ) raise ValueError(lowerCamelCase__ ) if iterations <= 0: raise ValueError("Iterations must be at least 1" ) __UpperCAmelCase : NDArray[floataa] = np.concatenate( (coefficient_matrix, constant_matrix) , axis=1 ) __UpperCAmelCase , __UpperCAmelCase : Tuple = table.shape strictly_diagonally_dominant(lowerCamelCase__ ) # Iterates the whole matrix for given number of times for _ in range(lowerCamelCase__ ): __UpperCAmelCase : int = [] for row in range(lowerCamelCase__ ): __UpperCAmelCase : List[str] = 0 for col in range(lowerCamelCase__ ): if col == row: __UpperCAmelCase : int = table[row][col] elif col == cols - 1: __UpperCAmelCase : Any = table[row][col] else: temp += (-1) * table[row][col] * init_val[col] __UpperCAmelCase : List[Any] = (temp + val) / denom new_val.append(lowerCamelCase__ ) __UpperCAmelCase : str = new_val return [float(lowerCamelCase__ ) for i in new_val] def _lowercase ( lowerCamelCase__ ) -> bool: """simple docstring""" __UpperCAmelCase , __UpperCAmelCase : Optional[int] = table.shape __UpperCAmelCase : str = True for i in range(0 , lowerCamelCase__ ): __UpperCAmelCase : Union[str, Any] = 0 for j in range(0 , cols - 1 ): if i == j: continue else: total += table[i][j] if table[i][i] <= total: raise ValueError("Coefficient matrix is not strictly diagonally dominant" ) return is_diagonally_dominant # Test Cases if __name__ == "__main__": import doctest doctest.testmod()
10
1
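The row above implements Jacobi iteration: each sweep solves row i for x_i using the previous iterate for every other unknown, which converges when the matrix is strictly diagonally dominant (the condition the final check enforces). The same update written directly for a small system:

import numpy as np

# Strictly diagonally dominant, so Jacobi is guaranteed to converge.
A = np.array([[4.0, 1.0, 1.0],
              [1.0, 5.0, 2.0],
              [1.0, 2.0, 4.0]])
b = np.array([6.0, 8.0, 7.0])

x = np.zeros(3)
for _ in range(50):  # fixed iteration count, as in the routine above
    x_new = np.empty_like(x)
    for i in range(3):
        off_diag = sum(A[i, j] * x[j] for j in range(3) if j != i)
        x_new[i] = (b[i] - off_diag) / A[i, i]
    x = x_new

print(np.allclose(x, np.linalg.solve(A, b)))  # True -- converges to (1, 1, 1)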
'''simple docstring''' import unittest from transformers import BertGenerationTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin _a : Optional[int] = "▁" _a : Optional[int] = get_tests_dir("fixtures/test_sentencepiece.model") @require_sentencepiece class __A (__magic_name__ , unittest.TestCase ): snake_case :List[Any] = BertGenerationTokenizer snake_case :Tuple = False snake_case :List[str] = True def _snake_case ( self ): super().setUp() __UpperCAmelCase : Any = BertGenerationTokenizer(UpperCamelCase_ , keep_accents=UpperCamelCase_ ) tokenizer.save_pretrained(self.tmpdirname ) def _snake_case ( self ): __UpperCAmelCase : int = "<s>" __UpperCAmelCase : Optional[int] = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase_ ) , UpperCamelCase_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase_ ) , UpperCamelCase_ ) def _snake_case ( self ): __UpperCAmelCase : List[Any] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<unk>" ) self.assertEqual(vocab_keys[1] , "<s>" ) self.assertEqual(vocab_keys[-1] , "<pad>" ) self.assertEqual(len(UpperCamelCase_ ) , 10_02 ) def _snake_case ( self ): self.assertEqual(self.get_tokenizer().vocab_size , 10_00 ) def _snake_case ( self ): __UpperCAmelCase : str = BertGenerationTokenizer(UpperCamelCase_ , keep_accents=UpperCamelCase_ ) __UpperCAmelCase : Tuple = tokenizer.tokenize("This is a test" ) self.assertListEqual(UpperCamelCase_ , ["▁This", "▁is", "▁a", "▁t", "est"] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) , [2_85, 46, 10, 1_70, 3_82] , ) __UpperCAmelCase : Any = tokenizer.tokenize("I was born in 92000, and this is falsé." ) self.assertListEqual( UpperCamelCase_ , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ] , ) __UpperCAmelCase : Optional[Any] = tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) self.assertListEqual( UpperCamelCase_ , [8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4] , ) __UpperCAmelCase : Any = tokenizer.convert_ids_to_tokens(UpperCamelCase_ ) self.assertListEqual( UpperCamelCase_ , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ".", ] , ) @cached_property def _snake_case ( self ): return BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" ) @slow def _snake_case ( self ): __UpperCAmelCase : Dict = "Hello World!" __UpperCAmelCase : Optional[Any] = [1_85_36, 22_60, 1_01] self.assertListEqual(UpperCamelCase_ , self.big_tokenizer.encode(UpperCamelCase_ ) ) @slow def _snake_case ( self ): __UpperCAmelCase : str = ( "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . 
Also we will" " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth" ) __UpperCAmelCase : int = [ 8_71, 4_19, 3_58, 9_46, 9_91, 25_21, 4_52, 3_58, 13_57, 3_87, 77_51, 35_36, 1_12, 9_85, 4_56, 1_26, 8_65, 9_38, 54_00, 57_34, 4_58, 13_68, 4_67, 7_86, 24_62, 52_46, 11_59, 6_33, 8_65, 45_19, 4_57, 5_82, 8_52, 25_57, 4_27, 9_16, 5_08, 4_05, 3_43_24, 4_97, 3_91, 4_08, 1_13_42, 12_44, 3_85, 1_00, 9_38, 9_85, 4_56, 5_74, 3_62, 1_25_97, 32_00, 31_29, 11_72, ] self.assertListEqual(UpperCamelCase_ , self.big_tokenizer.encode(UpperCamelCase_ ) ) @require_torch @slow def _snake_case ( self ): import torch from transformers import BertGenerationConfig, BertGenerationEncoder # Build sequence __UpperCAmelCase : Union[str, Any] = list(self.big_tokenizer.get_vocab().keys() )[:10] __UpperCAmelCase : str = " ".join(UpperCamelCase_ ) __UpperCAmelCase : Union[str, Any] = self.big_tokenizer.encode_plus(UpperCamelCase_ , return_tensors="pt" , return_token_type_ids=UpperCamelCase_ ) __UpperCAmelCase : Union[str, Any] = self.big_tokenizer.batch_encode_plus( [sequence + " " + sequence] , return_tensors="pt" , return_token_type_ids=UpperCamelCase_ ) __UpperCAmelCase : Optional[Any] = BertGenerationConfig() __UpperCAmelCase : Tuple = BertGenerationEncoder(UpperCamelCase_ ) assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size with torch.no_grad(): model(**UpperCamelCase_ ) model(**UpperCamelCase_ ) @slow def _snake_case ( self ): # fmt: off __UpperCAmelCase : List[Any] = {"input_ids": [[3_92_86, 4_58, 3_63_35, 20_01, 4_56, 1_30_73, 1_32_66, 4_55, 1_13, 77_46, 17_41, 1_11_57, 3_91, 1_30_73, 1_32_66, 4_55, 1_13, 39_67, 3_54_12, 1_13, 49_36, 1_09, 38_70, 23_77, 1_13, 3_00_84, 4_57_20, 4_58, 1_34, 1_74_96, 1_12, 5_03, 1_16_72, 1_13, 1_18, 1_12, 56_65, 1_33_47, 3_86_87, 1_12, 14_96, 3_13_89, 1_12, 32_68, 4_72_64, 1_34, 9_62, 1_12, 1_63_77, 80_35, 2_31_30, 4_30, 1_21_69, 1_55_18, 2_85_92, 4_58, 1_46, 4_16_97, 1_09, 3_91, 1_21_69, 1_55_18, 1_66_89, 4_58, 1_46, 4_13_58, 1_09, 4_52, 7_26, 40_34, 1_11, 7_63, 3_54_12, 50_82, 3_88, 19_03, 1_11, 90_51, 3_91, 28_70, 4_89_18, 19_00, 11_23, 5_50, 9_98, 1_12, 95_86, 1_59_85, 4_55, 3_91, 4_10, 2_29_55, 3_76_36, 1_14], [4_48, 1_74_96, 4_19, 36_63, 3_85, 7_63, 1_13, 2_75_33, 28_70, 32_83, 1_30_43, 16_39, 2_47_13, 5_23, 6_56, 2_40_13, 1_85_50, 25_21, 5_17, 2_70_14, 2_12_44, 4_20, 12_12, 14_65, 3_91, 9_27, 48_33, 3_88, 5_78, 1_17_86, 1_14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [4_84, 21_69, 76_87, 2_19_32, 1_81_46, 7_26, 3_63, 1_70_32, 33_91, 1_14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=UpperCamelCase_ , model_name="google/bert_for_seq_generation_L-24_bbc_encoder" , revision="c817d1fd1be2ffa69431227a1fe320544943d4db" , )
10
'''simple docstring'''
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors


def _lowercase ( lowerCamelCase__ ) -> int:
    """simple docstring"""
    __UpperCAmelCase : Any = prime_factors(lowerCamelCase__ )
    if is_square_free(lowerCamelCase__ ):
        return -1 if len(lowerCamelCase__ ) % 2 else 1
    return 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
10
1
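The row above composes the Möbius function from prime_factors and is_square_free: 0 when any prime factor repeats, otherwise +1 or -1 by the parity of the factor count. A self-contained spot check of the same logic, with local trial-division factorization standing in for the imported helpers:

from collections import Counter

def mobius(n: int) -> int:
    factors, d, m = [], 2, n
    while d * d <= m:
        while m % d == 0:
            factors.append(d)
            m //= d
        d += 1
    if m > 1:
        factors.append(m)
    if max(Counter(factors).values(), default=0) > 1:
        return 0                              # not square-free
    return -1 if len(factors) % 2 else 1      # parity of the prime count

assert mobius(1) == 1    # empty product counts as square-free, even
assert mobius(6) == 1    # 2 * 3
assert mobius(30) == -1  # 2 * 3 * 5
assert mobius(12) == 0   # divisible by 2^2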
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available _a : List[str] = { "configuration_ctrl": ["CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CTRLConfig"], "tokenization_ctrl": ["CTRLTokenizer"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a : Union[str, Any] = [ "CTRL_PRETRAINED_MODEL_ARCHIVE_LIST", "CTRLForSequenceClassification", "CTRLLMHeadModel", "CTRLModel", "CTRLPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a : Optional[Any] = [ "TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST", "TFCTRLForSequenceClassification", "TFCTRLLMHeadModel", "TFCTRLModel", "TFCTRLPreTrainedModel", ] if TYPE_CHECKING: from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig from .tokenization_ctrl import CTRLTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_ctrl import ( CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, CTRLForSequenceClassification, CTRLLMHeadModel, CTRLModel, CTRLPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_ctrl import ( TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, TFCTRLForSequenceClassification, TFCTRLLMHeadModel, TFCTRLModel, TFCTRLPreTrainedModel, ) else: import sys _a : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
10
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) _a : Dict = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a : Dict = ["ReformerTokenizer"] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a : List[Any] = ["ReformerTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a : int = [ "REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "ReformerAttention", "ReformerForMaskedLM", "ReformerForQuestionAnswering", "ReformerForSequenceClassification", "ReformerLayer", "ReformerModel", "ReformerModelWithLMHead", "ReformerPreTrainedModel", ] if TYPE_CHECKING: from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_reformer import ReformerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_reformer_fast import ReformerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_reformer import ( REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ReformerAttention, ReformerForMaskedLM, ReformerForQuestionAnswering, ReformerForSequenceClassification, ReformerLayer, ReformerModel, ReformerModelWithLMHead, ReformerPreTrainedModel, ) else: import sys _a : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
10
1
'''simple docstring'''
import math


def _lowercase ( lowerCamelCase__ ) -> bool:
    """simple docstring"""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5 , int(math.sqrt(lowerCamelCase__ ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def _lowercase ( lowerCamelCase__ = 0.1 ) -> int:
    """simple docstring"""
    __UpperCAmelCase : List[str] = 3
    __UpperCAmelCase : Optional[int] = 3
    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ):
            primes += is_prime(lowerCamelCase__ )
        j += 2
    return j


if __name__ == "__main__":
    import doctest

    doctest.testmod()
10
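The inner loop of the row above steps through the three new non-square corners of each spiral ring (the fourth corner is the odd square itself, which is never prime for side > 1). The corner values can also be written directly; this helper is illustrative, not part of the row:

def corners(side: int) -> list[int]:
    # The bottom-right corner of an odd side x side spiral is side**2;
    # the remaining corners step back by side - 1.
    return [side * side - k * (side - 1) for k in range(4)]

print(corners(3))  # [9, 7, 5, 3]
print(corners(5))  # [25, 21, 17, 13]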
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _a : List[str] = logging.get_logger(__name__) _a : Any = { "kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json", "kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json", "kssteven/ibert-roberta-large-mnli": ( "https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json" ), } class __A (__magic_name__ ): snake_case :Union[str, Any] = "ibert" def __init__( self , UpperCamelCase_=3_05_22 , UpperCamelCase_=7_68 , UpperCamelCase_=12 , UpperCamelCase_=12 , UpperCamelCase_=30_72 , UpperCamelCase_="gelu" , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=5_12 , UpperCamelCase_=2 , UpperCamelCase_=0.0_2 , UpperCamelCase_=1E-12 , UpperCamelCase_=1 , UpperCamelCase_=0 , UpperCamelCase_=2 , UpperCamelCase_="absolute" , UpperCamelCase_=False , UpperCamelCase_="none" , **UpperCamelCase_ , ): super().__init__(pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ ) __UpperCAmelCase : List[Any] = vocab_size __UpperCAmelCase : Optional[Any] = hidden_size __UpperCAmelCase : List[Any] = num_hidden_layers __UpperCAmelCase : Any = num_attention_heads __UpperCAmelCase : List[str] = hidden_act __UpperCAmelCase : List[str] = intermediate_size __UpperCAmelCase : Optional[int] = hidden_dropout_prob __UpperCAmelCase : Union[str, Any] = attention_probs_dropout_prob __UpperCAmelCase : str = max_position_embeddings __UpperCAmelCase : List[str] = type_vocab_size __UpperCAmelCase : Dict = initializer_range __UpperCAmelCase : Optional[int] = layer_norm_eps __UpperCAmelCase : Any = position_embedding_type __UpperCAmelCase : Tuple = quant_mode __UpperCAmelCase : Union[str, Any] = force_dequant class __A (__magic_name__ ): @property def _snake_case ( self ): if self.task == "multiple-choice": __UpperCAmelCase : Optional[int] = {0: "batch", 1: "choice", 2: "sequence"} else: __UpperCAmelCase : Optional[int] = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] )
10
1
'''simple docstring''' import datasets from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py _a : int = "\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",\n author = \"Lin, Chin-Yew and\n Och, Franz Josef\",\n booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",\n month = \"aug 23{--}aug 27\",\n year = \"2004\",\n address = \"Geneva, Switzerland\",\n publisher = \"COLING\",\n url = \"https://www.aclweb.org/anthology/C04-1072\",\n pages = \"501--507\",\n}\n" _a : Union[str, Any] = "\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,\nthe better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n" _a : List[Any] = "\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n 'bleu': bleu score,\n 'precisions': geometric mean of n-gram precisions,\n 'brevity_penalty': brevity penalty,\n 'length_ratio': ratio of lengths,\n 'translation_length': translation_length,\n 'reference_length': reference_length\nExamples:\n\n >>> predictions = [\n ... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample\n ... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)\n ... 
[[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)\n ... ]\n >>> bleu = datasets.load_metric(\"bleu\")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results[\"bleu\"])\n 1.0\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __A (datasets.Metric ): def _snake_case ( self ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ), "references": datasets.Sequence( datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ), } ) , codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"] , reference_urls=[ "https://en.wikipedia.org/wiki/BLEU", "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213", ] , ) def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=4 , UpperCamelCase_=False ): __UpperCAmelCase : Any = compute_bleu( reference_corpus=UpperCamelCase_ , translation_corpus=UpperCamelCase_ , max_order=UpperCamelCase_ , smooth=UpperCamelCase_ ) ((__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase)) : List[Any] = score return { "bleu": bleu, "precisions": precisions, "brevity_penalty": bp, "length_ratio": ratio, "translation_length": translation_length, "reference_length": reference_length, }
10
'''simple docstring'''
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments


def _lowercase ( ) -> Dict:
    """simple docstring"""
    __UpperCAmelCase : str = HfArgumentParser(lowerCamelCase__ )
    __UpperCAmelCase : Optional[Any] = parser.parse_args_into_dataclasses()[0]
    __UpperCAmelCase : Any = TensorFlowBenchmark(args=lowerCamelCase__ )
    try:
        __UpperCAmelCase : List[Any] = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        __UpperCAmelCase : str = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        __UpperCAmelCase : Tuple = " ".join(str(lowerCamelCase__ ).split(" " )[:-1] )
        __UpperCAmelCase : Any = ""
        __UpperCAmelCase : List[Any] = eval(str(lowerCamelCase__ ).split(" " )[-1] )
        __UpperCAmelCase : Optional[int] = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:] )
            else:
                wrong_args.append(lowerCamelCase__ )
        if len(lowerCamelCase__ ) > 0:
            __UpperCAmelCase : Union[str, Any] = full_error_msg + begin_error_msg + str(lowerCamelCase__ )
            raise ValueError(lowerCamelCase__ )
    benchmark.run()


if __name__ == "__main__":
    main()
10
1
'''simple docstring''' from __future__ import annotations class __A : def __init__( self , UpperCamelCase_ ): __UpperCAmelCase : Optional[int] = TypeError( "Matrices must be formed from a list of zero or more lists containing at " "least one and the same number of values, each of which must be of type " "int or float." ) if len(UpperCamelCase_ ) != 0: __UpperCAmelCase : List[str] = len(rows[0] ) if cols == 0: raise error for row in rows: if len(UpperCamelCase_ ) != cols: raise error for value in row: if not isinstance(UpperCamelCase_ , (int, float) ): raise error __UpperCAmelCase : Any = rows else: __UpperCAmelCase : List[str] = [] def _snake_case ( self ): return [[row[i] for row in self.rows] for i in range(len(self.rows[0] ) )] @property def _snake_case ( self ): return len(self.rows ) @property def _snake_case ( self ): return len(self.rows[0] ) @property def _snake_case ( self ): return (self.num_rows, self.num_columns) @property def _snake_case ( self ): return self.order[0] == self.order[1] def _snake_case ( self ): __UpperCAmelCase : List[str] = [ [0 if column_num != row_num else 1 for column_num in range(self.num_rows )] for row_num in range(self.num_rows ) ] return Matrix(UpperCamelCase_ ) def _snake_case ( self ): if not self.is_square: return 0 if self.order == (0, 0): return 1 if self.order == (1, 1): return int(self.rows[0][0] ) if self.order == (2, 2): return int( (self.rows[0][0] * self.rows[1][1]) - (self.rows[0][1] * self.rows[1][0]) ) else: return sum( self.rows[0][column] * self.cofactors().rows[0][column] for column in range(self.num_columns ) ) def _snake_case ( self ): return bool(self.determinant() ) def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ ): __UpperCAmelCase : Optional[Any] = [ [ self.rows[other_row][other_column] for other_column in range(self.num_columns ) if other_column != column ] for other_row in range(self.num_rows ) if other_row != row ] return Matrix(UpperCamelCase_ ).determinant() def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ ): if (row + column) % 2 == 0: return self.get_minor(UpperCamelCase_ , UpperCamelCase_ ) return -1 * self.get_minor(UpperCamelCase_ , UpperCamelCase_ ) def _snake_case ( self ): return Matrix( [ [self.get_minor(UpperCamelCase_ , UpperCamelCase_ ) for column in range(self.num_columns )] for row in range(self.num_rows ) ] ) def _snake_case ( self ): return Matrix( [ [ self.minors().rows[row][column] if (row + column) % 2 == 0 else self.minors().rows[row][column] * -1 for column in range(self.minors().num_columns ) ] for row in range(self.minors().num_rows ) ] ) def _snake_case ( self ): __UpperCAmelCase : List[Any] = [ [self.cofactors().rows[column][row] for column in range(self.num_columns )] for row in range(self.num_rows ) ] return Matrix(UpperCamelCase_ ) def _snake_case ( self ): __UpperCAmelCase : Dict = self.determinant() if not determinant: raise TypeError("Only matrices with a non-zero determinant have an inverse" ) return self.adjugate() * (1 / determinant) def __repr__( self ): return str(self.rows ) def __str__( self ): if self.num_rows == 0: return "[]" if self.num_rows == 1: return "[[" + ". ".join(str(self.rows[0] ) ) + "]]" return ( "[" + "\n ".join( [ "[" + ". 
".join([str(UpperCamelCase_ ) for value in row] ) + ".]" for row in self.rows ] ) + "]" ) def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ): __UpperCAmelCase : int = TypeError("Row must be a list containing all ints and/or floats" ) if not isinstance(UpperCamelCase_ , UpperCamelCase_ ): raise type_error for value in row: if not isinstance(UpperCamelCase_ , (int, float) ): raise type_error if len(UpperCamelCase_ ) != self.num_columns: raise ValueError( "Row must be equal in length to the other rows in the matrix" ) if position is None: self.rows.append(UpperCamelCase_ ) else: __UpperCAmelCase : Any = self.rows[0:position] + [row] + self.rows[position:] def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ): __UpperCAmelCase : Dict = TypeError( "Column must be a list containing all ints and/or floats" ) if not isinstance(UpperCamelCase_ , UpperCamelCase_ ): raise type_error for value in column: if not isinstance(UpperCamelCase_ , (int, float) ): raise type_error if len(UpperCamelCase_ ) != self.num_rows: raise ValueError( "Column must be equal in length to the other columns in the matrix" ) if position is None: __UpperCAmelCase : Union[str, Any] = [self.rows[i] + [column[i]] for i in range(self.num_rows )] else: __UpperCAmelCase : int = [ self.rows[i][0:position] + [column[i]] + self.rows[i][position:] for i in range(self.num_rows ) ] def __eq__( self , UpperCamelCase_ ): if not isinstance(UpperCamelCase_ , UpperCamelCase_ ): return NotImplemented return self.rows == other.rows def __ne__( self , UpperCamelCase_ ): return not self == other def __neg__( self ): return self * -1 def __add__( self , UpperCamelCase_ ): if self.order != other.order: raise ValueError("Addition requires matrices of the same order" ) return Matrix( [ [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns )] for i in range(self.num_rows ) ] ) def __sub__( self , UpperCamelCase_ ): if self.order != other.order: raise ValueError("Subtraction requires matrices of the same order" ) return Matrix( [ [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns )] for i in range(self.num_rows ) ] ) def __mul__( self , UpperCamelCase_ ): if isinstance(UpperCamelCase_ , (int, float) ): return Matrix( [[int(element * other ) for element in row] for row in self.rows] ) elif isinstance(UpperCamelCase_ , UpperCamelCase_ ): if self.num_columns != other.num_rows: raise ValueError( "The number of columns in the first matrix must " "be equal to the number of rows in the second" ) return Matrix( [ [Matrix.dot_product(UpperCamelCase_ , UpperCamelCase_ ) for column in other.columns()] for row in self.rows ] ) else: raise TypeError( "A Matrix can only be multiplied by an int, float, or another matrix" ) def __pow__( self , UpperCamelCase_ ): if not isinstance(UpperCamelCase_ , UpperCamelCase_ ): raise TypeError("A Matrix can only be raised to the power of an int" ) if not self.is_square: raise ValueError("Only square matrices can be raised to a power" ) if other == 0: return self.identity() if other < 0: if self.is_invertable(): return self.inverse() ** (-other) raise ValueError( "Only invertable matrices can be raised to a negative power" ) __UpperCAmelCase : Any = self for _ in range(other - 1 ): result *= self return result @classmethod def _snake_case ( cls , UpperCamelCase_ , UpperCamelCase_ ): return sum(row[i] * column[i] for i in range(len(UpperCamelCase_ ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
10
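A minimal usage sketch for the Matrix class above, assuming the de-obfuscated method names used in this cleanup (determinant, inverse, dot_product appear in the file's own call sites):

m = Matrix([[1, 2], [3, 4]])
assert m.determinant() == -2
assert (m ** 2) == Matrix([[7, 10], [15, 22]])          # repeated __mul__
assert m + Matrix([[1, 0], [0, 1]]) == Matrix([[2, 2], [3, 5]])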
'''simple docstring''' import gc import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer import diffusers from diffusers import ( AutoencoderKL, EulerDiscreteScheduler, StableDiffusionLatentUpscalePipeline, StableDiffusionPipeline, UNetaDConditionModel, ) from diffusers.schedulers import KarrasDiffusionSchedulers from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() def _lowercase ( lowerCamelCase__ ) -> Union[str, Any]: """simple docstring""" __UpperCAmelCase : Dict = [tensor.shape for tensor in tensor_list] return all(shape == shapes[0] for shape in shapes[1:] ) class __A (__magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ): snake_case :Union[str, Any] = StableDiffusionLatentUpscalePipeline snake_case :Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - { "height", "width", "cross_attention_kwargs", "negative_prompt_embeds", "prompt_embeds", } snake_case :List[str] = PipelineTesterMixin.required_optional_params - {"num_images_per_prompt"} snake_case :Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS snake_case :Optional[Any] = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess snake_case :Any = frozenset([] ) snake_case :Optional[int] = True @property def _snake_case ( self ): __UpperCAmelCase : Optional[int] = 1 __UpperCAmelCase : Dict = 4 __UpperCAmelCase : List[str] = (16, 16) __UpperCAmelCase : Dict = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(UpperCamelCase_ ) return image def _snake_case ( self ): torch.manual_seed(0 ) __UpperCAmelCase : List[str] = UNetaDConditionModel( act_fn="gelu" , attention_head_dim=8 , norm_num_groups=UpperCamelCase_ , block_out_channels=[32, 32, 64, 64] , time_cond_proj_dim=1_60 , conv_in_kernel=1 , conv_out_kernel=1 , cross_attention_dim=32 , down_block_types=( "KDownBlock2D", "KCrossAttnDownBlock2D", "KCrossAttnDownBlock2D", "KCrossAttnDownBlock2D", ) , in_channels=8 , mid_block_type=UpperCamelCase_ , only_cross_attention=UpperCamelCase_ , out_channels=5 , resnet_time_scale_shift="scale_shift" , time_embedding_type="fourier" , timestep_post_act="gelu" , up_block_types=("KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KUpBlock2D") , ) __UpperCAmelCase : int = AutoencoderKL( block_out_channels=[32, 32, 64, 64] , in_channels=3 , out_channels=3 , down_block_types=[ "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", ] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , ) __UpperCAmelCase : Optional[int] = EulerDiscreteScheduler(prediction_type="sample" ) __UpperCAmelCase : int = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="quick_gelu" , projection_dim=5_12 , ) __UpperCAmelCase : List[str] = CLIPTextModel(UpperCamelCase_ ) __UpperCAmelCase : Tuple = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) 
__UpperCAmelCase : Union[str, Any] = { "unet": model.eval(), "vae": vae.eval(), "scheduler": scheduler, "text_encoder": text_encoder, "tokenizer": tokenizer, } return components def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_=0 ): if str(UpperCamelCase_ ).startswith("mps" ): __UpperCAmelCase : str = torch.manual_seed(UpperCamelCase_ ) else: __UpperCAmelCase : Optional[int] = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ ) __UpperCAmelCase : Any = { "prompt": "A painting of a squirrel eating a burger", "image": self.dummy_image.cpu(), "generator": generator, "num_inference_steps": 2, "output_type": "numpy", } return inputs def _snake_case ( self ): __UpperCAmelCase : List[str] = "cpu" __UpperCAmelCase : List[str] = self.get_dummy_components() __UpperCAmelCase : Tuple = self.pipeline_class(**UpperCamelCase_ ) pipe.to(UpperCamelCase_ ) pipe.set_progress_bar_config(disable=UpperCamelCase_ ) __UpperCAmelCase : Any = self.get_dummy_inputs(UpperCamelCase_ ) __UpperCAmelCase : int = pipe(**UpperCamelCase_ ).images __UpperCAmelCase : Any = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 2_56, 2_56, 3) ) __UpperCAmelCase : Tuple = np.array( [0.4_7_2_2_2_4_1_2, 0.4_1_9_2_1_6_3_3, 0.4_4_7_1_7_4_3_4, 0.4_6_8_7_4_1_9_2, 0.4_2_5_8_8_2_5_8, 0.4_6_1_5_0_7_2_6, 0.4_6_7_7_5_3_4, 0.4_5_5_8_3_8_3_2, 0.4_8_5_7_9_0_5_5] ) __UpperCAmelCase : List[str] = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(UpperCamelCase_ , 1E-3 ) def _snake_case ( self ): super().test_attention_slicing_forward_pass(expected_max_diff=7E-3 ) def _snake_case ( self ): super().test_cpu_offload_forward_pass(expected_max_diff=3E-3 ) def _snake_case ( self ): super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 ) def _snake_case ( self ): super().test_inference_batch_single_identical(expected_max_diff=7E-3 ) def _snake_case ( self ): super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3E-3 ) def _snake_case ( self ): super().test_save_load_local(expected_max_difference=3E-3 ) def _snake_case ( self ): super().test_save_load_optional_components(expected_max_difference=3E-3 ) def _snake_case ( self ): __UpperCAmelCase : Dict = [ "DDIMScheduler", "DDPMScheduler", "PNDMScheduler", "HeunDiscreteScheduler", "EulerAncestralDiscreteScheduler", "KDPM2DiscreteScheduler", "KDPM2AncestralDiscreteScheduler", "DPMSolverSDEScheduler", ] __UpperCAmelCase : Tuple = self.get_dummy_components() __UpperCAmelCase : Union[str, Any] = self.pipeline_class(**UpperCamelCase_ ) # make sure that PNDM does not need warm-up pipe.scheduler.register_to_config(skip_prk_steps=UpperCamelCase_ ) pipe.to(UpperCamelCase_ ) pipe.set_progress_bar_config(disable=UpperCamelCase_ ) __UpperCAmelCase : Tuple = self.get_dummy_inputs(UpperCamelCase_ ) __UpperCAmelCase : List[str] = 2 __UpperCAmelCase : List[str] = [] for scheduler_enum in KarrasDiffusionSchedulers: if scheduler_enum.name in skip_schedulers: # no sigma schedulers are not supported # no schedulers continue __UpperCAmelCase : Optional[int] = getattr(UpperCamelCase_ , scheduler_enum.name ) __UpperCAmelCase : List[str] = scheduler_cls.from_config(pipe.scheduler.config ) __UpperCAmelCase : Optional[int] = pipe(**UpperCamelCase_ )[0] outputs.append(UpperCamelCase_ ) assert check_same_shape(UpperCamelCase_ ) @require_torch_gpu @slow class __A (unittest.TestCase ): def _snake_case ( self ): super().tearDown() gc.collect() torch.cuda.empty_cache() def _snake_case ( self ): __UpperCAmelCase : Optional[int] = torch.manual_seed(33 ) 
__UpperCAmelCase : str = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4" , torch_dtype=torch.floataa ) pipe.to("cuda" ) __UpperCAmelCase : Union[str, Any] = StableDiffusionLatentUpscalePipeline.from_pretrained( "stabilityai/sd-x2-latent-upscaler" , torch_dtype=torch.floataa ) upscaler.to("cuda" ) __UpperCAmelCase : Optional[int] = "a photo of an astronaut high resolution, unreal engine, ultra realistic" __UpperCAmelCase : Any = pipe(UpperCamelCase_ , generator=UpperCamelCase_ , output_type="latent" ).images __UpperCAmelCase : int = upscaler( prompt=UpperCamelCase_ , image=UpperCamelCase_ , num_inference_steps=20 , guidance_scale=0 , generator=UpperCamelCase_ , output_type="np" , ).images[0] __UpperCAmelCase : Optional[Any] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy" ) assert np.abs((expected_image - image).mean() ) < 5E-2 def _snake_case ( self ): __UpperCAmelCase : List[Any] = torch.manual_seed(33 ) __UpperCAmelCase : Union[str, Any] = StableDiffusionLatentUpscalePipeline.from_pretrained( "stabilityai/sd-x2-latent-upscaler" , torch_dtype=torch.floataa ) upscaler.to("cuda" ) __UpperCAmelCase : Optional[Any] = "the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas" __UpperCAmelCase : str = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png" ) __UpperCAmelCase : Dict = upscaler( prompt=UpperCamelCase_ , image=UpperCamelCase_ , num_inference_steps=20 , guidance_scale=0 , generator=UpperCamelCase_ , output_type="np" , ).images[0] __UpperCAmelCase : Tuple = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy" ) assert np.abs((expected_image - image).max() ) < 5E-2
10
1
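The shape-consistency helper defined at the top of the test file above (called check_same_shape at its use site) simply verifies that every tensor in a list has the same shape; a quick illustration, assuming torch is installed:

import torch

same = [torch.zeros(1, 4, 16, 16), torch.zeros(1, 4, 16, 16)]
ragged = [torch.zeros(1, 4, 16, 16), torch.zeros(1, 4, 8, 8)]
assert check_same_shape(same)        # identical shapes -> True
assert not check_same_shape(ragged)  # mismatched shapes -> False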
"""Reader that turns a pyspark.sql.DataFrame into a `datasets` Dataset.

Parameter names are restored from the upstream `datasets` source; the
obfuscated signature had every argument named identically, which is not
valid Python.
"""
from typing import Optional

import pyspark

from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader


class SparkDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        df: pyspark.sql.DataFrame,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        streaming: bool = True,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        working_dir: str = None,
        load_from_cache_file: bool = True,
        file_format: str = "arrow",
        **kwargs,
    ):
        super().__init__(
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            **kwargs,
        )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df,
            features=features,
            cache_dir=cache_dir,
            working_dir=working_dir,
            **kwargs,
        )

    def read(self):
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        download_mode = (
            None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        )
        self.builder.download_and_prepare(
            download_mode=download_mode,
            file_format=self._file_format,
        )
        return self.builder.as_dataset(split=self.split)
10
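A hedged usage sketch for the reader above; the DataFrame contents and Spark session setup are illustrative only, and `SparkDatasetReader` is the name assumed in this cleanup:

from pyspark.sql import SparkSession

spark = SparkSession.builder.master("local[*]").appName("demo").getOrCreate()
df = spark.createDataFrame([("hello",), ("world",)], ["text"])
ds = SparkDatasetReader(df, streaming=False).read()  # materializes via download_and_prepare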
'''simple docstring''' # Lint as: python3 import sys from collections.abc import Mapping from typing import TYPE_CHECKING import numpy as np import pyarrow as pa from .. import config from ..utils.py_utils import map_nested from .formatting import TensorFormatter if TYPE_CHECKING: import torch class __A (TensorFormatter[Mapping, "torch.Tensor", Mapping] ): def __init__( self , UpperCamelCase_=None , **UpperCamelCase_ ): super().__init__(features=UpperCamelCase_ ) __UpperCAmelCase : Union[str, Any] = torch_tensor_kwargs import torch # noqa import torch at initialization def _snake_case ( self , UpperCamelCase_ ): import torch if isinstance(UpperCamelCase_ , UpperCamelCase_ ) and column: if all( isinstance(UpperCamelCase_ , torch.Tensor ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ): return torch.stack(UpperCamelCase_ ) return column def _snake_case ( self , UpperCamelCase_ ): import torch if isinstance(UpperCamelCase_ , (str, bytes, type(UpperCamelCase_ )) ): return value elif isinstance(UpperCamelCase_ , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ): return value.tolist() __UpperCAmelCase : int = {} if isinstance(UpperCamelCase_ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ): __UpperCAmelCase : Optional[int] = {"dtype": torch.intaa} elif isinstance(UpperCamelCase_ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ): __UpperCAmelCase : str = {"dtype": torch.floataa} elif config.PIL_AVAILABLE and "PIL" in sys.modules: import PIL.Image if isinstance(UpperCamelCase_ , PIL.Image.Image ): __UpperCAmelCase : str = np.asarray(UpperCamelCase_ ) return torch.tensor(UpperCamelCase_ , **{**default_dtype, **self.torch_tensor_kwargs} ) def _snake_case ( self , UpperCamelCase_ ): import torch # support for torch, tf, jax etc. 
if hasattr(UpperCamelCase_ , "__array__" ) and not isinstance(UpperCamelCase_ , torch.Tensor ): __UpperCAmelCase : Dict = data_struct.__array__() # support for nested types like struct of list of struct if isinstance(UpperCamelCase_ , np.ndarray ): if data_struct.dtype == object: # torch tensors cannot be instantied from an array of objects return self._consolidate([self.recursive_tensorize(UpperCamelCase_ ) for substruct in data_struct] ) elif isinstance(UpperCamelCase_ , (list, tuple) ): return self._consolidate([self.recursive_tensorize(UpperCamelCase_ ) for substruct in data_struct] ) return self._tensorize(UpperCamelCase_ ) def _snake_case ( self , UpperCamelCase_ ): return map_nested(self._recursive_tensorize , UpperCamelCase_ , map_list=UpperCamelCase_ ) def _snake_case ( self , UpperCamelCase_ ): __UpperCAmelCase : List[str] = self.numpy_arrow_extractor().extract_row(UpperCamelCase_ ) __UpperCAmelCase : Union[str, Any] = self.python_features_decoder.decode_row(UpperCamelCase_ ) return self.recursive_tensorize(UpperCamelCase_ ) def _snake_case ( self , UpperCamelCase_ ): __UpperCAmelCase : Union[str, Any] = self.numpy_arrow_extractor().extract_column(UpperCamelCase_ ) __UpperCAmelCase : Optional[Any] = self.python_features_decoder.decode_column(UpperCamelCase_ , pa_table.column_names[0] ) __UpperCAmelCase : List[Any] = self.recursive_tensorize(UpperCamelCase_ ) __UpperCAmelCase : List[str] = self._consolidate(UpperCamelCase_ ) return column def _snake_case ( self , UpperCamelCase_ ): __UpperCAmelCase : int = self.numpy_arrow_extractor().extract_batch(UpperCamelCase_ ) __UpperCAmelCase : Any = self.python_features_decoder.decode_batch(UpperCamelCase_ ) __UpperCAmelCase : Optional[int] = self.recursive_tensorize(UpperCamelCase_ ) for column_name in batch: __UpperCAmelCase : Tuple = self._consolidate(batch[column_name] ) return batch
10
1
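What the formatter's _consolidate step (named at its call sites above) does in isolation: tensors in a column that share shape and dtype are stacked into one batch tensor, anything else is returned unchanged. A small sketch, assuming torch is installed:

import torch

column = [torch.zeros(2, 3), torch.ones(2, 3)]
batch = torch.stack(column)  # what _consolidate returns here: shape (2, 2, 3)
ragged = [torch.zeros(2, 3), torch.zeros(4, 3)]  # mismatched shapes stay a plain list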
"""Modular inverse via the extended Euclidean algorithm."""


def greatest_common_divisor(a: int, b: int) -> int:
    """Euclid's algorithm for the greatest common divisor."""
    while a != 0:
        a, b = b % a, a
    return b


def mod_inverse(a: int, m: int) -> int:
    """Return x such that (a * x) % m == 1, or raise if no inverse exists."""
    if greatest_common_divisor(a, m) != 1:
        msg = f"mod inverse of {a!r} and {m!r} does not exist"
        raise ValueError(msg)
    # Extended Euclidean algorithm; (u1, u2, u3) and (v1, v2, v3) track the
    # Bezout coefficients alongside the remainders.
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (
            (u1 - q * v1),
            (u2 - q * v2),
            (u3 - q * v3),
            v1,
            v2,
            v3,
        )
    return u1 % m
10
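A quick check of the two helpers above: 3 * 4 = 12 ≡ 1 (mod 11), so 4 is the inverse of 3 modulo 11.

assert greatest_common_divisor(3, 11) == 1
assert mod_inverse(3, 11) == 4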
"""Graph m-coloring by backtracking.

The obfuscated version gave every parameter the same name (a SyntaxError);
names are restored from the internal call sites (valid_coloring, util_color,
graph[index], ...).
"""


def valid_coloring(
    neighbours: list[int], colored_vertices: list[int], color: int
) -> bool:
    """True if no already-colored neighbour uses the given color."""
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours)
    )


def util_color(
    graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int
) -> bool:
    """Recursively try to color vertex `index` and all vertices after it."""
    # Base Case: every vertex has been assigned a color
    if index == len(graph):
        return True

    # Recursive Step: try each color for the current vertex
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring of the remaining vertices
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph: list[list[int]], max_colors: int) -> list[int]:
    """Return a valid coloring using at most `max_colors` colors, or []."""
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
10
1
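Example: 3-coloring a small graph given as an adjacency matrix (a triangle 0-1-2 plus a pendant vertex 3 attached to 2), using the cleaned-up names above:

graph = [
    [0, 1, 1, 0],
    [1, 0, 1, 0],
    [1, 1, 0, 1],
    [0, 0, 1, 0],
]
assert color(graph, 3) == [0, 1, 2, 0]  # first coloring found by the backtracker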
"""Project Euler problem 6: difference between the square of the sum and the
sum of the squares of the first n natural numbers, in closed form."""


def solution(n: int = 100) -> int:
    sum_cubes = (n * (n + 1) // 2) ** 2  # square of the sum, (n(n+1)/2)^2
    sum_squares = n * (n + 1) * (2 * n + 1) // 6  # sum of the squares
    return sum_cubes - sum_squares


if __name__ == "__main__":
    print(f"{solution() = }")
10
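A sanity check of the closed-form solution above against the brute-force definition, for n = 10:

n = 10
brute_force = sum(range(1, n + 1)) ** 2 - sum(i * i for i in range(1, n + 1))
assert solution(n) == brute_force == 2640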
"""Single-bit manipulation operations.

The obfuscated version reused one name for both parameters of every function
(a SyntaxError); parameter names are restored from the function bodies, and
the function names below are assumptions based on what each body does.
"""


def set_bit(number: int, position: int) -> int:
    """Set the bit at `position` to 1."""
    return number | (1 << position)


def clear_bit(number: int, position: int) -> int:
    """Set the bit at `position` to 0."""
    return number & ~(1 << position)


def flip_bit(number: int, position: int) -> int:
    """Toggle the bit at `position`."""
    return number ^ (1 << position)


def is_bit_set(number: int, position: int) -> bool:
    """Return True if the bit at `position` is 1."""
    return ((number >> position) & 1) == 1


def get_bit(number: int, position: int) -> int:
    """Return the bit at `position` as 0 or 1."""
    return int((number & (1 << position)) != 0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
10
1
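Spot checks for the bit helpers above, written against 0b1001 (decimal 9):

assert set_bit(0b1001, 1) == 0b1011    # 9 | 2  -> 11
assert clear_bit(0b1001, 3) == 0b0001  # 9 & ~8 -> 1
assert flip_bit(0b1001, 0) == 0b1000   # 9 ^ 1  -> 8
assert is_bit_set(0b1001, 3) is True
assert get_bit(0b1001, 1) == 0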
'''simple docstring''' import os import re import shutil import sys import tempfile import unittest import black _a : Tuple = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, "utils")) import check_copies # noqa: E402 # This is the reference code that will be used in the tests. # If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated. _a : Optional[int] = " \"\"\"\n Output class for the scheduler's step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"\"\"\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n" class __A (unittest.TestCase ): def _snake_case ( self ): __UpperCAmelCase : Any = tempfile.mkdtemp() os.makedirs(os.path.join(self.diffusers_dir , "schedulers/" ) ) __UpperCAmelCase : List[str] = self.diffusers_dir shutil.copy( os.path.join(UpperCamelCase_ , "src/diffusers/schedulers/scheduling_ddpm.py" ) , os.path.join(self.diffusers_dir , "schedulers/scheduling_ddpm.py" ) , ) def _snake_case ( self ): __UpperCAmelCase : Optional[int] = "src/diffusers" shutil.rmtree(self.diffusers_dir ) def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=None ): __UpperCAmelCase : str = comment + f"""\nclass {class_name}(nn.Module):\n""" + class_code if overwrite_result is not None: __UpperCAmelCase : Any = comment + f"""\nclass {class_name}(nn.Module):\n""" + overwrite_result __UpperCAmelCase : Dict = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=1_19 ) __UpperCAmelCase : Dict = black.format_str(UpperCamelCase_ , mode=UpperCamelCase_ ) __UpperCAmelCase : Dict = os.path.join(self.diffusers_dir , "new_code.py" ) with open(UpperCamelCase_ , "w" , newline="\n" ) as f: f.write(UpperCamelCase_ ) if overwrite_result is None: self.assertTrue(len(check_copies.is_copy_consistent(UpperCamelCase_ ) ) == 0 ) else: check_copies.is_copy_consistent(f.name , overwrite=UpperCamelCase_ ) with open(UpperCamelCase_ , "r" ) as f: self.assertTrue(f.read() , UpperCamelCase_ ) def _snake_case ( self ): __UpperCAmelCase : Any = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput" ) self.assertEqual(UpperCamelCase_ , UpperCamelCase_ ) def _snake_case ( self ): # Base copy consistency self.check_copy_consistency( "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput" , "DDPMSchedulerOutput" , REFERENCE_CODE + "\n" , ) # With no empty line at the end self.check_copy_consistency( "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput" , "DDPMSchedulerOutput" , UpperCamelCase_ , ) # Copy consistency with rename self.check_copy_consistency( "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test" , "TestSchedulerOutput" , re.sub("DDPM" , "Test" , UpperCamelCase_ ) , ) # Copy consistency with a really long name __UpperCAmelCase : int = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason" self.check_copy_consistency( f"""# 
Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}""" , f"""{long_class_name}SchedulerOutput""" , re.sub("Bert" , UpperCamelCase_ , UpperCamelCase_ ) , ) # Copy consistency with overwrite self.check_copy_consistency( "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test" , "TestSchedulerOutput" , UpperCamelCase_ , overwrite_result=re.sub("DDPM" , "Test" , UpperCamelCase_ ) , )
10
"""k-nearest-neighbours classification on the iris dataset."""
from collections import Counter

import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split

data = datasets.load_iris()

X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]

X_train, X_test, y_train, y_test = train_test_split(X, y)


def euclidean_distance(a, b) -> float:
    """Euclidean distance between two points."""
    return np.linalg.norm(np.array(a) - np.array(b))


def classifier(train_data, train_target, classes, point, k=5) -> str:
    """Classify `point` by majority vote among its k nearest training points."""
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]


if __name__ == "__main__":
    print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
10
1
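The demo point [4.4, 3.1, 1.3, 1.4] sits in the setosa region of the iris data, so the script prints "setosa" for almost any random split. A second, virginica-side query with a larger k (a small odd k keeps two-class ties unlikely); the query point here is illustrative only:

print(classifier(X_train, y_train, classes, [6.7, 3.0, 5.2, 2.3], k=7))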
'''simple docstring''' import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING _a : Dict = logging.get_logger(__name__) _a : Union[str, Any] = { "ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json", } class __A (__magic_name__ ): snake_case :Dict = "deta" snake_case :List[Any] = { "hidden_size": "d_model", "num_attention_heads": "encoder_attention_heads", } def __init__( self , UpperCamelCase_=None , UpperCamelCase_=9_00 , UpperCamelCase_=20_48 , UpperCamelCase_=6 , UpperCamelCase_=20_48 , UpperCamelCase_=8 , UpperCamelCase_=6 , UpperCamelCase_=10_24 , UpperCamelCase_=8 , UpperCamelCase_=0.0 , UpperCamelCase_=True , UpperCamelCase_="relu" , UpperCamelCase_=2_56 , UpperCamelCase_=0.1 , UpperCamelCase_=0.0 , UpperCamelCase_=0.0 , UpperCamelCase_=0.0_2 , UpperCamelCase_=1.0 , UpperCamelCase_=True , UpperCamelCase_=False , UpperCamelCase_="sine" , UpperCamelCase_=5 , UpperCamelCase_=4 , UpperCamelCase_=4 , UpperCamelCase_=True , UpperCamelCase_=3_00 , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=1 , UpperCamelCase_=5 , UpperCamelCase_=2 , UpperCamelCase_=1 , UpperCamelCase_=1 , UpperCamelCase_=5 , UpperCamelCase_=2 , UpperCamelCase_=0.1 , UpperCamelCase_=0.2_5 , **UpperCamelCase_ , ): if backbone_config is None: logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." ) __UpperCAmelCase : Tuple = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"] ) else: if isinstance(UpperCamelCase_ , UpperCamelCase_ ): __UpperCAmelCase : List[str] = backbone_config.pop("model_type" ) __UpperCAmelCase : List[str] = CONFIG_MAPPING[backbone_model_type] __UpperCAmelCase : Dict = config_class.from_dict(UpperCamelCase_ ) __UpperCAmelCase : List[str] = backbone_config __UpperCAmelCase : List[str] = num_queries __UpperCAmelCase : Optional[Any] = max_position_embeddings __UpperCAmelCase : Optional[Any] = d_model __UpperCAmelCase : int = encoder_ffn_dim __UpperCAmelCase : List[Any] = encoder_layers __UpperCAmelCase : Dict = encoder_attention_heads __UpperCAmelCase : Union[str, Any] = decoder_ffn_dim __UpperCAmelCase : str = decoder_layers __UpperCAmelCase : str = decoder_attention_heads __UpperCAmelCase : Tuple = dropout __UpperCAmelCase : Union[str, Any] = attention_dropout __UpperCAmelCase : List[Any] = activation_dropout __UpperCAmelCase : List[str] = activation_function __UpperCAmelCase : List[Any] = init_std __UpperCAmelCase : Any = init_xavier_std __UpperCAmelCase : Optional[int] = encoder_layerdrop __UpperCAmelCase : Union[str, Any] = auxiliary_loss __UpperCAmelCase : Optional[Any] = position_embedding_type # deformable attributes __UpperCAmelCase : Any = num_feature_levels __UpperCAmelCase : str = encoder_n_points __UpperCAmelCase : str = decoder_n_points __UpperCAmelCase : Tuple = two_stage __UpperCAmelCase : Optional[int] = two_stage_num_proposals __UpperCAmelCase : int = with_box_refine __UpperCAmelCase : int = assign_first_stage if two_stage is True and with_box_refine is False: raise ValueError("If two_stage is True, with_box_refine must be True." 
) # Hungarian matcher __UpperCAmelCase : int = class_cost __UpperCAmelCase : Optional[Any] = bbox_cost __UpperCAmelCase : Any = giou_cost # Loss coefficients __UpperCAmelCase : int = mask_loss_coefficient __UpperCAmelCase : Union[str, Any] = dice_loss_coefficient __UpperCAmelCase : int = bbox_loss_coefficient __UpperCAmelCase : str = giou_loss_coefficient __UpperCAmelCase : Optional[Any] = eos_coefficient __UpperCAmelCase : int = focal_alpha super().__init__(is_encoder_decoder=UpperCamelCase_ , **UpperCamelCase_ ) @property def _snake_case ( self ): return self.encoder_attention_heads @property def _snake_case ( self ): return self.d_model def _snake_case ( self ): __UpperCAmelCase : Optional[Any] = copy.deepcopy(self.__dict__ ) __UpperCAmelCase : str = self.backbone_config.to_dict() __UpperCAmelCase : Optional[int] = self.__class__.model_type return output
10
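The attribute_map in the config above aliases the generic config names onto DETA's own fields, and num_attention_heads is also exposed as a property; a quick check, assuming a transformers version that ships DETA:

from transformers import DetaConfig

config = DetaConfig()
assert config.hidden_size == config.d_model
assert config.num_attention_heads == config.encoder_attention_heads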
"""Disjoint set (union-find) that tracks the size of the largest set.

`get_parent` is named in the original's call sites; the class name and
`merge` are assumptions based on behavior.
"""


class DisjointSet:
    def __init__(self, set_counts: list) -> None:
        """`set_counts` gives the initial size of each set."""
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))

    def merge(self, src: int, dst: int) -> bool:
        """Union by rank; returns False if src and dst are already joined."""
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)
        if src_parent == dst_parent:
            return False
        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]
        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set: int) -> int:
        """Find the root of the set, compressing the path along the way."""
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
10
1
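Merging three singleton sets and tracking the largest component, using the names from the cleanup above (DisjointSet/merge are assumed names):

ds = DisjointSet([1, 1, 1])
ds.merge(1, 2)
ds.merge(0, 2)
assert ds.get_parent(0) == ds.get_parent(1)
assert ds.max_set == 3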
'''simple docstring''' import argparse import re import numpy as np import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SamConfig, SamImageProcessor, SamModel, SamProcessor, SamVisionConfig, ) _a : Union[str, Any] = { "iou_prediction_head.layers.0": "iou_prediction_head.proj_in", "iou_prediction_head.layers.1": "iou_prediction_head.layers.0", "iou_prediction_head.layers.2": "iou_prediction_head.proj_out", "mask_decoder.output_upscaling.0": "mask_decoder.upscale_conv1", "mask_decoder.output_upscaling.1": "mask_decoder.upscale_layer_norm", "mask_decoder.output_upscaling.3": "mask_decoder.upscale_conv2", "mask_downscaling.0": "mask_embed.conv1", "mask_downscaling.1": "mask_embed.layer_norm1", "mask_downscaling.3": "mask_embed.conv2", "mask_downscaling.4": "mask_embed.layer_norm2", "mask_downscaling.6": "mask_embed.conv3", "point_embeddings": "point_embed", "pe_layer.positional_encoding_gaussian_matrix": "shared_embedding.positional_embedding", "image_encoder": "vision_encoder", "neck.0": "neck.conv1", "neck.1": "neck.layer_norm1", "neck.2": "neck.conv2", "neck.3": "neck.layer_norm2", "patch_embed.proj": "patch_embed.projection", ".norm": ".layer_norm", "blocks": "layers", } def _lowercase ( lowerCamelCase__ ) -> Union[str, Any]: """simple docstring""" __UpperCAmelCase : Union[str, Any] = {} state_dict.pop("pixel_mean" , lowerCamelCase__ ) state_dict.pop("pixel_std" , lowerCamelCase__ ) __UpperCAmelCase : Union[str, Any] = R".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*" for key, value in state_dict.items(): for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items(): if key_to_modify in key: __UpperCAmelCase : str = key.replace(lowerCamelCase__ , lowerCamelCase__ ) if re.match(lowerCamelCase__ , lowerCamelCase__ ): __UpperCAmelCase : Tuple = int(re.match(lowerCamelCase__ , lowerCamelCase__ ).group(2 ) ) if layer_nb == 0: __UpperCAmelCase : List[str] = key.replace("layers.0" , "proj_in" ) elif layer_nb == 1: __UpperCAmelCase : int = key.replace("layers.1" , "layers.0" ) elif layer_nb == 2: __UpperCAmelCase : List[str] = key.replace("layers.2" , "proj_out" ) __UpperCAmelCase : Optional[Any] = value __UpperCAmelCase : str = model_state_dict[ "prompt_encoder.shared_embedding.positional_embedding" ] return model_state_dict def _lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__="ybelkada/segment-anything" ) -> List[str]: """simple docstring""" __UpperCAmelCase : Dict = hf_hub_download(lowerCamelCase__ , f"""checkpoints/{model_name}.pth""" ) if "sam_vit_b" in model_name: __UpperCAmelCase : Optional[int] = SamConfig() elif "sam_vit_l" in model_name: __UpperCAmelCase : Dict = SamVisionConfig( hidden_size=1024 , num_hidden_layers=24 , num_attention_heads=16 , global_attn_indexes=[5, 11, 17, 23] , ) __UpperCAmelCase : Optional[int] = SamConfig( vision_config=lowerCamelCase__ , ) elif "sam_vit_h" in model_name: __UpperCAmelCase : List[str] = SamVisionConfig( hidden_size=1280 , num_hidden_layers=32 , num_attention_heads=16 , global_attn_indexes=[7, 15, 23, 31] , ) __UpperCAmelCase : Any = SamConfig( vision_config=lowerCamelCase__ , ) __UpperCAmelCase : Union[str, Any] = torch.load(lowerCamelCase__ , map_location="cpu" ) __UpperCAmelCase : int = replace_keys(lowerCamelCase__ ) __UpperCAmelCase : int = SamImageProcessor() __UpperCAmelCase : Any = SamProcessor(image_processor=lowerCamelCase__ ) __UpperCAmelCase : str = SamModel(lowerCamelCase__ ) hf_model.load_state_dict(lowerCamelCase__ ) 
__UpperCAmelCase : List[str] = hf_model.to("cuda" ) __UpperCAmelCase : List[Any] = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png" __UpperCAmelCase : Tuple = Image.open(requests.get(lowerCamelCase__ , stream=lowerCamelCase__ ).raw ).convert("RGB" ) __UpperCAmelCase : List[str] = [[[400, 650]]] __UpperCAmelCase : Any = [[1]] __UpperCAmelCase : Any = processor(images=np.array(lowerCamelCase__ ) , return_tensors="pt" ).to("cuda" ) with torch.no_grad(): __UpperCAmelCase : str = hf_model(**lowerCamelCase__ ) __UpperCAmelCase : int = output.iou_scores.squeeze() if model_name == "sam_vit_h_4b8939": assert scores[-1].item() == 0.579_8902_5115_9668 __UpperCAmelCase : Tuple = processor( images=np.array(lowerCamelCase__ ) , input_points=lowerCamelCase__ , input_labels=lowerCamelCase__ , return_tensors="pt" ).to("cuda" ) with torch.no_grad(): __UpperCAmelCase : Union[str, Any] = hf_model(**lowerCamelCase__ ) __UpperCAmelCase : Optional[Any] = output.iou_scores.squeeze() assert scores[-1].item() == 0.9712_6030_9219_3604 __UpperCAmelCase : Any = ((75, 275, 1725, 850),) __UpperCAmelCase : Any = processor(images=np.array(lowerCamelCase__ ) , input_boxes=lowerCamelCase__ , return_tensors="pt" ).to("cuda" ) with torch.no_grad(): __UpperCAmelCase : Dict = hf_model(**lowerCamelCase__ ) __UpperCAmelCase : Tuple = output.iou_scores.squeeze() assert scores[-1].item() == 0.8686_0156_0592_6514 # Test with 2 points and 1 image. __UpperCAmelCase : str = [[[400, 650], [800, 650]]] __UpperCAmelCase : Tuple = [[1, 1]] __UpperCAmelCase : str = processor( images=np.array(lowerCamelCase__ ) , input_points=lowerCamelCase__ , input_labels=lowerCamelCase__ , return_tensors="pt" ).to("cuda" ) with torch.no_grad(): __UpperCAmelCase : List[Any] = hf_model(**lowerCamelCase__ ) __UpperCAmelCase : str = output.iou_scores.squeeze() assert scores[-1].item() == 0.9936_0477_9243_4692 if __name__ == "__main__": _a : List[Any] = argparse.ArgumentParser() _a : List[str] = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195"] parser.add_argument( "--model_name", default="sam_vit_h_4b8939", choices=choices, type=str, help="Path to hf config.json of model to convert", ) parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument( "--push_to_hub", action="store_true", help="Whether to push the model and processor to the hub after converting", ) parser.add_argument( "--model_hub_id", default="ybelkada/segment-anything", choices=choices, type=str, help="Path to hf config.json of model to convert", ) _a : Dict = parser.parse_args() convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
10
"""Numerical integration with the composite trapezoidal rule."""


def method_1(boundary: list[float], steps: float) -> float:
    """Approximate the integral of f over [boundary[0], boundary[1]]
    using `steps` sub-intervals of width h."""
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y


def make_points(a: float, b: float, h: float):
    """Yield the interior grid points a + h, a + 2h, ... strictly inside (a, b)."""
    x = a + h
    while x < (b - h):
        yield x
        x = x + h


def f(x: float) -> float:  # enter your function here
    y = (x - 0) * (x - 0)
    return y


def main() -> None:
    a = 0.0  # lower bound of integration
    b = 1.0  # upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_1(boundary, steps)
    print(f"y = {y}")


if __name__ == "__main__":
    main()
10
1
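The rule being implemented is the composite trapezoidal estimate h * (f(a)/2 + sum of f at the interior points + f(b)/2). For f(x) = x**2 on [0, 1] the exact integral is 1/3, and the estimate tightens as the step count grows (printed values are indicative, not asserted):

for steps in (10.0, 100.0, 1000.0):
    print(steps, method_1([0.0, 1.0], steps))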
'''simple docstring''' from __future__ import annotations import unittest from transformers import EsmConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import numpy import tensorflow as tf from transformers.models.esm.modeling_tf_esm import ( TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST, TFEsmForMaskedLM, TFEsmForSequenceClassification, TFEsmForTokenClassification, TFEsmModel, ) class __A : def __init__( self , UpperCamelCase_ , ): __UpperCAmelCase : Optional[int] = parent __UpperCAmelCase : Tuple = 13 __UpperCAmelCase : Any = 7 __UpperCAmelCase : Optional[int] = True __UpperCAmelCase : Dict = True __UpperCAmelCase : List[Any] = True __UpperCAmelCase : str = 99 __UpperCAmelCase : Any = 32 __UpperCAmelCase : Dict = 2 __UpperCAmelCase : List[str] = 4 __UpperCAmelCase : Optional[int] = 37 __UpperCAmelCase : int = "gelu" __UpperCAmelCase : List[Any] = 0.1 __UpperCAmelCase : int = 0.1 __UpperCAmelCase : List[str] = 5_12 __UpperCAmelCase : Tuple = 16 __UpperCAmelCase : Optional[Any] = 2 __UpperCAmelCase : Optional[int] = 0.0_2 __UpperCAmelCase : Union[str, Any] = 3 __UpperCAmelCase : List[str] = 4 __UpperCAmelCase : List[Any] = None def _snake_case ( self ): __UpperCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCAmelCase : Union[str, Any] = None if self.use_input_mask: __UpperCAmelCase : int = random_attention_mask([self.batch_size, self.seq_length] ) __UpperCAmelCase : Optional[Any] = None __UpperCAmelCase : Tuple = None __UpperCAmelCase : Dict = None if self.use_labels: __UpperCAmelCase : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices ) __UpperCAmelCase : List[str] = EsmConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def _snake_case ( self ): ( ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ) : List[Any] = self.prepare_config_and_inputs() __UpperCAmelCase : Optional[int] = True __UpperCAmelCase : str = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) __UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ): __UpperCAmelCase : List[Any] = TFEsmModel(config=UpperCamelCase_ ) __UpperCAmelCase : Any = {"input_ids": input_ids, "attention_mask": input_mask} 
__UpperCAmelCase : List[str] = model(UpperCamelCase_ ) __UpperCAmelCase : List[str] = [input_ids, input_mask] __UpperCAmelCase : List[Any] = model(UpperCamelCase_ ) __UpperCAmelCase : Tuple = model(UpperCamelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , ): __UpperCAmelCase : Optional[Any] = True __UpperCAmelCase : Tuple = TFEsmModel(config=UpperCamelCase_ ) __UpperCAmelCase : Dict = { "input_ids": input_ids, "attention_mask": input_mask, "encoder_hidden_states": encoder_hidden_states, "encoder_attention_mask": encoder_attention_mask, } __UpperCAmelCase : Union[str, Any] = model(UpperCamelCase_ ) __UpperCAmelCase : Dict = [input_ids, input_mask] __UpperCAmelCase : List[str] = model(UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ ) # Also check the case where encoder outputs are not passed __UpperCAmelCase : Dict = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ): __UpperCAmelCase : Any = TFEsmForMaskedLM(config=UpperCamelCase_ ) __UpperCAmelCase : List[str] = model([input_ids, input_mask] ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ): __UpperCAmelCase : List[Any] = self.num_labels __UpperCAmelCase : Union[str, Any] = TFEsmForTokenClassification(config=UpperCamelCase_ ) __UpperCAmelCase : Tuple = {"input_ids": input_ids, "attention_mask": input_mask} __UpperCAmelCase : Union[str, Any] = model(UpperCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _snake_case ( self ): __UpperCAmelCase : List[Any] = self.prepare_config_and_inputs() ( ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ) : List[str] = config_and_inputs __UpperCAmelCase : Optional[int] = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_tf class __A (__magic_name__ , __magic_name__ , unittest.TestCase ): snake_case :Optional[int] = ( ( TFEsmModel, TFEsmForMaskedLM, TFEsmForSequenceClassification, TFEsmForTokenClassification, ) if is_tf_available() else () ) snake_case :List[str] = ( { "feature-extraction": TFEsmModel, "fill-mask": TFEsmForMaskedLM, "text-classification": TFEsmForSequenceClassification, "token-classification": TFEsmForTokenClassification, "zero-shot": TFEsmForSequenceClassification, } if is_tf_available() else {} ) snake_case :Tuple = False snake_case :Any = False def _snake_case ( self ): __UpperCAmelCase : str = TFEsmModelTester(self ) __UpperCAmelCase : int = ConfigTester(self , config_class=UpperCamelCase_ , hidden_size=37 ) def _snake_case ( self ): self.config_tester.run_common_tests() def _snake_case ( self ): __UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase_ ) def _snake_case ( self ): __UpperCAmelCase : Union[str, Any] = 
self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*UpperCamelCase_ ) def _snake_case ( self ): __UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase_ ) def _snake_case ( self ): __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*UpperCamelCase_ ) @slow def _snake_case ( self ): for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __UpperCAmelCase : List[str] = TFEsmModel.from_pretrained(UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) @unittest.skip("Protein models do not support embedding resizing." ) def _snake_case ( self ): pass @unittest.skip("Protein models do not support embedding resizing." ) def _snake_case ( self ): pass def _snake_case ( self ): __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCAmelCase : str = model_class(UpperCamelCase_ ) assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer ) if model_class is TFEsmForMaskedLM: # Output embedding test differs from the main test because they're a matrix, not a layer __UpperCAmelCase : Any = model.get_bias() assert isinstance(UpperCamelCase_ , UpperCamelCase_ ) for k, v in name.items(): assert isinstance(UpperCamelCase_ , tf.Variable ) else: __UpperCAmelCase : str = model.get_output_embeddings() assert x is None __UpperCAmelCase : List[str] = model.get_bias() assert name is None @require_tf class __A (unittest.TestCase ): @slow def _snake_case ( self ): __UpperCAmelCase : int = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D" ) __UpperCAmelCase : List[str] = tf.constant([[0, 1, 2, 3, 4, 5]] ) __UpperCAmelCase : List[str] = model(UpperCamelCase_ )[0] __UpperCAmelCase : Any = [1, 6, 33] self.assertEqual(list(output.numpy().shape ) , UpperCamelCase_ ) # compare the actual values for a slice. __UpperCAmelCase : Optional[int] = tf.constant( [ [ [8.9_2_1_5_1_8, -1_0.5_8_9_8_1_4, -6.4_6_7_1_3_0_7], [-6.3_9_6_7_1_5_6, -1_3.9_1_1_3_7_7, -1.1_2_1_1_9_1_5], [-7.7_8_1_2_4_7, -1_3.9_5_1_5_5_7, -3.7_4_0_5_9_2], ] ] ) self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-2 ) ) @slow def _snake_case ( self ): __UpperCAmelCase : Optional[int] = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D" ) __UpperCAmelCase : int = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] ) __UpperCAmelCase : List[Any] = model(UpperCamelCase_ )[0] # compare the actual values for a slice. __UpperCAmelCase : List[str] = tf.constant( [ [ [0.1_4_4_4_3_0_9_2, 0.5_4_1_2_5_3_2_7, 0.3_2_4_7_7_3_9], [0.3_0_3_4_0_4_8_4, 0.0_0_5_2_6_6_7_6, 0.3_1_0_7_7_7_2_2], [0.3_2_2_7_8_0_4_3, -0.2_4_9_8_7_0_9_6, 0.3_4_1_4_6_2_8], ] ] ) self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
10
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, is_vision_available, ) _a : str = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a : str = ["ViTFeatureExtractor"] _a : Dict = ["ViTImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a : int = [ "VIT_PRETRAINED_MODEL_ARCHIVE_LIST", "ViTForImageClassification", "ViTForMaskedImageModeling", "ViTModel", "ViTPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a : List[str] = [ "TFViTForImageClassification", "TFViTModel", "TFViTPreTrainedModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a : Dict = [ "FlaxViTForImageClassification", "FlaxViTModel", "FlaxViTPreTrainedModel", ] if TYPE_CHECKING: from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_vit import ViTFeatureExtractor from .image_processing_vit import ViTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vit import ( VIT_PRETRAINED_MODEL_ARCHIVE_LIST, ViTForImageClassification, ViTForMaskedImageModeling, ViTModel, ViTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel else: import sys _a : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
10
1
"""Integer exponentiation by squaring, including negative exponents."""


def actual_power(a: int, b: int) -> int:
    """Compute a**b for b >= 0 in O(log b) multiplications."""
    if b == 0:
        return 1
    half = actual_power(a, b // 2)  # reuse the half-power instead of recomputing it
    if (b % 2) == 0:
        return half * half
    return a * half * half


def power(a: int, b: int) -> float:
    """Compute a**b for any integer exponent b."""
    if b < 0:
        # a**(-b) is computed with a non-negative exponent, then inverted
        return 1 / actual_power(a, -b)
    return actual_power(a, b)


if __name__ == "__main__":
    print(power(-2, -3))
10
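Spot checks for the exponentiation helpers above:

assert actual_power(2, 10) == 1024
assert power(2, -3) == 0.125
assert power(-2, -3) == -0.125  # the value the demo prints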
'''simple docstring''' import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging _a : str = logging.get_logger(__name__) _a : Tuple = "▁" _a : Optional[int] = {"vocab_file": "sentencepiece.bpe.model"} _a : Tuple = { "vocab_file": { "xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model", "xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model", "xlm-roberta-large-finetuned-conll02-dutch": ( "https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model" ), "xlm-roberta-large-finetuned-conll02-spanish": ( "https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model" ), "xlm-roberta-large-finetuned-conll03-english": ( "https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model" ), "xlm-roberta-large-finetuned-conll03-german": ( "https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model" ), } } _a : Optional[Any] = { "xlm-roberta-base": 512, "xlm-roberta-large": 512, "xlm-roberta-large-finetuned-conll02-dutch": 512, "xlm-roberta-large-finetuned-conll02-spanish": 512, "xlm-roberta-large-finetuned-conll03-english": 512, "xlm-roberta-large-finetuned-conll03-german": 512, } class __A (__magic_name__ ): snake_case :Union[str, Any] = VOCAB_FILES_NAMES snake_case :Any = PRETRAINED_VOCAB_FILES_MAP snake_case :Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case :Optional[int] = ["input_ids", "attention_mask"] def __init__( self , UpperCamelCase_ , UpperCamelCase_="<s>" , UpperCamelCase_="</s>" , UpperCamelCase_="</s>" , UpperCamelCase_="<s>" , UpperCamelCase_="<unk>" , UpperCamelCase_="<pad>" , UpperCamelCase_="<mask>" , UpperCamelCase_ = None , **UpperCamelCase_ , ): # Mask token behave like a normal word, i.e. include the space before it __UpperCAmelCase : Optional[int] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else mask_token __UpperCAmelCase : int = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase_ , ) __UpperCAmelCase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(UpperCamelCase_ ) ) __UpperCAmelCase : Union[str, Any] = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # Mimic fairseq token-to-id alignment for the first 4 token __UpperCAmelCase : Optional[Any] = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab __UpperCAmelCase : List[Any] = 1 __UpperCAmelCase : Optional[Any] = len(self.sp_model ) + self.fairseq_offset __UpperCAmelCase : str = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self ): __UpperCAmelCase : List[str] = self.__dict__.copy() __UpperCAmelCase : str = None __UpperCAmelCase : str = self.sp_model.serialized_model_proto() return state def __setstate__( self , UpperCamelCase_ ): __UpperCAmelCase : Union[str, Any] = d # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): __UpperCAmelCase : Tuple = {} __UpperCAmelCase : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] __UpperCAmelCase : List[Any] = [self.cls_token_id] __UpperCAmelCase : Union[str, Any] = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCamelCase_ , token_ids_a=UpperCamelCase_ , already_has_special_tokens=UpperCamelCase_ ) if token_ids_a is None: return [1] + ([0] * len(UpperCamelCase_ )) + [1] return [1] + ([0] * len(UpperCamelCase_ )) + [1, 1] + ([0] * len(UpperCamelCase_ )) + [1] def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ): __UpperCAmelCase : Dict = [self.sep_token_id] __UpperCAmelCase : List[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def _snake_case ( self ): return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token def _snake_case ( self ): __UpperCAmelCase : Union[str, Any] = {self.convert_ids_to_tokens(UpperCamelCase_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def _snake_case ( self , UpperCamelCase_ ): return self.sp_model.encode(UpperCamelCase_ , out_type=UpperCamelCase_ ) def _snake_case ( self , UpperCamelCase_ ): if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] __UpperCAmelCase : Optional[int] = self.sp_model.PieceToId(UpperCamelCase_ ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def _snake_case ( self , UpperCamelCase_ ): if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def _snake_case ( self , UpperCamelCase_ ): __UpperCAmelCase : Tuple = "".join(UpperCamelCase_ ).replace(UpperCamelCase_ , " " ).strip() return out_string def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ): if not os.path.isdir(UpperCamelCase_ ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return __UpperCAmelCase : List[str] = os.path.join( UpperCamelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase_ ) 
and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , UpperCamelCase_ ) elif not os.path.isfile(self.vocab_file ): with open(UpperCamelCase_ , "wb" ) as fi: __UpperCAmelCase : Optional[int] = self.sp_model.serialized_model_proto() fi.write(UpperCamelCase_ ) return (out_vocab_file,)
10
1
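A hedged usage sketch of the special-token framing implemented by build_inputs_with_special_tokens above (standard transformers API; downloads the sentencepiece model on first use):

from transformers import XLMRobertaTokenizer

tok = XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
ids = tok("Hello world")["input_ids"]
assert ids[0] == tok.cls_token_id and ids[-1] == tok.sep_token_id  # <s> ... </s>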
"""Longest common subsequence by dynamic programming."""


def longest_common_subsequence(x: str, y: str):
    """Return the LCS length of x and y, together with one such subsequence."""
    assert x is not None
    assert y is not None

    m = len(x)
    n = len(y)

    # declaring the array for storing the dp values
    l = [[0] * (n + 1) for _ in range(m + 1)]  # noqa: E741

    for i in range(1, m + 1):
        for j in range(1, n + 1):
            match = 1 if x[i - 1] == y[j - 1] else 0
            l[i][j] = max(l[i - 1][j], l[i][j - 1], l[i - 1][j - 1] + match)

    # walk back through the table to recover one subsequence
    seq = ""
    i, j = m, n
    while i > 0 and j > 0:
        match = 1 if x[i - 1] == y[j - 1] else 0
        if l[i][j] == l[i - 1][j - 1] + match:
            if match == 1:
                seq = x[i - 1] + seq
            i -= 1
            j -= 1
        elif l[i][j] == l[i - 1][j]:
            i -= 1
        else:
            j -= 1

    return l[m][n], seq


if __name__ == "__main__":
    a = "AGGTAB"
    b = "GXTXAYB"
    expected_ln = 4
    expected_subseq = "GTAB"

    ln, subseq = longest_common_subsequence(a, b)
    print("len =", ln, ", sub-sequence =", subseq)

    import doctest

    doctest.testmod()
10
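For reference, the recurrence the table fills is l[i][j] = max(l[i-1][j], l[i][j-1], l[i-1][j-1] + match). A second spot check; only the length is asserted, since several subsequences of that length exist:

assert longest_common_subsequence("ABCBDAB", "BDCAB")[0] == 4  # e.g. "BDAB" or "BCAB"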
'''simple docstring''' import time import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers.generation import ( MaxLengthCriteria, MaxNewTokensCriteria, MaxTimeCriteria, StoppingCriteriaList, validate_stopping_criteria, ) @require_torch class __A (unittest.TestCase ): def _snake_case ( self , UpperCamelCase_ ): __UpperCAmelCase : List[str] = 3 __UpperCAmelCase : Tuple = 2_50 __UpperCAmelCase : str = ids_tensor((batch_size, length) , UpperCamelCase_ ) __UpperCAmelCase : Any = torch.ones((batch_size, length) , device=UpperCamelCase_ , dtype=torch.float ) / length return input_ids, scores def _snake_case ( self ): __UpperCAmelCase , __UpperCAmelCase : Tuple = self._get_tensors(5 ) __UpperCAmelCase : Tuple = StoppingCriteriaList( [ MaxLengthCriteria(max_length=10 ), MaxTimeCriteria(max_time=0.1 ), ] ) self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) __UpperCAmelCase , __UpperCAmelCase : int = self._get_tensors(9 ) self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) __UpperCAmelCase , __UpperCAmelCase : Optional[int] = self._get_tensors(10 ) self.assertTrue(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) def _snake_case ( self ): __UpperCAmelCase : int = MaxLengthCriteria(max_length=10 ) __UpperCAmelCase , __UpperCAmelCase : Tuple = self._get_tensors(5 ) self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) __UpperCAmelCase , __UpperCAmelCase : Dict = self._get_tensors(9 ) self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) __UpperCAmelCase , __UpperCAmelCase : Optional[int] = self._get_tensors(10 ) self.assertTrue(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) def _snake_case ( self ): __UpperCAmelCase : Optional[Any] = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 ) __UpperCAmelCase , __UpperCAmelCase : List[str] = self._get_tensors(5 ) self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) __UpperCAmelCase , __UpperCAmelCase : Dict = self._get_tensors(9 ) self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) __UpperCAmelCase , __UpperCAmelCase : Optional[Any] = self._get_tensors(10 ) self.assertTrue(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) __UpperCAmelCase : Union[str, Any] = StoppingCriteriaList([criteria] ) self.assertEqual(criteria_list.max_length , 10 ) def _snake_case ( self ): __UpperCAmelCase , __UpperCAmelCase : Optional[Any] = self._get_tensors(5 ) __UpperCAmelCase : str = MaxTimeCriteria(max_time=0.1 ) self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) __UpperCAmelCase : str = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 ) self.assertTrue(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) def _snake_case ( self ): validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 10 ) with self.assertWarns(UpperCamelCase_ ): validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 11 ) __UpperCAmelCase : Optional[int] = validate_stopping_criteria(StoppingCriteriaList() , 11 ) self.assertEqual(len(UpperCamelCase_ ) , 1 )
10
1
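# --- editor's note -------------------------------------------------------
# A minimal sketch (not part of the original row above) of the same LCS
# recurrence with two-row O(n) storage, useful when only the length is
# needed and the full backtracking table is not. Names are illustrative.
def lcs_length(x: str, y: str) -> int:
    prev = [0] * (len(y) + 1)  # DP row for prefixes of x up to i-1
    for i in range(1, len(x) + 1):
        curr = [0] * (len(y) + 1)  # DP row for prefixes of x up to i
        for j in range(1, len(y) + 1):
            if x[i - 1] == y[j - 1]:
                curr[j] = prev[j - 1] + 1
            else:
                curr[j] = max(prev[j], curr[j - 1])
        prev = curr
    return prev[len(y)]


assert lcs_length("AGGTAB", "GXTXAYB") == 4  # matches the example above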
'''simple docstring'''
import math


class Graph:
    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j
        for i in range(0, n):
            self.dp[i][i] = 0  # the distance from a node to itself is zero

    def add_edge(self, u, v, w):
        self.dp[u][v] = w

    def floyd_warshall(self):
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        return self.dp[u][v]


if __name__ == "__main__":
    graph = Graph(5)
    graph.add_edge(0, 2, 9)
    graph.add_edge(0, 4, 10)
    graph.add_edge(1, 3, 5)
    graph.add_edge(2, 3, 7)
    graph.add_edge(3, 0, 10)
    graph.add_edge(3, 1, 2)
    graph.add_edge(3, 2, 1)
    graph.add_edge(3, 4, 6)
    graph.add_edge(4, 1, 3)
    graph.add_edge(4, 2, 4)
    graph.add_edge(4, 3, 9)
    graph.floyd_warshall()
    print(graph.show_min(1, 4))  # print the results instead of discarding them
    print(graph.show_min(0, 3))
10
'''simple docstring''' import json import re from typing import TYPE_CHECKING, List, Optional, Tuple, Union import numpy as np from ...utils import is_tf_available, is_torch_available, logging if TYPE_CHECKING: if is_torch_available(): import torch if is_tf_available(): import tensorflow as tf from tokenizers import pre_tokenizers from ...tokenization_utils_base import BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from .tokenization_codegen import CodeGenTokenizer _a : Union[str, Any] = logging.get_logger(__name__) _a : Any = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"} _a : Tuple = { "vocab_file": { "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json", }, "merges_file": { "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt", }, "tokenizer_file": { "Salesforce/codegen-350M-mono": ( "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json" ), }, } _a : Dict = { "Salesforce/codegen-350M-mono": 2048, } class __A (__magic_name__ ): snake_case :Optional[Any] = VOCAB_FILES_NAMES snake_case :str = PRETRAINED_VOCAB_FILES_MAP snake_case :Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case :Tuple = ["input_ids", "attention_mask"] snake_case :Dict = CodeGenTokenizer def __init__( self , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_="<|endoftext|>" , UpperCamelCase_="<|endoftext|>" , UpperCamelCase_="<|endoftext|>" , UpperCamelCase_=False , **UpperCamelCase_ , ): super().__init__( UpperCamelCase_ , UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , unk_token=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , **UpperCamelCase_ , ) if kwargs.pop("add_bos_token" , UpperCamelCase_ ): __UpperCAmelCase : int = kwargs.pop("name_or_path" , "" ) raise ValueError( "Currenty GPT2's fast tokenizer does NOT support adding a BOS token." "Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n" f"""`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n""" f"""`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n""" "This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005." " so that the fast tokenizer works correctly." ) __UpperCAmelCase : Any = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("add_prefix_space" , UpperCamelCase_ ) != add_prefix_space: __UpperCAmelCase : str = getattr(UpperCamelCase_ , pre_tok_state.pop("type" ) ) __UpperCAmelCase : Optional[int] = add_prefix_space __UpperCAmelCase : Tuple = pre_tok_class(**UpperCamelCase_ ) __UpperCAmelCase : Tuple = add_prefix_space def _snake_case ( self , *UpperCamelCase_ , **UpperCamelCase_ ): __UpperCAmelCase : Optional[Any] = kwargs.get("is_split_into_words" , UpperCamelCase_ ) assert self.add_prefix_space or not is_split_into_words, ( f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ "to use it with pretokenized inputs." 
) return super()._batch_encode_plus(*UpperCamelCase_ , **UpperCamelCase_ ) def _snake_case ( self , *UpperCamelCase_ , **UpperCamelCase_ ): __UpperCAmelCase : Any = kwargs.get("is_split_into_words" , UpperCamelCase_ ) assert self.add_prefix_space or not is_split_into_words, ( f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ "to use it with pretokenized inputs." ) return super()._encode_plus(*UpperCamelCase_ , **UpperCamelCase_ ) def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ): __UpperCAmelCase : int = self._tokenizer.model.save(UpperCamelCase_ , name=UpperCamelCase_ ) return tuple(UpperCamelCase_ ) def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = False , UpperCamelCase_ = None , UpperCamelCase_ = None , **UpperCamelCase_ , ): __UpperCAmelCase : str = super().decode( token_ids=UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ , clean_up_tokenization_spaces=UpperCamelCase_ , **UpperCamelCase_ , ) if truncate_before_pattern is not None and len(UpperCamelCase_ ) > 0: __UpperCAmelCase : Union[str, Any] = self.truncate(UpperCamelCase_ , UpperCamelCase_ ) return decoded_text def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ ): def find_re(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ): __UpperCAmelCase : Dict = pattern.search(UpperCamelCase_ , UpperCamelCase_ ) return m.start() if m else -1 __UpperCAmelCase : List[str] = [re.compile(UpperCamelCase_ , re.MULTILINE ) for pattern in truncate_before_pattern] __UpperCAmelCase : Optional[Any] = list(re.finditer("^print" , UpperCamelCase_ , re.MULTILINE ) ) if len(UpperCamelCase_ ) > 1: __UpperCAmelCase : List[Any] = completion[: prints[1].start()] __UpperCAmelCase : Tuple = list(re.finditer("^def" , UpperCamelCase_ , re.MULTILINE ) ) if len(UpperCamelCase_ ) > 1: __UpperCAmelCase : Union[str, Any] = completion[: defs[1].start()] __UpperCAmelCase : Dict = 0 __UpperCAmelCase : Dict = [ pos for pos in [find_re(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) for terminal in terminals] if pos != -1 ] if len(UpperCamelCase_ ) > 0: return completion[: min(UpperCamelCase_ )] else: return completion
10
1
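# --- editor's note -------------------------------------------------------
# A self-contained sketch of the Floyd-Warshall relaxation used by the
# Graph class in the row above, operating on a plain weight matrix. This
# is an illustrative restatement, not code from the source row.
import math


def floyd_warshall(weights: list[list[float]]) -> list[list[float]]:
    # weights[i][j] is the weight of edge i -> j, math.inf if absent
    n = len(weights)
    dist = [row[:] for row in weights]
    for i in range(n):
        dist[i][i] = 0
    for k in range(n):  # allow node k as an intermediate hop
        for i in range(n):
            for j in range(n):
                if dist[i][k] + dist[k][j] < dist[i][j]:
                    dist[i][j] = dist[i][k] + dist[k][j]
    return dist


_inf = math.inf
assert floyd_warshall([[0, 3, _inf], [_inf, 0, 1], [1, _inf, 0]])[0][2] == 4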
'''simple docstring''' import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST, OpenAIGPTConfig, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification, OpenAIGPTLMHeadModel, OpenAIGPTModel, ) class __A : def __init__( self , UpperCamelCase_ , UpperCamelCase_=13 , UpperCamelCase_=7 , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=99 , UpperCamelCase_=32 , UpperCamelCase_=5 , UpperCamelCase_=4 , UpperCamelCase_=37 , UpperCamelCase_="gelu" , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=5_12 , UpperCamelCase_=16 , UpperCamelCase_=2 , UpperCamelCase_=0.0_2 , UpperCamelCase_=3 , UpperCamelCase_=4 , UpperCamelCase_=None , ): __UpperCAmelCase : Union[str, Any] = parent __UpperCAmelCase : Dict = batch_size __UpperCAmelCase : str = seq_length __UpperCAmelCase : Union[str, Any] = is_training __UpperCAmelCase : Tuple = use_token_type_ids __UpperCAmelCase : Union[str, Any] = use_labels __UpperCAmelCase : str = vocab_size __UpperCAmelCase : Optional[Any] = hidden_size __UpperCAmelCase : Tuple = num_hidden_layers __UpperCAmelCase : Optional[Any] = num_attention_heads __UpperCAmelCase : int = intermediate_size __UpperCAmelCase : Optional[Any] = hidden_act __UpperCAmelCase : Dict = hidden_dropout_prob __UpperCAmelCase : List[str] = attention_probs_dropout_prob __UpperCAmelCase : Optional[Any] = max_position_embeddings __UpperCAmelCase : str = type_vocab_size __UpperCAmelCase : List[Any] = type_sequence_label_size __UpperCAmelCase : Optional[int] = initializer_range __UpperCAmelCase : int = num_labels __UpperCAmelCase : Any = num_choices __UpperCAmelCase : Dict = scope __UpperCAmelCase : str = self.vocab_size - 1 def _snake_case ( self ): __UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCAmelCase : str = None if self.use_token_type_ids: __UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __UpperCAmelCase : Dict = None __UpperCAmelCase : Optional[int] = None __UpperCAmelCase : str = None if self.use_labels: __UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __UpperCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices ) __UpperCAmelCase : List[str] = OpenAIGPTConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , ) __UpperCAmelCase : Optional[Any] = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, head_mask, token_type_ids, sequence_labels, token_labels, choice_labels, ) def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , *UpperCamelCase_ ): __UpperCAmelCase : str = OpenAIGPTModel(config=UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() __UpperCAmelCase : List[str] = model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ , 
head_mask=UpperCamelCase_ ) __UpperCAmelCase : Any = model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ ) __UpperCAmelCase : Union[str, Any] = model(UpperCamelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , *UpperCamelCase_ ): __UpperCAmelCase : Any = OpenAIGPTLMHeadModel(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() __UpperCAmelCase : List[Any] = model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , *UpperCamelCase_ ): __UpperCAmelCase : Any = OpenAIGPTDoubleHeadsModel(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() __UpperCAmelCase : str = model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , *UpperCamelCase_ ): __UpperCAmelCase : Tuple = self.num_labels __UpperCAmelCase : int = OpenAIGPTForSequenceClassification(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() __UpperCAmelCase : str = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __UpperCAmelCase : Tuple = model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _snake_case ( self ): __UpperCAmelCase : int = self.prepare_config_and_inputs() ( ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ) : Union[str, Any] = config_and_inputs __UpperCAmelCase : List[str] = { "input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask, } return config, inputs_dict @require_torch class __A (__magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ): snake_case :Union[str, Any] = ( (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification) if is_torch_available() else () ) snake_case :List[str] = ( (OpenAIGPTLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly snake_case :Optional[int] = ( { "feature-extraction": OpenAIGPTModel, "text-classification": OpenAIGPTForSequenceClassification, "text-generation": OpenAIGPTLMHeadModel, "zero-shot": OpenAIGPTForSequenceClassification, } if is_torch_available() else {} ) def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ): if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests": # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers. # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a # tiny config could not be created. 
return True return False def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=False ): __UpperCAmelCase : Union[str, Any] = super()._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ , return_labels=UpperCamelCase_ ) if return_labels: if model_class.__name__ == "OpenAIGPTDoubleHeadsModel": __UpperCAmelCase : List[str] = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=UpperCamelCase_ , ) __UpperCAmelCase : str = inputs_dict["labels"] __UpperCAmelCase : int = inputs_dict["labels"] __UpperCAmelCase : List[Any] = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=UpperCamelCase_ , ) __UpperCAmelCase : Any = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=UpperCamelCase_ ) return inputs_dict def _snake_case ( self ): __UpperCAmelCase : Union[str, Any] = OpenAIGPTModelTester(self ) __UpperCAmelCase : Optional[int] = ConfigTester(self , config_class=UpperCamelCase_ , n_embd=37 ) def _snake_case ( self ): self.config_tester.run_common_tests() def _snake_case ( self ): __UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_model(*UpperCamelCase_ ) def _snake_case ( self ): __UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*UpperCamelCase_ ) def _snake_case ( self ): __UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_double_lm_head_model(*UpperCamelCase_ ) def _snake_case ( self ): __UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*UpperCamelCase_ ) @slow def _snake_case ( self ): for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __UpperCAmelCase : Dict = OpenAIGPTModel.from_pretrained(UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) @require_torch class __A (unittest.TestCase ): @slow def _snake_case ( self ): __UpperCAmelCase : Any = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt" ) model.to(UpperCamelCase_ ) __UpperCAmelCase : Any = torch.tensor([[4_81, 47_35, 5_44]] , dtype=torch.long , device=UpperCamelCase_ ) # the president is __UpperCAmelCase : List[str] = [ 4_81, 47_35, 5_44, 2_46, 9_63, 8_70, 7_62, 2_39, 2_44, 4_04_77, 2_44, 2_49, 7_19, 8_81, 4_87, 5_44, 2_40, 2_44, 6_03, 4_81, ] # the president is a very good man. " \n " i\'m sure he is, " said the __UpperCAmelCase : Tuple = model.generate(UpperCamelCase_ , do_sample=UpperCamelCase_ ) self.assertListEqual(output_ids[0].tolist() , UpperCamelCase_ )
10
'''simple docstring''' import json import os from functools import lru_cache from typing import List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging _a : Optional[Any] = logging.get_logger(__name__) _a : int = {"vocab_file": "vocab.json", "merges_file": "merges.txt"} # See all BART models at https://huggingface.co/models?filter=bart _a : Tuple = { "vocab_file": { "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json", "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json", "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json", "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json", "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json", "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json", }, "merges_file": { "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt", "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt", "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt", "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt", "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt", "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt", }, } _a : List[Any] = { "facebook/bart-base": 1024, "facebook/bart-large": 1024, "facebook/bart-large-mnli": 1024, "facebook/bart-large-cnn": 1024, "facebook/bart-large-xsum": 1024, "yjernite/bart_eli5": 1024, } @lru_cache() def _lowercase ( ) -> List[Any]: """simple docstring""" __UpperCAmelCase : Dict = ( list(range(ord("!" 
) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) ) ) __UpperCAmelCase : Optional[Any] = bs[:] __UpperCAmelCase : Optional[int] = 0 for b in range(2**8 ): if b not in bs: bs.append(lowerCamelCase__ ) cs.append(2**8 + n ) n += 1 __UpperCAmelCase : Dict = [chr(lowerCamelCase__ ) for n in cs] return dict(zip(lowerCamelCase__ , lowerCamelCase__ ) ) def _lowercase ( lowerCamelCase__ ) -> str: """simple docstring""" __UpperCAmelCase : Dict = set() __UpperCAmelCase : Union[str, Any] = word[0] for char in word[1:]: pairs.add((prev_char, char) ) __UpperCAmelCase : Optional[Any] = char return pairs class __A (__magic_name__ ): snake_case :Optional[int] = VOCAB_FILES_NAMES snake_case :List[Any] = PRETRAINED_VOCAB_FILES_MAP snake_case :Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case :Optional[int] = ["input_ids", "attention_mask"] def __init__( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_="replace" , UpperCamelCase_="<s>" , UpperCamelCase_="</s>" , UpperCamelCase_="</s>" , UpperCamelCase_="<s>" , UpperCamelCase_="<unk>" , UpperCamelCase_="<pad>" , UpperCamelCase_="<mask>" , UpperCamelCase_=False , **UpperCamelCase_ , ): __UpperCAmelCase : str = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else bos_token __UpperCAmelCase : List[str] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else eos_token __UpperCAmelCase : Optional[int] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else sep_token __UpperCAmelCase : int = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else cls_token __UpperCAmelCase : Optional[int] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else unk_token __UpperCAmelCase : Dict = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it __UpperCAmelCase : Union[str, Any] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else mask_token super().__init__( errors=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , **UpperCamelCase_ , ) with open(UpperCamelCase_ , encoding="utf-8" ) as vocab_handle: __UpperCAmelCase : int = json.load(UpperCamelCase_ ) __UpperCAmelCase : Any = {v: k for k, v in self.encoder.items()} __UpperCAmelCase : Any = errors # how to handle errors in decoding __UpperCAmelCase : str = bytes_to_unicode() __UpperCAmelCase : List[str] = {v: k for k, v in self.byte_encoder.items()} with open(UpperCamelCase_ , encoding="utf-8" ) as merges_handle: __UpperCAmelCase : str = merges_handle.read().split("\n" )[1:-1] __UpperCAmelCase : List[str] = [tuple(merge.split() ) for merge in bpe_merges] __UpperCAmelCase : Union[str, Any] = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_ ) ) ) ) __UpperCAmelCase : Optional[int] = {} __UpperCAmelCase : Optional[int] = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions __UpperCAmelCase : Dict = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" ) @property def _snake_case ( self ): return len(self.encoder ) def _snake_case ( self ): return dict(self.encoder , **self.added_tokens_encoder ) def _snake_case ( self , UpperCamelCase_ ): if token in self.cache: return self.cache[token] __UpperCAmelCase : List[str] = tuple(UpperCamelCase_ ) __UpperCAmelCase : str = get_pairs(UpperCamelCase_ ) if not pairs: return token while True: __UpperCAmelCase : str = min(UpperCamelCase_ , key=lambda UpperCamelCase_ : self.bpe_ranks.get(UpperCamelCase_ , float("inf" ) ) ) if bigram not in self.bpe_ranks: break __UpperCAmelCase , __UpperCAmelCase : List[Any] = bigram __UpperCAmelCase : Any = [] __UpperCAmelCase : List[str] = 0 while i < len(UpperCamelCase_ ): try: __UpperCAmelCase : Union[str, Any] = word.index(UpperCamelCase_ , UpperCamelCase_ ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) __UpperCAmelCase : str = j if word[i] == first and i < len(UpperCamelCase_ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 __UpperCAmelCase : Dict = tuple(UpperCamelCase_ ) __UpperCAmelCase : str = new_word if len(UpperCamelCase_ ) == 1: break else: __UpperCAmelCase : int = get_pairs(UpperCamelCase_ ) __UpperCAmelCase : Optional[int] = " ".join(UpperCamelCase_ ) __UpperCAmelCase : Dict = word return word def _snake_case ( self , UpperCamelCase_ ): __UpperCAmelCase : Optional[Any] = [] for token in re.findall(self.pat , UpperCamelCase_ ): __UpperCAmelCase : Any = "".join( self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(UpperCamelCase_ ).split(" " ) ) return bpe_tokens def _snake_case ( self , UpperCamelCase_ ): return self.encoder.get(UpperCamelCase_ , self.encoder.get(self.unk_token ) ) def _snake_case ( self , UpperCamelCase_ ): return self.decoder.get(UpperCamelCase_ ) def _snake_case ( self , UpperCamelCase_ ): __UpperCAmelCase : List[str] = 
"".join(UpperCamelCase_ ) __UpperCAmelCase : Union[str, Any] = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors ) return text def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ): if not os.path.isdir(UpperCamelCase_ ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return __UpperCAmelCase : Any = os.path.join( UpperCamelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) __UpperCAmelCase : Optional[int] = os.path.join( UpperCamelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) with open(UpperCamelCase_ , "w" , encoding="utf-8" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=UpperCamelCase_ , ensure_ascii=UpperCamelCase_ ) + "\n" ) __UpperCAmelCase : str = 0 with open(UpperCamelCase_ , "w" , encoding="utf-8" ) as writer: writer.write("#version: 0.2\n" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda UpperCamelCase_ : kv[1] ): if index != token_index: logger.warning( f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.""" " Please check that the tokenizer is not corrupted!" ) __UpperCAmelCase : str = token_index writer.write(" ".join(UpperCamelCase_ ) + "\n" ) index += 1 return vocab_file, merge_file def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] __UpperCAmelCase : List[Any] = [self.cls_token_id] __UpperCAmelCase : Tuple = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCamelCase_ , token_ids_a=UpperCamelCase_ , already_has_special_tokens=UpperCamelCase_ ) if token_ids_a is None: return [1] + ([0] * len(UpperCamelCase_ )) + [1] return [1] + ([0] * len(UpperCamelCase_ )) + [1, 1] + ([0] * len(UpperCamelCase_ )) + [1] def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ): __UpperCAmelCase : int = [self.sep_token_id] __UpperCAmelCase : List[str] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_=False , **UpperCamelCase_ ): __UpperCAmelCase : List[str] = kwargs.pop("add_prefix_space" , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(UpperCamelCase_ ) > 0 and not text[0].isspace()): __UpperCAmelCase : Tuple = " " + text return (text, kwargs)
10
1
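# --- editor's note -------------------------------------------------------
# A cleaned-up, runnable restatement of the byte-level BPE alphabet table
# that the tokenizer row above builds (the GPT-2/BART `bytes_to_unicode`
# trick): every byte 0-255 is mapped to a printable unicode character so
# the BPE merges never have to handle raw whitespace or control bytes.
def bytes_to_unicode() -> dict[int, str]:
    bs = (
        list(range(ord("!"), ord("~") + 1))
        + list(range(ord("¡"), ord("¬") + 1))
        + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)  # shift unused bytes into a printable range
            n += 1
    return dict(zip(bs, [chr(c) for c in cs]))


byte_encoder = bytes_to_unicode()
assert len(byte_encoder) == 256
assert byte_encoder[ord("A")] == "A"  # printable bytes map to themselves
assert byte_encoder[ord(" ")] == "Ġ"  # space maps to a visible marker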
'''simple docstring'''
import logging
import os

from .state import PartialState


class MultiProcessAdapter(logging.LoggerAdapter):
    @staticmethod
    def _should_log(main_process_only):
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)

    def log(self, level, msg, *args, **kwargs):
        if PartialState._shared_state == {}:
            raise RuntimeError(
                "You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility."
            )
        main_process_only = kwargs.pop("main_process_only", True)
        in_order = kwargs.pop("in_order", False)
        if self.isEnabledFor(level):
            if self._should_log(main_process_only):
                msg, kwargs = self.process(msg, kwargs)
                self.logger.log(level, msg, *args, **kwargs)
            elif in_order:
                state = PartialState()
                for i in range(state.num_processes):
                    if i == state.process_index:
                        msg, kwargs = self.process(msg, kwargs)
                        self.logger.log(level, msg, *args, **kwargs)
                    state.wait_for_everyone()


def get_logger(name, log_level=None):
    """simple docstring"""
    if log_level is None:
        log_level = os.environ.get("ACCELERATE_LOG_LEVEL", None)
    logger = logging.getLogger(name)
    if log_level is not None:
        logger.setLevel(log_level.upper())
        logger.root.setLevel(log_level.upper())
    return MultiProcessAdapter(logger, {})
10
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging _a : Any = logging.get_logger(__name__) _a : int = { "facebook/s2t-wav2vec2-large-en-de": ( "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json" ), # See all Speech2Text models at https://huggingface.co/models?filter=speech2text2 } class __A (__magic_name__ ): snake_case :Optional[int] = "speech_to_text_2" snake_case :List[Any] = ["past_key_values"] snake_case :str = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"} def __init__( self , UpperCamelCase_=1_00_00 , UpperCamelCase_=6 , UpperCamelCase_=20_48 , UpperCamelCase_=4 , UpperCamelCase_=0.0 , UpperCamelCase_=True , UpperCamelCase_="relu" , UpperCamelCase_=2_56 , UpperCamelCase_=0.1 , UpperCamelCase_=0.0 , UpperCamelCase_=0.0 , UpperCamelCase_=0.0_2 , UpperCamelCase_=2 , UpperCamelCase_=True , UpperCamelCase_=1 , UpperCamelCase_=0 , UpperCamelCase_=2 , UpperCamelCase_=10_24 , **UpperCamelCase_ , ): __UpperCAmelCase : Any = vocab_size __UpperCAmelCase : Optional[int] = d_model __UpperCAmelCase : Tuple = decoder_ffn_dim __UpperCAmelCase : List[str] = decoder_layers __UpperCAmelCase : str = decoder_attention_heads __UpperCAmelCase : Dict = dropout __UpperCAmelCase : Optional[Any] = attention_dropout __UpperCAmelCase : int = activation_dropout __UpperCAmelCase : Dict = activation_function __UpperCAmelCase : Tuple = init_std __UpperCAmelCase : Any = decoder_layerdrop __UpperCAmelCase : str = use_cache __UpperCAmelCase : int = decoder_layers __UpperCAmelCase : Any = scale_embedding # scale factor will be sqrt(d_model) if True __UpperCAmelCase : Union[str, Any] = max_target_positions super().__init__( pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , decoder_start_token_id=UpperCamelCase_ , **UpperCamelCase_ , )
10
1
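# --- editor's note -------------------------------------------------------
# A short usage sketch for the multi-process logging adapter defined in
# the row above. It assumes the `accelerate` package layout this file
# belongs to (accelerate.logging.get_logger); the keyword arguments shown
# are exactly the ones the adapter pops (`main_process_only`, `in_order`).
from accelerate import Accelerator
from accelerate.logging import get_logger

logger = get_logger(__name__, log_level="INFO")

accelerator = Accelerator()  # initializes PartialState, required by the adapter
logger.info("logged once, by the main process only")
logger.info("logged by every process", main_process_only=False)
logger.info("logged by every process, in rank order", main_process_only=False, in_order=True)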
'''simple docstring''' import html from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin from ...utils import is_bsa_available, logging, requires_backends if is_bsa_available(): import bsa from bsa import BeautifulSoup _a : Optional[Any] = logging.get_logger(__name__) class __A (__magic_name__ ): def __init__( self , **UpperCamelCase_ ): requires_backends(self , ["bs4"] ) super().__init__(**UpperCamelCase_ ) def _snake_case ( self , UpperCamelCase_ ): __UpperCAmelCase : Optional[Any] = [] __UpperCAmelCase : List[str] = [] __UpperCAmelCase : Optional[int] = element if element.name else element.parent for parent in child.parents: # type: bs4.element.Tag __UpperCAmelCase : Tuple = parent.find_all(child.name , recursive=UpperCamelCase_ ) xpath_tags.append(child.name ) xpath_subscripts.append( 0 if 1 == len(UpperCamelCase_ ) else next(i for i, s in enumerate(UpperCamelCase_ , 1 ) if s is child ) ) __UpperCAmelCase : Tuple = parent xpath_tags.reverse() xpath_subscripts.reverse() return xpath_tags, xpath_subscripts def _snake_case ( self , UpperCamelCase_ ): __UpperCAmelCase : Any = BeautifulSoup(UpperCamelCase_ , "html.parser" ) __UpperCAmelCase : Tuple = [] __UpperCAmelCase : Optional[int] = [] __UpperCAmelCase : Any = [] for element in html_code.descendants: if type(UpperCamelCase_ ) == bsa.element.NavigableString: if type(element.parent ) != bsa.element.Tag: continue __UpperCAmelCase : Dict = html.unescape(UpperCamelCase_ ).strip() if not text_in_this_tag: continue all_doc_strings.append(UpperCamelCase_ ) __UpperCAmelCase , __UpperCAmelCase : int = self.xpath_soup(UpperCamelCase_ ) stringaxtag_seq.append(UpperCamelCase_ ) stringaxsubs_seq.append(UpperCamelCase_ ) if len(UpperCamelCase_ ) != len(UpperCamelCase_ ): raise ValueError("Number of doc strings and xtags does not correspond" ) if len(UpperCamelCase_ ) != len(UpperCamelCase_ ): raise ValueError("Number of doc strings and xsubs does not correspond" ) return all_doc_strings, stringaxtag_seq, stringaxsubs_seq def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ ): __UpperCAmelCase : List[Any] = "" for tagname, subs in zip(UpperCamelCase_ , UpperCamelCase_ ): xpath += f"""/{tagname}""" if subs != 0: xpath += f"""[{subs}]""" return xpath def __call__( self , UpperCamelCase_ ): __UpperCAmelCase : Union[str, Any] = False # Check that strings has a valid type if isinstance(UpperCamelCase_ , UpperCamelCase_ ): __UpperCAmelCase : List[Any] = True elif isinstance(UpperCamelCase_ , (list, tuple) ): if len(UpperCamelCase_ ) == 0 or isinstance(html_strings[0] , UpperCamelCase_ ): __UpperCAmelCase : Union[str, Any] = True if not valid_strings: raise ValueError( "HTML strings must of type `str`, `List[str]` (batch of examples), " f"""but is of type {type(UpperCamelCase_ )}.""" ) __UpperCAmelCase : Any = bool(isinstance(UpperCamelCase_ , (list, tuple) ) and (isinstance(html_strings[0] , UpperCamelCase_ )) ) if not is_batched: __UpperCAmelCase : Any = [html_strings] # Get nodes + xpaths __UpperCAmelCase : Union[str, Any] = [] __UpperCAmelCase : Optional[int] = [] for html_string in html_strings: __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = self.get_three_from_single(UpperCamelCase_ ) nodes.append(UpperCamelCase_ ) __UpperCAmelCase : Optional[Any] = [] for node, tag_list, sub_list in zip(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ): __UpperCAmelCase : Optional[Any] = self.construct_xpath(UpperCamelCase_ , UpperCamelCase_ ) xpath_strings.append(UpperCamelCase_ ) 
xpaths.append(UpperCamelCase_ ) # return as Dict __UpperCAmelCase : int = {"nodes": nodes, "xpaths": xpaths} __UpperCAmelCase : Tuple = BatchFeature(data=UpperCamelCase_ , tensor_type=UpperCamelCase_ ) return encoded_inputs
10
'''simple docstring'''


def solution(n: int = 100) -> int:
    """simple docstring"""
    square_of_sum = (n * (n + 1) // 2) ** 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return square_of_sum - sum_of_squares


if __name__ == "__main__":
    print(f"""{solution() = }""")
10
1
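# --- editor's note -------------------------------------------------------
# A brute-force cross-check (illustrative, not from the source) for the
# closed-form `solution` in the row above: the difference between the
# square of the sum and the sum of the squares of the first n naturals.
def brute_force(n: int = 100) -> int:
    square_of_sum = sum(range(1, n + 1)) ** 2
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    return square_of_sum - sum_of_squares


assert brute_force(10) == 2640  # Project Euler's worked example for n=10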
'''simple docstring''' import argparse from copy import deepcopy import numpy as np from datasets import ClassLabel, DatasetDict, load_dataset from evaluate import load from transformers import ( AutoModelForSequenceClassification, AutoTokenizer, DataCollatorWithPadding, Trainer, TrainerCallback, TrainingArguments, set_seed, ) def _lowercase ( ) -> Optional[Any]: """simple docstring""" __UpperCAmelCase : int = argparse.ArgumentParser() parser.add_argument("--model_ckpt" , type=lowerCamelCase__ , default="microsoft/unixcoder-base-nine" ) parser.add_argument("--num_epochs" , type=lowerCamelCase__ , default=5 ) parser.add_argument("--batch_size" , type=lowerCamelCase__ , default=6 ) parser.add_argument("--gradient_accumulation_steps" , type=lowerCamelCase__ , default=1 ) parser.add_argument("--freeze" , type=lowerCamelCase__ , default=lowerCamelCase__ ) parser.add_argument("--learning_rate" , type=lowerCamelCase__ , default=5e-4 ) parser.add_argument("--seed" , type=lowerCamelCase__ , default=0 ) parser.add_argument("--lr_scheduler_type" , type=lowerCamelCase__ , default="cosine" ) parser.add_argument("--num_warmup_steps" , type=lowerCamelCase__ , default=10 ) parser.add_argument("--weight_decay" , type=lowerCamelCase__ , default=0.01 ) parser.add_argument("--output_dir" , type=lowerCamelCase__ , default="./results" ) return parser.parse_args() _a : List[str] = load("accuracy") def _lowercase ( lowerCamelCase__ ) -> Optional[Any]: """simple docstring""" __UpperCAmelCase , __UpperCAmelCase : int = eval_pred __UpperCAmelCase : Optional[int] = np.argmax(lowerCamelCase__ , axis=1 ) return metric.compute(predictions=lowerCamelCase__ , references=lowerCamelCase__ ) class __A (__magic_name__ ): def __init__( self , UpperCamelCase_ ): super().__init__() __UpperCAmelCase : int = trainer def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ ): if control.should_evaluate: __UpperCAmelCase : List[Any] = deepcopy(UpperCamelCase_ ) self._trainer.evaluate(eval_dataset=self._trainer.train_dataset , metric_key_prefix="train" ) return control_copy def _lowercase ( ) -> Union[str, Any]: """simple docstring""" __UpperCAmelCase : Optional[Any] = get_args() set_seed(args.seed ) __UpperCAmelCase : Tuple = load_dataset("codeparrot/codecomplex" , split="train" ) __UpperCAmelCase : int = dataset.train_test_split(test_size=0.2 ) __UpperCAmelCase : str = train_test["test"].train_test_split(test_size=0.5 ) __UpperCAmelCase : Dict = DatasetDict( { "train": train_test["train"], "test": test_validation["train"], "valid": test_validation["test"], } ) print("Loading tokenizer and model" ) __UpperCAmelCase : Any = AutoTokenizer.from_pretrained(args.model_ckpt ) __UpperCAmelCase : Optional[Any] = tokenizer.eos_token __UpperCAmelCase : Dict = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt , num_labels=7 ) __UpperCAmelCase : List[Any] = model.config.eos_token_id if args.freeze: for param in model.roberta.parameters(): __UpperCAmelCase : Optional[Any] = False __UpperCAmelCase : List[str] = ClassLabel(num_classes=7 , names=list(set(train_test_validation["train"]["complexity"] ) ) ) def tokenize(lowerCamelCase__ ): __UpperCAmelCase : Optional[int] = tokenizer(example["src"] , truncation=lowerCamelCase__ , max_length=1024 ) __UpperCAmelCase : str = labels.straint(example["complexity"] ) return { "input_ids": inputs["input_ids"], "attention_mask": inputs["attention_mask"], "label": label, } __UpperCAmelCase : int = train_test_validation.map( lowerCamelCase__ , 
batched=lowerCamelCase__ , remove_columns=train_test_validation["train"].column_names , ) __UpperCAmelCase : str = DataCollatorWithPadding(tokenizer=lowerCamelCase__ ) __UpperCAmelCase : List[Any] = TrainingArguments( output_dir=args.output_dir , learning_rate=args.learning_rate , lr_scheduler_type=args.lr_scheduler_type , evaluation_strategy="epoch" , save_strategy="epoch" , logging_strategy="epoch" , per_device_train_batch_size=args.batch_size , per_device_eval_batch_size=args.batch_size , num_train_epochs=args.num_epochs , gradient_accumulation_steps=args.gradient_accumulation_steps , weight_decay=0.01 , metric_for_best_model="accuracy" , run_name="complexity-java" , report_to="wandb" , ) __UpperCAmelCase : Union[str, Any] = Trainer( model=lowerCamelCase__ , args=lowerCamelCase__ , train_dataset=tokenized_datasets["train"] , eval_dataset=tokenized_datasets["valid"] , tokenizer=lowerCamelCase__ , data_collator=lowerCamelCase__ , compute_metrics=lowerCamelCase__ , ) print("Training..." ) trainer.add_callback(CustomCallback(lowerCamelCase__ ) ) trainer.train() if __name__ == "__main__": main()
10
'''simple docstring'''


def present_value(discount_rate: float, cash_flows: list[float]) -> float:
    """simple docstring"""
    if discount_rate < 0:
        raise ValueError("Discount rate cannot be negative")
    if not cash_flows:
        raise ValueError("Cash flows list cannot be empty")
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows)
    )
    return round(present_value, ndigits=2)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
10
1
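# --- editor's note -------------------------------------------------------
# A worked example (illustrative numbers) for the present-value function
# in the row above: a 1000 outflow now followed by three 500 inflows, all
# discounted at 10% per period.
flows = [-1000.0, 500.0, 500.0, 500.0]
rate = 0.10
npv = sum(cash_flow / (1 + rate) ** i for i, cash_flow in enumerate(flows))
assert round(npv, 2) == 243.43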
'''simple docstring''' import os from typing import BinaryIO, Optional, Union import numpy as np import pyarrow.parquet as pq from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config from ..features.features import FeatureType, _visit from ..formatting import query_table from ..packaged_modules import _PACKAGED_DATASETS_MODULES from ..packaged_modules.parquet.parquet import Parquet from ..utils import logging from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader def _lowercase ( lowerCamelCase__ ) -> Optional[int]: """simple docstring""" __UpperCAmelCase : Optional[Any] = np.inf def set_batch_size(lowerCamelCase__ ) -> None: nonlocal batch_size if isinstance(lowerCamelCase__ , lowerCamelCase__ ): __UpperCAmelCase : Optional[int] = min(lowerCamelCase__ , config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS ) elif isinstance(lowerCamelCase__ , lowerCamelCase__ ): __UpperCAmelCase : str = min(lowerCamelCase__ , config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS ) elif isinstance(lowerCamelCase__ , lowerCamelCase__ ) and feature.dtype == "binary": __UpperCAmelCase : Tuple = min(lowerCamelCase__ , config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS ) _visit(lowerCamelCase__ , lowerCamelCase__ ) return None if batch_size is np.inf else batch_size class __A (__magic_name__ ): def __init__( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = False , UpperCamelCase_ = False , UpperCamelCase_ = None , **UpperCamelCase_ , ): super().__init__( UpperCamelCase_ , split=UpperCamelCase_ , features=UpperCamelCase_ , cache_dir=UpperCamelCase_ , keep_in_memory=UpperCamelCase_ , streaming=UpperCamelCase_ , num_proc=UpperCamelCase_ , **UpperCamelCase_ , ) __UpperCAmelCase : Tuple = path_or_paths if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else {self.split: path_or_paths} __UpperCAmelCase : Optional[int] = _PACKAGED_DATASETS_MODULES["parquet"][1] __UpperCAmelCase : str = Parquet( cache_dir=UpperCamelCase_ , data_files=UpperCamelCase_ , features=UpperCamelCase_ , hash=UpperCamelCase_ , **UpperCamelCase_ , ) def _snake_case ( self ): # Build iterable dataset if self.streaming: __UpperCAmelCase : List[str] = self.builder.as_streaming_dataset(split=self.split ) # Build regular (map-style) dataset else: __UpperCAmelCase : Any = None __UpperCAmelCase : List[Any] = None __UpperCAmelCase : Union[str, Any] = None __UpperCAmelCase : Dict = None self.builder.download_and_prepare( download_config=UpperCamelCase_ , download_mode=UpperCamelCase_ , verification_mode=UpperCamelCase_ , base_path=UpperCamelCase_ , num_proc=self.num_proc , ) __UpperCAmelCase : int = self.builder.as_dataset( split=self.split , verification_mode=UpperCamelCase_ , in_memory=self.keep_in_memory ) return dataset class __A : def __init__( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = None , **UpperCamelCase_ , ): __UpperCAmelCase : List[Any] = dataset __UpperCAmelCase : Dict = path_or_buf __UpperCAmelCase : List[str] = batch_size or get_writer_batch_size(dataset.features ) __UpperCAmelCase : Optional[int] = parquet_writer_kwargs def _snake_case ( self ): __UpperCAmelCase : int = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ): with open(self.path_or_buf , "wb+" ) as buffer: __UpperCAmelCase : Union[str, Any] = self._write(file_obj=UpperCamelCase_ , batch_size=UpperCamelCase_ , **self.parquet_writer_kwargs ) else: __UpperCAmelCase : 
int = self._write(file_obj=self.path_or_buf , batch_size=UpperCamelCase_ , **self.parquet_writer_kwargs ) return written def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ ): __UpperCAmelCase : Optional[int] = 0 __UpperCAmelCase : List[str] = parquet_writer_kwargs.pop("path_or_buf" , UpperCamelCase_ ) __UpperCAmelCase : Any = self.dataset.features.arrow_schema __UpperCAmelCase : Dict = pq.ParquetWriter(UpperCamelCase_ , schema=UpperCamelCase_ , **UpperCamelCase_ ) for offset in logging.tqdm( range(0 , len(self.dataset ) , UpperCamelCase_ ) , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating parquet from Arrow format" , ): __UpperCAmelCase : Dict = query_table( table=self.dataset._data , key=slice(UpperCamelCase_ , offset + batch_size ) , indices=self.dataset._indices if self.dataset._indices is not None else None , ) writer.write_table(UpperCamelCase_ ) written += batch.nbytes writer.close() return written
10
'''simple docstring''' import random import torch from huggingface_hub import HfApi from diffusers import UNetaDModel _a : Union[str, Any] = HfApi() _a : int = {} # fmt: off _a : Optional[int] = torch.tensor([ -0.7_515, -1.6_883, 0.2_420, 0.0_300, 0.6_347, 1.3_433, -1.1_743, -3.7_467, 1.2_342, -2.2_485, 0.4_636, 0.8_076, -0.7_991, 0.3_969, 0.8_498, 0.9_189, -1.8_887, -3.3_522, 0.7_639, 0.2_040, 0.6_271, -2.7_148, -1.6_316, 3.0_839, 0.3_186, 0.2_721, -0.9_759, -1.2_461, 2.6_257, 1.3_557 ]) _a : Optional[Any] = torch.tensor([ -2.3_639, -2.5_344, 0.0_054, -0.6_674, 1.5_990, 1.0_158, 0.3_124, -2.1_436, 1.8_795, -2.5_429, -0.1_566, -0.3_973, 1.2_490, 2.6_447, 1.2_283, -0.5_208, -2.8_154, -3.5_119, 2.3_838, 1.2_033, 1.7_201, -2.1_256, -1.4_576, 2.7_948, 2.4_204, -0.9_752, -1.2_546, 0.8_027, 3.2_758, 3.1_365 ]) _a : int = torch.tensor([ -0.6_531, -0.6_891, -0.3_172, -0.5_375, -0.9_140, -0.5_367, -0.1_175, -0.7_869, -0.3_808, -0.4_513, -0.2_098, -0.0_083, 0.3_183, 0.5_140, 0.2_247, -0.1_304, -0.1_302, -0.2_802, -0.2_084, -0.2_025, -0.4_967, -0.4_873, -0.0_861, 0.6_925, 0.0_250, 0.1_290, -0.1_543, 0.6_316, 1.0_460, 1.4_943 ]) _a : str = torch.tensor([ 0.0_911, 0.1_107, 0.0_182, 0.0_435, -0.0_805, -0.0_608, 0.0_381, 0.2_172, -0.0_280, 0.1_327, -0.0_299, -0.0_255, -0.0_050, -0.1_170, -0.1_046, 0.0_309, 0.1_367, 0.1_728, -0.0_533, -0.0_748, -0.0_534, 0.1_624, 0.0_384, -0.1_805, -0.0_707, 0.0_642, 0.0_220, -0.0_134, -0.1_333, -0.1_505 ]) _a : Union[str, Any] = torch.tensor([ 0.1_321, 0.1_337, 0.0_440, 0.0_622, -0.0_591, -0.0_370, 0.0_503, 0.2_133, -0.0_177, 0.1_415, -0.0_116, -0.0_112, 0.0_044, -0.0_980, -0.0_789, 0.0_395, 0.1_502, 0.1_785, -0.0_488, -0.0_514, -0.0_404, 0.1_539, 0.0_454, -0.1_559, -0.0_665, 0.0_659, 0.0_383, -0.0_005, -0.1_266, -0.1_386 ]) _a : Any = torch.tensor([ 0.1_154, 0.1_218, 0.0_307, 0.0_526, -0.0_711, -0.0_541, 0.0_366, 0.2_078, -0.0_267, 0.1_317, -0.0_226, -0.0_193, -0.0_014, -0.1_055, -0.0_902, 0.0_330, 0.1_391, 0.1_709, -0.0_562, -0.0_693, -0.0_560, 0.1_482, 0.0_381, -0.1_683, -0.0_681, 0.0_661, 0.0_331, -0.0_046, -0.1_268, -0.1_431 ]) _a : List[Any] = torch.tensor([ 0.1_192, 0.1_240, 0.0_414, 0.0_606, -0.0_557, -0.0_412, 0.0_430, 0.2_042, -0.0_200, 0.1_385, -0.0_115, -0.0_132, 0.0_017, -0.0_965, -0.0_802, 0.0_398, 0.1_433, 0.1_747, -0.0_458, -0.0_533, -0.0_407, 0.1_545, 0.0_419, -0.1_574, -0.0_645, 0.0_626, 0.0_341, -0.0_010, -0.1_199, -0.1_390 ]) _a : Optional[int] = torch.tensor([ 0.1_075, 0.1_074, 0.0_205, 0.0_431, -0.0_774, -0.0_607, 0.0_298, 0.2_042, -0.0_320, 0.1_267, -0.0_281, -0.0_250, -0.0_064, -0.1_091, -0.0_946, 0.0_290, 0.1_328, 0.1_650, -0.0_580, -0.0_738, -0.0_586, 0.1_440, 0.0_337, -0.1_746, -0.0_712, 0.0_605, 0.0_250, -0.0_099, -0.1_316, -0.1_473 ]) _a : Tuple = torch.tensor([ -1.4_572, -2.0_481, -0.0_414, -0.6_005, 1.4_136, 0.5_848, 0.4_028, -2.7_330, 1.2_212, -2.1_228, 0.2_155, 0.4_039, 0.7_662, 2.0_535, 0.7_477, -0.3_243, -2.1_758, -2.7_648, 1.6_947, 0.7_026, 1.2_338, -1.6_078, -0.8_682, 2.2_810, 1.8_574, -0.5_718, -0.5_586, -0.0_186, 2.3_415, 2.1_251]) _a : List[Any] = torch.tensor([ -1.3_690, -1.9_720, -0.4_090, -0.6_966, 1.4_660, 0.9_938, -0.1_385, -2.7_324, 0.7_736, -1.8_917, 0.2_923, 0.4_293, 0.1_693, 1.4_112, 1.1_887, -0.3_181, -2.2_160, -2.6_381, 1.3_170, 0.8_163, 0.9_240, -1.6_544, -0.6_099, 2.5_259, 1.6_430, -0.9_090, -0.9_392, -0.0_126, 2.4_268, 2.3_266 ]) _a : Optional[Any] = torch.tensor([ -1.3_525, -1.9_628, -0.3_956, -0.6_860, 1.4_664, 1.0_014, -0.1_259, -2.7_212, 0.7_772, -1.8_811, 0.2_996, 0.4_388, 0.1_704, 1.4_029, 1.1_701, -0.3_027, 
-2.2_053, -2.6_287, 1.3_350, 0.8_131, 0.9_274, -1.6_292, -0.6_098, 2.5_131, 1.6_505, -0.8_958, -0.9_298, -0.0_151, 2.4_257, 2.3_355 ]) _a : Union[str, Any] = torch.tensor([ -2.0_585, -2.7_897, -0.2_850, -0.8_940, 1.9_052, 0.5_702, 0.6_345, -3.8_959, 1.5_932, -3.2_319, 0.1_974, 0.0_287, 1.7_566, 2.6_543, 0.8_387, -0.5_351, -3.2_736, -4.3_375, 2.9_029, 1.6_390, 1.4_640, -2.1_701, -1.9_013, 2.9_341, 3.4_981, -0.6_255, -1.1_644, -0.1_591, 3.7_097, 3.2_066 ]) _a : Optional[int] = torch.tensor([ -2.3_139, -2.5_594, -0.0_197, -0.6_785, 1.7_001, 1.1_606, 0.3_075, -2.1_740, 1.8_071, -2.5_630, -0.0_926, -0.3_811, 1.2_116, 2.6_246, 1.2_731, -0.5_398, -2.8_153, -3.6_140, 2.3_893, 1.3_262, 1.6_258, -2.1_856, -1.3_267, 2.8_395, 2.3_779, -1.0_623, -1.2_468, 0.8_959, 3.3_367, 3.2_243 ]) _a : Union[str, Any] = torch.tensor([ -2.0_628, -2.7_667, -0.2_089, -0.8_263, 2.0_539, 0.5_992, 0.6_495, -3.8_336, 1.6_025, -3.2_817, 0.1_721, -0.0_633, 1.7_516, 2.7_039, 0.8_100, -0.5_908, -3.2_113, -4.4_343, 2.9_257, 1.3_632, 1.5_562, -2.1_489, -1.9_894, 3.0_560, 3.3_396, -0.7_328, -1.0_417, 0.0_383, 3.7_093, 3.2_343 ]) _a : str = torch.tensor([ -1.4_574, -2.0_569, -0.0_473, -0.6_117, 1.4_018, 0.5_769, 0.4_129, -2.7_344, 1.2_241, -2.1_397, 0.2_000, 0.3_937, 0.7_616, 2.0_453, 0.7_324, -0.3_391, -2.1_746, -2.7_744, 1.6_963, 0.6_921, 1.2_187, -1.6_172, -0.8_877, 2.2_439, 1.8_471, -0.5_839, -0.5_605, -0.0_464, 2.3_250, 2.1_219 ]) # fmt: on _a : Optional[Any] = api.list_models(filter="diffusers") for mod in models: if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256": _a : List[str] = "/home/patrick/google_checkpoints/" + mod.modelId.split("/")[-1] print(f"""Started running {mod.modelId}!!!""") if mod.modelId.startswith("CompVis"): _a : int = UNetaDModel.from_pretrained(local_checkpoint, subfolder="unet") else: _a : Optional[int] = UNetaDModel.from_pretrained(local_checkpoint) torch.manual_seed(0) random.seed(0) _a : str = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size) _a : str = torch.tensor([10] * noise.shape[0]) with torch.no_grad(): _a : str = model(noise, time_step).sample assert torch.allclose( logits[0, 0, 0, :30], results["_".join("_".join(mod.modelId.split("/")).split("-"))], atol=1e-3 ) print(f"""{mod.modelId} has passed successfully!!!""")
10
1
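# --- editor's note -------------------------------------------------------
# A self-contained sketch of the batched Parquet write performed by the
# writer class in the row above, using plain pyarrow instead of a
# `datasets.Dataset`. The file name and batch size are illustrative.
import pyarrow as pa
import pyarrow.parquet as pq

table = pa.table({"idx": list(range(10)), "text": [f"row {i}" for i in range(10)]})
batch_size = 4  # cf. get_writer_batch_size, which shrinks this for image/audio/binary columns
writer = pq.ParquetWriter("demo.parquet", schema=table.schema)
for offset in range(0, table.num_rows, batch_size):
    writer.write_table(table.slice(offset, batch_size))  # one chunk of rows at a time
writer.close()
assert pq.read_table("demo.parquet").num_rows == 10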
'''simple docstring''' import os from tempfile import TemporaryDirectory from unittest import TestCase import pytest from absl.testing import parameterized from datasets import config from datasets.arrow_reader import HF_GCP_BASE_URL from datasets.builder import DatasetBuilder from datasets.dataset_dict import IterableDatasetDict from datasets.iterable_dataset import IterableDataset from datasets.load import dataset_module_factory, import_main_class from datasets.utils.file_utils import cached_path _a : int = [ {"dataset": "wikipedia", "config_name": "20220301.de"}, {"dataset": "wikipedia", "config_name": "20220301.en"}, {"dataset": "wikipedia", "config_name": "20220301.fr"}, {"dataset": "wikipedia", "config_name": "20220301.frr"}, {"dataset": "wikipedia", "config_name": "20220301.it"}, {"dataset": "wikipedia", "config_name": "20220301.simple"}, {"dataset": "snli", "config_name": "plain_text"}, {"dataset": "eli5", "config_name": "LFQA_reddit"}, {"dataset": "wiki40b", "config_name": "en"}, {"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.compressed"}, {"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.no_index"}, {"dataset": "wiki_dpr", "config_name": "psgs_w100.multiset.no_index"}, {"dataset": "natural_questions", "config_name": "default"}, ] def _lowercase ( lowerCamelCase__=True ) -> List[str]: """simple docstring""" if with_config: return [ { "testcase_name": d["dataset"] + "/" + d["config_name"], "dataset": d["dataset"], "config_name": d["config_name"], } for d in DATASETS_ON_HF_GCP ] else: return [ {"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP} ] @parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=__magic_name__ ) ) class __A (__magic_name__ ): snake_case :Optional[int] = None snake_case :Tuple = None def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ ): with TemporaryDirectory() as tmp_dir: __UpperCAmelCase : Any = dataset_module_factory(UpperCamelCase_ , cache_dir=UpperCamelCase_ ) __UpperCAmelCase : Any = import_main_class(dataset_module.module_path , dataset=UpperCamelCase_ ) __UpperCAmelCase : DatasetBuilder = builder_cls( cache_dir=UpperCamelCase_ , config_name=UpperCamelCase_ , hash=dataset_module.hash , ) __UpperCAmelCase : str = "/".join( [ HF_GCP_BASE_URL, builder_instance._relative_data_dir(with_hash=UpperCamelCase_ ).replace(os.sep , "/" ), config.DATASET_INFO_FILENAME, ] ) __UpperCAmelCase : str = cached_path(UpperCamelCase_ , cache_dir=UpperCamelCase_ ) self.assertTrue(os.path.exists(UpperCamelCase_ ) ) @pytest.mark.integration def _lowercase ( lowerCamelCase__ ) -> Optional[Any]: """simple docstring""" __UpperCAmelCase : int = tmp_path_factory.mktemp("test_hf_gcp" ) / "test_wikipedia_simple" __UpperCAmelCase : Tuple = dataset_module_factory("wikipedia" , cache_dir=lowerCamelCase__ ) __UpperCAmelCase : Tuple = import_main_class(dataset_module.module_path ) __UpperCAmelCase : DatasetBuilder = builder_cls( cache_dir=lowerCamelCase__ , config_name="20220301.frr" , hash=dataset_module.hash , ) # use the HF cloud storage, not the original download_and_prepare that uses apache-beam __UpperCAmelCase : Any = None builder_instance.download_and_prepare() __UpperCAmelCase : Union[str, Any] = builder_instance.as_dataset() assert ds @pytest.mark.integration def _lowercase ( lowerCamelCase__ ) -> Optional[int]: """simple docstring""" __UpperCAmelCase : str = dataset_module_factory("wikipedia" , cache_dir=lowerCamelCase__ ) __UpperCAmelCase : Optional[Any] = 
import_main_class(dataset_module.module_path , dataset=lowerCamelCase__ ) __UpperCAmelCase : DatasetBuilder = builder_cls( cache_dir=lowerCamelCase__ , config_name="20220301.frr" , hash=dataset_module.hash , ) __UpperCAmelCase : str = builder_instance.as_streaming_dataset() assert ds assert isinstance(lowerCamelCase__ , lowerCamelCase__ ) assert "train" in ds assert isinstance(ds["train"] , lowerCamelCase__ ) assert next(iter(ds["train"] ) )
10
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging _a : Any = logging.get_logger(__name__) _a : List[Any] = { "microsoft/cvt-13": "https://huggingface.co/microsoft/cvt-13/resolve/main/config.json", # See all Cvt models at https://huggingface.co/models?filter=cvt } class __A (__magic_name__ ): snake_case :Any = "cvt" def __init__( self , UpperCamelCase_=3 , UpperCamelCase_=[7, 3, 3] , UpperCamelCase_=[4, 2, 2] , UpperCamelCase_=[2, 1, 1] , UpperCamelCase_=[64, 1_92, 3_84] , UpperCamelCase_=[1, 3, 6] , UpperCamelCase_=[1, 2, 10] , UpperCamelCase_=[4.0, 4.0, 4.0] , UpperCamelCase_=[0.0, 0.0, 0.0] , UpperCamelCase_=[0.0, 0.0, 0.0] , UpperCamelCase_=[0.0, 0.0, 0.1] , UpperCamelCase_=[True, True, True] , UpperCamelCase_=[False, False, True] , UpperCamelCase_=["dw_bn", "dw_bn", "dw_bn"] , UpperCamelCase_=[3, 3, 3] , UpperCamelCase_=[1, 1, 1] , UpperCamelCase_=[2, 2, 2] , UpperCamelCase_=[1, 1, 1] , UpperCamelCase_=[1, 1, 1] , UpperCamelCase_=0.0_2 , UpperCamelCase_=1E-12 , **UpperCamelCase_ , ): super().__init__(**UpperCamelCase_ ) __UpperCAmelCase : Optional[int] = num_channels __UpperCAmelCase : Optional[Any] = patch_sizes __UpperCAmelCase : List[str] = patch_stride __UpperCAmelCase : Tuple = patch_padding __UpperCAmelCase : int = embed_dim __UpperCAmelCase : str = num_heads __UpperCAmelCase : Any = depth __UpperCAmelCase : List[str] = mlp_ratio __UpperCAmelCase : List[str] = attention_drop_rate __UpperCAmelCase : Dict = drop_rate __UpperCAmelCase : Dict = drop_path_rate __UpperCAmelCase : str = qkv_bias __UpperCAmelCase : Optional[int] = cls_token __UpperCAmelCase : Optional[Any] = qkv_projection_method __UpperCAmelCase : Tuple = kernel_qkv __UpperCAmelCase : Optional[Any] = padding_kv __UpperCAmelCase : Optional[int] = stride_kv __UpperCAmelCase : Any = padding_q __UpperCAmelCase : List[Any] = stride_q __UpperCAmelCase : Union[str, Any] = initializer_range __UpperCAmelCase : Any = layer_norm_eps
10
1
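# --- editor's note -------------------------------------------------------
# A hedged usage sketch for the CvT configuration in the row above,
# assuming the standard `transformers` entry points (CvtConfig/CvtModel)
# that this config file is registered under; the argument values simply
# repeat the defaults from the __init__ signature above.
from transformers import CvtConfig, CvtModel

config = CvtConfig(embed_dim=[64, 192, 384], num_heads=[1, 3, 6], depth=[1, 2, 10])
model = CvtModel(config)  # randomly initialized weights matching the config
print(model.config.embed_dim)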
'''simple docstring'''
import argparse
import json
import os

import fairseq
import torch
from fairseq.data import Dictionary

from transformers import (
    UniSpeechConfig,
    UniSpeechForCTC,
    UniSpeechForPreTraining,
    WavaVecaFeatureExtractor,
    WavaVecaPhonemeCTCTokenizer,
    WavaVecaProcessor,
    logging,
)


logging.set_verbosity_info()
_a : List[Any] = logging.get_logger(__name__)

_a : Union[str, Any] = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "ctc_proj",
    "mask_emb": "masked_spec_embed",
}
_a : Optional[int] = [
    "ctc_proj",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]


def _lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Optional[int]:
    """simple docstring"""
    for attribute in key.split("." ):
        if is_finetuned:
            if attribute in ["quantizer", "project_q", "project_hid"]:
                # those layers are only relevant for pretraining and should be dropped
                return

            if attribute == "ctc_proj":
                # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
                __UpperCAmelCase : Optional[int] = "lm_head"

        __UpperCAmelCase : Any = getattr(lowerCamelCase__ , lowerCamelCase__ )

    if weight_type is not None:
        __UpperCAmelCase : Optional[Any] = getattr(lowerCamelCase__ , lowerCamelCase__ ).shape
    else:
        __UpperCAmelCase : Optional[Any] = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
        f""" {value.shape} for {full_name}"""
    )

    if weight_type == "weight":
        __UpperCAmelCase : Optional[int] = value
    elif weight_type == "weight_g":
        __UpperCAmelCase : Any = value
    elif weight_type == "weight_v":
        __UpperCAmelCase : int = value
    elif weight_type == "bias":
        __UpperCAmelCase : List[str] = value
    else:
        __UpperCAmelCase : int = value

    logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )


def _lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Tuple:
    """simple docstring"""
    __UpperCAmelCase : List[str] = []
    __UpperCAmelCase : Tuple = fairseq_model.state_dict()

    __UpperCAmelCase : Optional[Any] = hf_model.unispeech.feature_extractor

    for name, value in fairseq_dict.items():
        __UpperCAmelCase : Union[str, Any] = False
        if "conv_layers" in name:
            load_conv_layer(
                lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , hf_model.config.feat_extract_norm == "group" , )
            __UpperCAmelCase : int = True
        else:
            for key, mapped_key in MAPPING.items():
                __UpperCAmelCase : Dict = "unispeech." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
                    __UpperCAmelCase : str = True
                    if "*" in mapped_key:
                        __UpperCAmelCase : List[str] = name.split(lowerCamelCase__ )[0].split("." )[-2]
                        __UpperCAmelCase : Dict = mapped_key.replace("*" , lowerCamelCase__ )
                    if "weight_g" in name:
                        __UpperCAmelCase : Union[str, Any] = "weight_g"
                    elif "weight_v" in name:
                        __UpperCAmelCase : Any = "weight_v"
                    elif "bias" in name:
                        __UpperCAmelCase : List[Any] = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        __UpperCAmelCase : Tuple = "weight"
                    else:
                        __UpperCAmelCase : Optional[Any] = None
                    set_recursively(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
                continue
        if not is_used:
            unused_weights.append(lowerCamelCase__ )

    logger.warning(f"""Unused weights: {unused_weights}""" )


def _lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Any:
    """simple docstring"""
    __UpperCAmelCase : Any = full_name.split("conv_layers." )[-1]
    __UpperCAmelCase : List[Any] = name.split("." )
    __UpperCAmelCase : Any = int(items[0] )
    __UpperCAmelCase : Any = int(items[1] )

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
            )
            __UpperCAmelCase : Optional[Any] = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
            )
            __UpperCAmelCase : str = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
                " found."
            )
            __UpperCAmelCase : List[str] = value
            logger.info(f"""Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
            )
            __UpperCAmelCase : Union[str, Any] = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
    else:
        unused_weights.append(lowerCamelCase__ )


@torch.no_grad()
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=True ) -> Union[str, Any]:
    """simple docstring"""
    if config_path is not None:
        __UpperCAmelCase : Tuple = UniSpeechConfig.from_pretrained(lowerCamelCase__ )
    else:
        __UpperCAmelCase : Tuple = UniSpeechConfig()

    if is_finetuned:
        if dict_path:
            __UpperCAmelCase : str = Dictionary.load_from_json(lowerCamelCase__ )

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            __UpperCAmelCase : Optional[Any] = target_dict.pad_index
            __UpperCAmelCase : Optional[Any] = target_dict.bos_index
            __UpperCAmelCase : int = target_dict.eos_index
            __UpperCAmelCase : List[str] = len(target_dict.symbols )
            __UpperCAmelCase : Optional[int] = os.path.join(lowerCamelCase__ , "vocab.json" )
            if not os.path.isdir(lowerCamelCase__ ):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(lowerCamelCase__ ) )
                return
            os.makedirs(lowerCamelCase__ , exist_ok=lowerCamelCase__ )
            __UpperCAmelCase : Optional[Any] = target_dict.indices

            # fairseq has the <pad> and <s> switched
            __UpperCAmelCase : Optional[Any] = 42
            __UpperCAmelCase : int = 43
            with open(lowerCamelCase__ , "w" , encoding="utf-8" ) as vocab_handle:
                json.dump(lowerCamelCase__ , lowerCamelCase__ )
            __UpperCAmelCase : str = WavaVecaPhonemeCTCTokenizer(
                lowerCamelCase__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=lowerCamelCase__ , )
            __UpperCAmelCase : List[Any] = True if config.feat_extract_norm == "layer" else False
            __UpperCAmelCase : Optional[int] = WavaVecaFeatureExtractor(
                feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , )
            __UpperCAmelCase : List[Any] = WavaVecaProcessor(feature_extractor=lowerCamelCase__ , tokenizer=lowerCamelCase__ )
            processor.save_pretrained(lowerCamelCase__ )

        __UpperCAmelCase : Optional[Any] = UniSpeechForCTC(lowerCamelCase__ )
    else:
        __UpperCAmelCase : Any = UniSpeechForPreTraining(lowerCamelCase__ )

    if is_finetuned:
        __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Optional[int] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] ), "w2v_path": checkpoint_path} )
    else:
        __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Dict = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )

    __UpperCAmelCase : Tuple = model[0].eval()

    recursively_load_weights(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )

    hf_unispeech.save_pretrained(lowerCamelCase__ )


if __name__ == "__main__":
    _a : Tuple = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    _a : Optional[int] = parser.parse_args()
    convert_unispeech_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
10
'''simple docstring'''
from __future__ import annotations

import numpy as np
from numpy import floataa
from numpy.typing import NDArray


def _lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , ) -> list[float]:
    """simple docstring"""
    __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = coefficient_matrix.shape
    __UpperCAmelCase , __UpperCAmelCase : Any = constant_matrix.shape

    if rowsa != colsa:
        __UpperCAmelCase : str = f"""Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}"""
        raise ValueError(lowerCamelCase__ )

    if colsa != 1:
        __UpperCAmelCase : Optional[Any] = f"""Constant matrix must be nx1 but received {rowsa}x{colsa}"""
        raise ValueError(lowerCamelCase__ )

    if rowsa != rowsa:
        __UpperCAmelCase : Optional[int] = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"""received {rowsa}x{colsa} and {rowsa}x{colsa}"""
        )
        raise ValueError(lowerCamelCase__ )

    if len(lowerCamelCase__ ) != rowsa:
        __UpperCAmelCase : List[str] = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"""matrix but received {len(lowerCamelCase__ )} and {rowsa}"""
        )
        raise ValueError(lowerCamelCase__ )

    if iterations <= 0:
        raise ValueError("Iterations must be at least 1" )

    __UpperCAmelCase : NDArray[floataa] = np.concatenate(
        (coefficient_matrix, constant_matrix) , axis=1 )

    __UpperCAmelCase , __UpperCAmelCase : Tuple = table.shape

    strictly_diagonally_dominant(lowerCamelCase__ )

    # Iterates the whole matrix for given number of times
    for _ in range(lowerCamelCase__ ):
        __UpperCAmelCase : int = []
        for row in range(lowerCamelCase__ ):
            __UpperCAmelCase : List[str] = 0
            for col in range(lowerCamelCase__ ):
                if col == row:
                    __UpperCAmelCase : int = table[row][col]
                elif col == cols - 1:
                    __UpperCAmelCase : Any = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            __UpperCAmelCase : List[Any] = (temp + val) / denom
            new_val.append(lowerCamelCase__ )
        __UpperCAmelCase : str = new_val

    return [float(lowerCamelCase__ ) for i in new_val]


def _lowercase ( lowerCamelCase__ ) -> bool:
    """simple docstring"""
    __UpperCAmelCase , __UpperCAmelCase : Optional[int] = table.shape

    __UpperCAmelCase : str = True

    for i in range(0 , lowerCamelCase__ ):
        __UpperCAmelCase : Union[str, Any] = 0
        for j in range(0 , cols - 1 ):
            if i == j:
                continue
            else:
                total += table[i][j]

        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant" )

    return is_diagonally_dominant


# Test Cases
if __name__ == "__main__":
    import doctest

    doctest.testmod()
10
1
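A small sketch of the wildcard renaming performed inside the weight-loading loop above; the tensor name below is a hypothetical example, not one read from a real checkpoint:

# hypothetical fairseq tensor name matched against a mapped key with a "*" placeholder
key = "self_attn.k_proj"
mapped_key = "encoder.layers.*.attention.k_proj"
name = "w2v_model.encoder.layers.3.self_attn.k_proj.weight"

layer_index = name.split(key)[0].split(".")[-2]  # -> "3"
print(mapped_key.replace("*", layer_index))  # encoder.layers.3.attention.k_proj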
'''simple docstring'''
from typing import Dict, Iterable, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_DEFAULT_MEAN,
    IMAGENET_DEFAULT_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, logging


_a : List[Any] = logging.get_logger(__name__)


class __A (__magic_name__ ):
    snake_case :Tuple = ["pixel_values"]

    def __init__( self , UpperCamelCase_ = True , UpperCamelCase_ = None , UpperCamelCase_ = PILImageResampling.BICUBIC , UpperCamelCase_ = True , UpperCamelCase_ = None , UpperCamelCase_ = True , UpperCamelCase_ = 1 / 2_55 , UpperCamelCase_ = True , UpperCamelCase_ = IMAGENET_DEFAULT_MEAN , UpperCamelCase_ = IMAGENET_DEFAULT_STD , **UpperCamelCase_ , ):
        super().__init__(**UpperCamelCase_ )
        __UpperCAmelCase : Any = size if size is not None else {"shortest_edge": 2_24}
        __UpperCAmelCase : str = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ )
        __UpperCAmelCase : Dict = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24}
        __UpperCAmelCase : Dict = get_size_dict(UpperCamelCase_ , param_name="crop_size" )

        __UpperCAmelCase : int = do_resize
        __UpperCAmelCase : Any = size
        __UpperCAmelCase : Any = resample
        __UpperCAmelCase : Any = do_center_crop
        __UpperCAmelCase : Optional[int] = crop_size
        __UpperCAmelCase : int = do_rescale
        __UpperCAmelCase : Any = rescale_factor
        __UpperCAmelCase : Any = do_normalize
        __UpperCAmelCase : Optional[int] = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        __UpperCAmelCase : Union[str, Any] = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = PILImageResampling.BICUBIC , UpperCamelCase_ = None , **UpperCamelCase_ , ):
        __UpperCAmelCase : Union[str, Any] = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ )
        # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
        if "shortest_edge" in size:
            __UpperCAmelCase : Optional[Any] = int((2_56 / 2_24) * size["shortest_edge"] )
            __UpperCAmelCase : List[Any] = get_resize_output_image_size(UpperCamelCase_ , size=UpperCamelCase_ , default_to_square=UpperCamelCase_ )
            __UpperCAmelCase : Tuple = {"height": output_size[0], "width": output_size[1]}
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(
                f"""Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}""" )
        return resize(
            UpperCamelCase_ , size=(size_dict["height"], size_dict["width"]) , resample=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ )

    def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = None , **UpperCamelCase_ , ):
        __UpperCAmelCase : Tuple = get_size_dict(UpperCamelCase_ )
        if "height" not in size or "width" not in size:
            raise ValueError(f"""Size dict must have keys 'height' and 'width'. Got {size.keys()}""" )
        return center_crop(UpperCamelCase_ , size=(size["height"], size["width"]) , data_format=UpperCamelCase_ , **UpperCamelCase_ )

    def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = None , **UpperCamelCase_ , ):
        return rescale(UpperCamelCase_ , scale=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ )

    def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = None , **UpperCamelCase_ , ):
        return normalize(UpperCamelCase_ , mean=UpperCamelCase_ , std=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ )

    def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = ChannelDimension.FIRST , **UpperCamelCase_ , ):
        __UpperCAmelCase : int = do_resize if do_resize is not None else self.do_resize
        __UpperCAmelCase : Dict = resample if resample is not None else self.resample
        __UpperCAmelCase : int = do_center_crop if do_center_crop is not None else self.do_center_crop
        __UpperCAmelCase : str = do_rescale if do_rescale is not None else self.do_rescale
        __UpperCAmelCase : str = rescale_factor if rescale_factor is not None else self.rescale_factor
        __UpperCAmelCase : Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize
        __UpperCAmelCase : Any = image_mean if image_mean is not None else self.image_mean
        __UpperCAmelCase : Any = image_std if image_std is not None else self.image_std

        __UpperCAmelCase : Any = size if size is not None else self.size
        __UpperCAmelCase : Tuple = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ )
        __UpperCAmelCase : List[str] = crop_size if crop_size is not None else self.crop_size
        __UpperCAmelCase : List[str] = get_size_dict(UpperCamelCase_ , param_name="crop_size" )

        __UpperCAmelCase : int = make_list_of_images(UpperCamelCase_ )

        if not valid_images(UpperCamelCase_ ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True." )

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True." )

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True." )

        # All transformations expect numpy arrays.
        __UpperCAmelCase : int = [to_numpy_array(UpperCamelCase_ ) for image in images]

        if do_resize:
            __UpperCAmelCase : int = [self.resize(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) for image in images]

        if do_center_crop:
            __UpperCAmelCase : List[Any] = [self.center_crop(UpperCamelCase_ , UpperCamelCase_ ) for image in images]

        if do_rescale:
            __UpperCAmelCase : Optional[Any] = [self.rescale(UpperCamelCase_ , UpperCamelCase_ ) for image in images]

        if do_normalize:
            __UpperCAmelCase : Any = [self.normalize(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) for image in images]

        __UpperCAmelCase : Any = [to_channel_dimension_format(UpperCamelCase_ , UpperCamelCase_ ) for image in images]

        __UpperCAmelCase : Union[str, Any] = {"pixel_values": images}
        return BatchFeature(data=UpperCamelCase_ , tensor_type=UpperCamelCase_ )
10
'''simple docstring'''
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors


def _lowercase ( lowerCamelCase__ ) -> int:
    """simple docstring"""
    __UpperCAmelCase : Any = prime_factors(lowerCamelCase__ )
    if is_square_free(lowerCamelCase__ ):
        return -1 if len(lowerCamelCase__ ) % 2 else 1
    return 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
10
1
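A quick sanity check of the Möbius values the function above computes; `mobius` is an assumed readable name for the obfuscated `_lowercase`:

for n in [1, 2, 4, 6, 12]:
    print(n, mobius(n))  # expected: 1 -> 1, 2 -> -1, 4 -> 0, 6 -> 1, 12 -> 0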
'''simple docstring''' import unittest from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class __A : @staticmethod def _snake_case ( *UpperCamelCase_ , **UpperCamelCase_ ): pass @is_pipeline_test @require_vision @require_torch class __A (unittest.TestCase ): snake_case :Optional[Any] = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ): __UpperCAmelCase : List[str] = pipeline( "zero-shot-object-detection" , model="hf-internal-testing/tiny-random-owlvit-object-detection" ) __UpperCAmelCase : Optional[Any] = [ { "image": "./tests/fixtures/tests_samples/COCO/000000039769.png", "candidate_labels": ["cat", "remote", "couch"], } ] return object_detector, examples def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ ): __UpperCAmelCase : Union[str, Any] = object_detector(examples[0] , threshold=0.0 ) __UpperCAmelCase : Optional[int] = len(UpperCamelCase_ ) self.assertGreater(UpperCamelCase_ , 0 ) self.assertEqual( UpperCamelCase_ , [ { "score": ANY(UpperCamelCase_ ), "label": ANY(UpperCamelCase_ ), "box": {"xmin": ANY(UpperCamelCase_ ), "ymin": ANY(UpperCamelCase_ ), "xmax": ANY(UpperCamelCase_ ), "ymax": ANY(UpperCamelCase_ )}, } for i in range(UpperCamelCase_ ) ] , ) @require_tf @unittest.skip("Zero Shot Object Detection not implemented in TF" ) def _snake_case ( self ): pass @require_torch def _snake_case ( self ): __UpperCAmelCase : Optional[Any] = pipeline( "zero-shot-object-detection" , model="hf-internal-testing/tiny-random-owlvit-object-detection" ) __UpperCAmelCase : Dict = object_detector( "./tests/fixtures/tests_samples/COCO/000000039769.png" , candidate_labels=["cat", "remote", "couch"] , threshold=0.6_4 , ) self.assertEqual( nested_simplify(UpperCamelCase_ , decimals=4 ) , [ {"score": 0.7_2_3_5, "label": "cat", "box": {"xmin": 2_04, "ymin": 1_67, "xmax": 2_32, "ymax": 1_90}}, {"score": 0.7_2_1_8, "label": "remote", "box": {"xmin": 2_04, "ymin": 1_67, "xmax": 2_32, "ymax": 1_90}}, {"score": 0.7_1_8_4, "label": "couch", "box": {"xmin": 2_04, "ymin": 1_67, "xmax": 2_32, "ymax": 1_90}}, {"score": 0.6_7_4_8, "label": "remote", "box": {"xmin": 5_71, "ymin": 83, "xmax": 5_98, "ymax": 1_03}}, {"score": 0.6_6_5_6, "label": "cat", "box": {"xmin": 5_71, "ymin": 83, "xmax": 5_98, "ymax": 1_03}}, {"score": 0.6_6_1_4, "label": "couch", "box": {"xmin": 5_71, "ymin": 83, "xmax": 5_98, "ymax": 1_03}}, {"score": 0.6_4_5_6, "label": "remote", "box": {"xmin": 4_94, "ymin": 1_05, "xmax": 5_21, "ymax": 1_27}}, {"score": 0.6_4_2, "label": "remote", "box": {"xmin": 67, "ymin": 2_74, "xmax": 93, "ymax": 2_97}}, {"score": 0.6_4_1_9, "label": "cat", "box": {"xmin": 4_94, "ymin": 1_05, "xmax": 5_21, "ymax": 1_27}}, ] , ) __UpperCAmelCase : int = object_detector( [ { "image": "./tests/fixtures/tests_samples/COCO/000000039769.png", "candidate_labels": ["cat", "remote", "couch"], } ] , threshold=0.6_4 , ) self.assertEqual( nested_simplify(UpperCamelCase_ , decimals=4 ) , [ [ {"score": 0.7_2_3_5, "label": "cat", "box": {"xmin": 2_04, "ymin": 1_67, "xmax": 2_32, "ymax": 1_90}}, {"score": 0.7_2_1_8, "label": "remote", "box": {"xmin": 2_04, "ymin": 1_67, "xmax": 2_32, "ymax": 1_90}}, {"score": 0.7_1_8_4, "label": "couch", "box": {"xmin": 2_04, "ymin": 1_67, "xmax": 2_32, 
"ymax": 1_90}}, {"score": 0.6_7_4_8, "label": "remote", "box": {"xmin": 5_71, "ymin": 83, "xmax": 5_98, "ymax": 1_03}}, {"score": 0.6_6_5_6, "label": "cat", "box": {"xmin": 5_71, "ymin": 83, "xmax": 5_98, "ymax": 1_03}}, {"score": 0.6_6_1_4, "label": "couch", "box": {"xmin": 5_71, "ymin": 83, "xmax": 5_98, "ymax": 1_03}}, {"score": 0.6_4_5_6, "label": "remote", "box": {"xmin": 4_94, "ymin": 1_05, "xmax": 5_21, "ymax": 1_27}}, {"score": 0.6_4_2, "label": "remote", "box": {"xmin": 67, "ymin": 2_74, "xmax": 93, "ymax": 2_97}}, {"score": 0.6_4_1_9, "label": "cat", "box": {"xmin": 4_94, "ymin": 1_05, "xmax": 5_21, "ymax": 1_27}}, ] ] , ) @require_torch @slow def _snake_case ( self ): __UpperCAmelCase : Optional[Any] = pipeline("zero-shot-object-detection" ) __UpperCAmelCase : List[str] = object_detector( "http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , ) self.assertEqual( nested_simplify(UpperCamelCase_ , decimals=4 ) , [ {"score": 0.2_8_6_8, "label": "cat", "box": {"xmin": 3_24, "ymin": 20, "xmax": 6_40, "ymax": 3_73}}, {"score": 0.2_7_7, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 1_77, "ymax": 1_15}}, {"score": 0.2_5_3_7, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 3_15, "ymax": 4_72}}, {"score": 0.1_4_7_4, "label": "remote", "box": {"xmin": 3_35, "ymin": 74, "xmax": 3_71, "ymax": 1_87}}, {"score": 0.1_2_0_8, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 6_42, "ymax": 4_76}}, ] , ) __UpperCAmelCase : Union[str, Any] = object_detector( [ { "image": "http://images.cocodataset.org/val2017/000000039769.jpg", "candidate_labels": ["cat", "remote", "couch"], }, { "image": "http://images.cocodataset.org/val2017/000000039769.jpg", "candidate_labels": ["cat", "remote", "couch"], }, ] , ) self.assertEqual( nested_simplify(UpperCamelCase_ , decimals=4 ) , [ [ {"score": 0.2_8_6_8, "label": "cat", "box": {"xmin": 3_24, "ymin": 20, "xmax": 6_40, "ymax": 3_73}}, {"score": 0.2_7_7, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 1_77, "ymax": 1_15}}, {"score": 0.2_5_3_7, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 3_15, "ymax": 4_72}}, {"score": 0.1_4_7_4, "label": "remote", "box": {"xmin": 3_35, "ymin": 74, "xmax": 3_71, "ymax": 1_87}}, {"score": 0.1_2_0_8, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 6_42, "ymax": 4_76}}, ], [ {"score": 0.2_8_6_8, "label": "cat", "box": {"xmin": 3_24, "ymin": 20, "xmax": 6_40, "ymax": 3_73}}, {"score": 0.2_7_7, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 1_77, "ymax": 1_15}}, {"score": 0.2_5_3_7, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 3_15, "ymax": 4_72}}, {"score": 0.1_4_7_4, "label": "remote", "box": {"xmin": 3_35, "ymin": 74, "xmax": 3_71, "ymax": 1_87}}, {"score": 0.1_2_0_8, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 6_42, "ymax": 4_76}}, ], ] , ) @require_tf @unittest.skip("Zero Shot Object Detection not implemented in TF" ) def _snake_case ( self ): pass @require_torch @slow def _snake_case ( self ): __UpperCAmelCase : str = 0.2 __UpperCAmelCase : Union[str, Any] = pipeline("zero-shot-object-detection" ) __UpperCAmelCase : int = object_detector( "http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , threshold=UpperCamelCase_ , ) self.assertEqual( nested_simplify(UpperCamelCase_ , decimals=4 ) , [ {"score": 0.2_8_6_8, "label": "cat", "box": {"xmin": 3_24, "ymin": 20, "xmax": 6_40, "ymax": 3_73}}, {"score": 0.2_7_7, "label": "remote", "box": {"xmin": 40, 
"ymin": 72, "xmax": 1_77, "ymax": 1_15}}, {"score": 0.2_5_3_7, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 3_15, "ymax": 4_72}}, ] , ) @require_torch @slow def _snake_case ( self ): __UpperCAmelCase : Optional[int] = 2 __UpperCAmelCase : Optional[int] = pipeline("zero-shot-object-detection" ) __UpperCAmelCase : List[Any] = object_detector( "http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , top_k=UpperCamelCase_ , ) self.assertEqual( nested_simplify(UpperCamelCase_ , decimals=4 ) , [ {"score": 0.2_8_6_8, "label": "cat", "box": {"xmin": 3_24, "ymin": 20, "xmax": 6_40, "ymax": 3_73}}, {"score": 0.2_7_7, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 1_77, "ymax": 1_15}}, ] , )
10
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)


_a : Dict = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _a : Dict = ["ReformerTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _a : List[Any] = ["ReformerTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _a : int = [
        "REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ReformerAttention",
        "ReformerForMaskedLM",
        "ReformerForQuestionAnswering",
        "ReformerForSequenceClassification",
        "ReformerLayer",
        "ReformerModel",
        "ReformerModelWithLMHead",
        "ReformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer import ReformerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer_fast import ReformerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_reformer import (
            REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            ReformerAttention,
            ReformerForMaskedLM,
            ReformerForQuestionAnswering,
            ReformerForSequenceClassification,
            ReformerLayer,
            ReformerModel,
            ReformerModelWithLMHead,
            ReformerPreTrainedModel,
        )

else:
    import sys

    _a : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
10
1
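A usage sketch of the pipeline exercised by the tests above, assuming a working `torch` install and network access to the Hub:

from transformers import pipeline

detector = pipeline("zero-shot-object-detection")
predictions = detector(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    candidate_labels=["cat", "remote", "couch"],
)
print(predictions[0])  # e.g. {"score": ..., "label": "cat", "box": {"xmin": ..., ...}}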
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging


_a : List[Any] = logging.get_logger(__name__)

_a : Optional[int] = {
    "microsoft/swinv2-tiny-patch4-window8-256": (
        "https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
    ),
}


class __A (__magic_name__ ):
    snake_case :Any = "swinv2"

    snake_case :Union[str, Any] = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__( self , UpperCamelCase_=2_24 , UpperCamelCase_=4 , UpperCamelCase_=3 , UpperCamelCase_=96 , UpperCamelCase_=[2, 2, 6, 2] , UpperCamelCase_=[3, 6, 12, 24] , UpperCamelCase_=7 , UpperCamelCase_=4.0 , UpperCamelCase_=True , UpperCamelCase_=0.0 , UpperCamelCase_=0.0 , UpperCamelCase_=0.1 , UpperCamelCase_="gelu" , UpperCamelCase_=False , UpperCamelCase_=0.0_2 , UpperCamelCase_=1E-5 , UpperCamelCase_=32 , **UpperCamelCase_ , ):
        super().__init__(**UpperCamelCase_ )

        __UpperCAmelCase : Dict = image_size
        __UpperCAmelCase : List[str] = patch_size
        __UpperCAmelCase : Dict = num_channels
        __UpperCAmelCase : Optional[Any] = embed_dim
        __UpperCAmelCase : str = depths
        __UpperCAmelCase : int = len(UpperCamelCase_ )
        __UpperCAmelCase : int = num_heads
        __UpperCAmelCase : str = window_size
        __UpperCAmelCase : Any = mlp_ratio
        __UpperCAmelCase : List[str] = qkv_bias
        __UpperCAmelCase : int = hidden_dropout_prob
        __UpperCAmelCase : Tuple = attention_probs_dropout_prob
        __UpperCAmelCase : Optional[int] = drop_path_rate
        __UpperCAmelCase : Tuple = hidden_act
        __UpperCAmelCase : int = use_absolute_embeddings
        __UpperCAmelCase : Tuple = layer_norm_eps
        __UpperCAmelCase : int = initializer_range
        __UpperCAmelCase : str = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        __UpperCAmelCase : str = int(embed_dim * 2 ** (len(UpperCamelCase_ ) - 1) )
        __UpperCAmelCase : Tuple = (0, 0, 0, 0)
10
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


_a : List[str] = logging.get_logger(__name__)

_a : Any = {
    "kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
    "kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
    "kssteven/ibert-roberta-large-mnli": (
        "https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
    ),
}


class __A (__magic_name__ ):
    snake_case :Union[str, Any] = "ibert"

    def __init__( self , UpperCamelCase_=3_05_22 , UpperCamelCase_=7_68 , UpperCamelCase_=12 , UpperCamelCase_=12 , UpperCamelCase_=30_72 , UpperCamelCase_="gelu" , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=5_12 , UpperCamelCase_=2 , UpperCamelCase_=0.0_2 , UpperCamelCase_=1E-12 , UpperCamelCase_=1 , UpperCamelCase_=0 , UpperCamelCase_=2 , UpperCamelCase_="absolute" , UpperCamelCase_=False , UpperCamelCase_="none" , **UpperCamelCase_ , ):
        super().__init__(pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ )

        __UpperCAmelCase : List[Any] = vocab_size
        __UpperCAmelCase : Optional[Any] = hidden_size
        __UpperCAmelCase : List[Any] = num_hidden_layers
        __UpperCAmelCase : Any = num_attention_heads
        __UpperCAmelCase : List[str] = hidden_act
        __UpperCAmelCase : List[str] = intermediate_size
        __UpperCAmelCase : Optional[int] = hidden_dropout_prob
        __UpperCAmelCase : Union[str, Any] = attention_probs_dropout_prob
        __UpperCAmelCase : str = max_position_embeddings
        __UpperCAmelCase : List[str] = type_vocab_size
        __UpperCAmelCase : Dict = initializer_range
        __UpperCAmelCase : Optional[int] = layer_norm_eps
        __UpperCAmelCase : Any = position_embedding_type
        __UpperCAmelCase : Tuple = quant_mode
        __UpperCAmelCase : Union[str, Any] = force_dequant


class __A (__magic_name__ ):
    @property
    def _snake_case ( self ):
        if self.task == "multiple-choice":
            __UpperCAmelCase : Optional[int] = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            __UpperCAmelCase : Optional[int] = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ] )
10
1
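A short sketch showing the derived hidden size of the Swin v2 configuration above; the public name `Swinv2Config` is an assumption for the obfuscated class:

from transformers import Swinv2Config  # assumed public API name

config = Swinv2Config(embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24])
print(config.hidden_size)  # 96 * 2 ** (len(depths) - 1) = 768, per the last lines of __init__ above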
'''simple docstring'''
import argparse
import shlex

import runhouse as rh


if __name__ == "__main__":
    # Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
    # setup instructions, if using on-demand hardware
    # If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
    # If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
    # Throw an error if user passes both BYO and on-demand cluster args
    # Otherwise, use default values
    _a : str = argparse.ArgumentParser()
    parser.add_argument("--user", type=str, default="ubuntu")
    parser.add_argument("--host", type=str, default="localhost")
    parser.add_argument("--key_path", type=str, default=None)
    parser.add_argument("--instance", type=str, default="V100:1")
    parser.add_argument("--provider", type=str, default="cheapest")
    parser.add_argument("--use_spot", type=bool, default=False)
    parser.add_argument("--example", type=str, default="pytorch/text-generation/run_generation.py")
    _a , _a : List[Any] = parser.parse_known_args()
    if args.host != "localhost":
        if args.instance != "V100:1" or args.provider != "cheapest":
            raise ValueError("Cannot specify both BYO and on-demand cluster args")
        _a : List[str] = rh.cluster(
            name="rh-cluster", ips=[args.host], ssh_creds={"ssh_user": args.user, "ssh_private_key": args.key_path}
        )
    else:
        _a : Optional[Any] = rh.cluster(
            name="rh-cluster", instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
        )
    _a : List[str] = args.example.rsplit("/", 1)[0]

    # Set up remote environment
    cluster.install_packages(["pip:./"])  # Installs transformers from local source
    # Note transformers is copied into the home directory on the remote machine, so we can install from there
    cluster.run([f"""pip install -r transformers/examples/{example_dir}/requirements.txt"""])
    cluster.run(["pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117"])

    # Run example. You can bypass the CLI wrapper and paste your own code here.
    cluster.run([f"""python transformers/examples/{args.example} {' '.join(shlex.quote(arg) for arg in unknown)}"""])

    # Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
    # from my_script... import train
    # reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
    # launch_train_gpu = rh.function(fn=train,
    #                                system=gpu,
    #                                reqs=reqs,
    #                                name='train_bert_glue')
    #
    # We can pass in arguments just like we would to a function:
    # launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
    #                  stream_logs=True)
10
'''simple docstring'''
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments


def _lowercase ( ) -> Dict:
    """simple docstring"""
    __UpperCAmelCase : str = HfArgumentParser(lowerCamelCase__ )
    __UpperCAmelCase : Optional[Any] = parser.parse_args_into_dataclasses()[0]
    __UpperCAmelCase : Any = TensorFlowBenchmark(args=lowerCamelCase__ )
    try:
        __UpperCAmelCase : List[Any] = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        __UpperCAmelCase : str = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        __UpperCAmelCase : Tuple = " ".join(str(lowerCamelCase__ ).split(" " )[:-1] )
        __UpperCAmelCase : Any = ""
        __UpperCAmelCase : List[Any] = eval(str(lowerCamelCase__ ).split(" " )[-1] )
        __UpperCAmelCase : Optional[int] = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:] )
            else:
                wrong_args.append(lowerCamelCase__ )
        if len(lowerCamelCase__ ) > 0:
            __UpperCAmelCase : Union[str, Any] = full_error_msg + begin_error_msg + str(lowerCamelCase__ )
            raise ValueError(lowerCamelCase__ )
    benchmark.run()


if __name__ == "__main__":
    main()
10
1
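A tiny sketch of the deprecated-flag rewrite performed in the except-branch of the benchmark entry point above; the flag name is a hypothetical example:

arg = "--no_multi_process"  # hypothetical deprecated flag
arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
print(arg_error_msg.format(arg[5:]))  # arg[5:] strips the leading "--no_"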
'''simple docstring'''
# limitations under the License.

# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput  # noqa: F401
from .utils import deprecate


deprecate(
    "pipelines_utils",
    "0.22.0",
    "Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.",
    standard_warn=False,
    stacklevel=3,
)
10
'''simple docstring''' import gc import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer import diffusers from diffusers import ( AutoencoderKL, EulerDiscreteScheduler, StableDiffusionLatentUpscalePipeline, StableDiffusionPipeline, UNetaDConditionModel, ) from diffusers.schedulers import KarrasDiffusionSchedulers from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() def _lowercase ( lowerCamelCase__ ) -> Union[str, Any]: """simple docstring""" __UpperCAmelCase : Dict = [tensor.shape for tensor in tensor_list] return all(shape == shapes[0] for shape in shapes[1:] ) class __A (__magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ): snake_case :Union[str, Any] = StableDiffusionLatentUpscalePipeline snake_case :Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - { "height", "width", "cross_attention_kwargs", "negative_prompt_embeds", "prompt_embeds", } snake_case :List[str] = PipelineTesterMixin.required_optional_params - {"num_images_per_prompt"} snake_case :Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS snake_case :Optional[Any] = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess snake_case :Any = frozenset([] ) snake_case :Optional[int] = True @property def _snake_case ( self ): __UpperCAmelCase : Optional[int] = 1 __UpperCAmelCase : Dict = 4 __UpperCAmelCase : List[str] = (16, 16) __UpperCAmelCase : Dict = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(UpperCamelCase_ ) return image def _snake_case ( self ): torch.manual_seed(0 ) __UpperCAmelCase : List[str] = UNetaDConditionModel( act_fn="gelu" , attention_head_dim=8 , norm_num_groups=UpperCamelCase_ , block_out_channels=[32, 32, 64, 64] , time_cond_proj_dim=1_60 , conv_in_kernel=1 , conv_out_kernel=1 , cross_attention_dim=32 , down_block_types=( "KDownBlock2D", "KCrossAttnDownBlock2D", "KCrossAttnDownBlock2D", "KCrossAttnDownBlock2D", ) , in_channels=8 , mid_block_type=UpperCamelCase_ , only_cross_attention=UpperCamelCase_ , out_channels=5 , resnet_time_scale_shift="scale_shift" , time_embedding_type="fourier" , timestep_post_act="gelu" , up_block_types=("KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KUpBlock2D") , ) __UpperCAmelCase : int = AutoencoderKL( block_out_channels=[32, 32, 64, 64] , in_channels=3 , out_channels=3 , down_block_types=[ "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", ] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , ) __UpperCAmelCase : Optional[int] = EulerDiscreteScheduler(prediction_type="sample" ) __UpperCAmelCase : int = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="quick_gelu" , projection_dim=5_12 , ) __UpperCAmelCase : List[str] = CLIPTextModel(UpperCamelCase_ ) __UpperCAmelCase : Tuple = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) 
__UpperCAmelCase : Union[str, Any] = { "unet": model.eval(), "vae": vae.eval(), "scheduler": scheduler, "text_encoder": text_encoder, "tokenizer": tokenizer, } return components def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_=0 ): if str(UpperCamelCase_ ).startswith("mps" ): __UpperCAmelCase : str = torch.manual_seed(UpperCamelCase_ ) else: __UpperCAmelCase : Optional[int] = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ ) __UpperCAmelCase : Any = { "prompt": "A painting of a squirrel eating a burger", "image": self.dummy_image.cpu(), "generator": generator, "num_inference_steps": 2, "output_type": "numpy", } return inputs def _snake_case ( self ): __UpperCAmelCase : List[str] = "cpu" __UpperCAmelCase : List[str] = self.get_dummy_components() __UpperCAmelCase : Tuple = self.pipeline_class(**UpperCamelCase_ ) pipe.to(UpperCamelCase_ ) pipe.set_progress_bar_config(disable=UpperCamelCase_ ) __UpperCAmelCase : Any = self.get_dummy_inputs(UpperCamelCase_ ) __UpperCAmelCase : int = pipe(**UpperCamelCase_ ).images __UpperCAmelCase : Any = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 2_56, 2_56, 3) ) __UpperCAmelCase : Tuple = np.array( [0.4_7_2_2_2_4_1_2, 0.4_1_9_2_1_6_3_3, 0.4_4_7_1_7_4_3_4, 0.4_6_8_7_4_1_9_2, 0.4_2_5_8_8_2_5_8, 0.4_6_1_5_0_7_2_6, 0.4_6_7_7_5_3_4, 0.4_5_5_8_3_8_3_2, 0.4_8_5_7_9_0_5_5] ) __UpperCAmelCase : List[str] = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(UpperCamelCase_ , 1E-3 ) def _snake_case ( self ): super().test_attention_slicing_forward_pass(expected_max_diff=7E-3 ) def _snake_case ( self ): super().test_cpu_offload_forward_pass(expected_max_diff=3E-3 ) def _snake_case ( self ): super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 ) def _snake_case ( self ): super().test_inference_batch_single_identical(expected_max_diff=7E-3 ) def _snake_case ( self ): super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3E-3 ) def _snake_case ( self ): super().test_save_load_local(expected_max_difference=3E-3 ) def _snake_case ( self ): super().test_save_load_optional_components(expected_max_difference=3E-3 ) def _snake_case ( self ): __UpperCAmelCase : Dict = [ "DDIMScheduler", "DDPMScheduler", "PNDMScheduler", "HeunDiscreteScheduler", "EulerAncestralDiscreteScheduler", "KDPM2DiscreteScheduler", "KDPM2AncestralDiscreteScheduler", "DPMSolverSDEScheduler", ] __UpperCAmelCase : Tuple = self.get_dummy_components() __UpperCAmelCase : Union[str, Any] = self.pipeline_class(**UpperCamelCase_ ) # make sure that PNDM does not need warm-up pipe.scheduler.register_to_config(skip_prk_steps=UpperCamelCase_ ) pipe.to(UpperCamelCase_ ) pipe.set_progress_bar_config(disable=UpperCamelCase_ ) __UpperCAmelCase : Tuple = self.get_dummy_inputs(UpperCamelCase_ ) __UpperCAmelCase : List[str] = 2 __UpperCAmelCase : List[str] = [] for scheduler_enum in KarrasDiffusionSchedulers: if scheduler_enum.name in skip_schedulers: # no sigma schedulers are not supported # no schedulers continue __UpperCAmelCase : Optional[int] = getattr(UpperCamelCase_ , scheduler_enum.name ) __UpperCAmelCase : List[str] = scheduler_cls.from_config(pipe.scheduler.config ) __UpperCAmelCase : Optional[int] = pipe(**UpperCamelCase_ )[0] outputs.append(UpperCamelCase_ ) assert check_same_shape(UpperCamelCase_ ) @require_torch_gpu @slow class __A (unittest.TestCase ): def _snake_case ( self ): super().tearDown() gc.collect() torch.cuda.empty_cache() def _snake_case ( self ): __UpperCAmelCase : Optional[int] = torch.manual_seed(33 ) 
__UpperCAmelCase : str = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4" , torch_dtype=torch.floataa ) pipe.to("cuda" ) __UpperCAmelCase : Union[str, Any] = StableDiffusionLatentUpscalePipeline.from_pretrained( "stabilityai/sd-x2-latent-upscaler" , torch_dtype=torch.floataa ) upscaler.to("cuda" ) __UpperCAmelCase : Optional[int] = "a photo of an astronaut high resolution, unreal engine, ultra realistic" __UpperCAmelCase : Any = pipe(UpperCamelCase_ , generator=UpperCamelCase_ , output_type="latent" ).images __UpperCAmelCase : int = upscaler( prompt=UpperCamelCase_ , image=UpperCamelCase_ , num_inference_steps=20 , guidance_scale=0 , generator=UpperCamelCase_ , output_type="np" , ).images[0] __UpperCAmelCase : Optional[Any] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy" ) assert np.abs((expected_image - image).mean() ) < 5E-2 def _snake_case ( self ): __UpperCAmelCase : List[Any] = torch.manual_seed(33 ) __UpperCAmelCase : Union[str, Any] = StableDiffusionLatentUpscalePipeline.from_pretrained( "stabilityai/sd-x2-latent-upscaler" , torch_dtype=torch.floataa ) upscaler.to("cuda" ) __UpperCAmelCase : Optional[Any] = "the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas" __UpperCAmelCase : str = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png" ) __UpperCAmelCase : Dict = upscaler( prompt=UpperCamelCase_ , image=UpperCamelCase_ , num_inference_steps=20 , guidance_scale=0 , generator=UpperCamelCase_ , output_type="np" , ).images[0] __UpperCAmelCase : Tuple = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy" ) assert np.abs((expected_image - image).max() ) < 5E-2
10
1
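A condensed usage sketch of the latent upscaler exercised in the slow tests above, assuming a CUDA device and network access to the Hub:

import torch
from diffusers import StableDiffusionLatentUpscalePipeline

upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
    "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
)
upscaler.to("cuda")
# low-resolution latents would come from a base pipeline called with output_type="latent",
# as in the first slow test above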
'''simple docstring'''
from __future__ import annotations

import numpy as np
from numpy import floataa
from numpy.typing import NDArray


def _lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , ) -> list[float]:
    """simple docstring"""
    __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = coefficient_matrix.shape
    __UpperCAmelCase , __UpperCAmelCase : Any = constant_matrix.shape

    if rowsa != colsa:
        __UpperCAmelCase : str = f"""Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}"""
        raise ValueError(lowerCamelCase__ )

    if colsa != 1:
        __UpperCAmelCase : Optional[Any] = f"""Constant matrix must be nx1 but received {rowsa}x{colsa}"""
        raise ValueError(lowerCamelCase__ )

    if rowsa != rowsa:
        __UpperCAmelCase : Optional[int] = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"""received {rowsa}x{colsa} and {rowsa}x{colsa}"""
        )
        raise ValueError(lowerCamelCase__ )

    if len(lowerCamelCase__ ) != rowsa:
        __UpperCAmelCase : List[str] = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"""matrix but received {len(lowerCamelCase__ )} and {rowsa}"""
        )
        raise ValueError(lowerCamelCase__ )

    if iterations <= 0:
        raise ValueError("Iterations must be at least 1" )

    __UpperCAmelCase : NDArray[floataa] = np.concatenate(
        (coefficient_matrix, constant_matrix) , axis=1 )

    __UpperCAmelCase , __UpperCAmelCase : Tuple = table.shape

    strictly_diagonally_dominant(lowerCamelCase__ )

    # Iterates the whole matrix for given number of times
    for _ in range(lowerCamelCase__ ):
        __UpperCAmelCase : int = []
        for row in range(lowerCamelCase__ ):
            __UpperCAmelCase : List[str] = 0
            for col in range(lowerCamelCase__ ):
                if col == row:
                    __UpperCAmelCase : int = table[row][col]
                elif col == cols - 1:
                    __UpperCAmelCase : Any = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            __UpperCAmelCase : List[Any] = (temp + val) / denom
            new_val.append(lowerCamelCase__ )
        __UpperCAmelCase : str = new_val

    return [float(lowerCamelCase__ ) for i in new_val]


def _lowercase ( lowerCamelCase__ ) -> bool:
    """simple docstring"""
    __UpperCAmelCase , __UpperCAmelCase : Optional[int] = table.shape

    __UpperCAmelCase : str = True

    for i in range(0 , lowerCamelCase__ ):
        __UpperCAmelCase : Union[str, Any] = 0
        for j in range(0 , cols - 1 ):
            if i == j:
                continue
            else:
                total += table[i][j]

        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant" )

    return is_diagonally_dominant


# Test Cases
if __name__ == "__main__":
    import doctest

    doctest.testmod()
10
'''simple docstring'''
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING

import numpy as np
import pyarrow as pa

from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter


if TYPE_CHECKING:
    import torch


class __A (TensorFormatter[Mapping, "torch.Tensor", Mapping] ):
    def __init__( self , UpperCamelCase_=None , **UpperCamelCase_ ):
        super().__init__(features=UpperCamelCase_ )
        __UpperCAmelCase : Union[str, Any] = torch_tensor_kwargs
        import torch  # noqa import torch at initialization

    def _snake_case ( self , UpperCamelCase_ ):
        import torch

        if isinstance(UpperCamelCase_ , UpperCamelCase_ ) and column:
            if all(
                isinstance(UpperCamelCase_ , torch.Tensor ) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column ):
                return torch.stack(UpperCamelCase_ )
        return column

    def _snake_case ( self , UpperCamelCase_ ):
        import torch

        if isinstance(UpperCamelCase_ , (str, bytes, type(UpperCamelCase_ )) ):
            return value
        elif isinstance(UpperCamelCase_ , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
            return value.tolist()

        __UpperCAmelCase : int = {}
        if isinstance(UpperCamelCase_ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
            __UpperCAmelCase : Optional[int] = {"dtype": torch.intaa}
        elif isinstance(UpperCamelCase_ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
            __UpperCAmelCase : str = {"dtype": torch.floataa}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(UpperCamelCase_ , PIL.Image.Image ):
                __UpperCAmelCase : str = np.asarray(UpperCamelCase_ )

        return torch.tensor(UpperCamelCase_ , **{**default_dtype, **self.torch_tensor_kwargs} )

    def _snake_case ( self , UpperCamelCase_ ):
        import torch

        # support for torch, tf, jax etc.
        if hasattr(UpperCamelCase_ , "__array__" ) and not isinstance(UpperCamelCase_ , torch.Tensor ):
            __UpperCAmelCase : Dict = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(UpperCamelCase_ , np.ndarray ):
            if data_struct.dtype == object:
                # torch tensors cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(UpperCamelCase_ ) for substruct in data_struct] )
        elif isinstance(UpperCamelCase_ , (list, tuple) ):
            return self._consolidate([self.recursive_tensorize(UpperCamelCase_ ) for substruct in data_struct] )
        return self._tensorize(UpperCamelCase_ )

    def _snake_case ( self , UpperCamelCase_ ):
        return map_nested(self._recursive_tensorize , UpperCamelCase_ , map_list=UpperCamelCase_ )

    def _snake_case ( self , UpperCamelCase_ ):
        __UpperCAmelCase : List[str] = self.numpy_arrow_extractor().extract_row(UpperCamelCase_ )
        __UpperCAmelCase : Union[str, Any] = self.python_features_decoder.decode_row(UpperCamelCase_ )
        return self.recursive_tensorize(UpperCamelCase_ )

    def _snake_case ( self , UpperCamelCase_ ):
        __UpperCAmelCase : Union[str, Any] = self.numpy_arrow_extractor().extract_column(UpperCamelCase_ )
        __UpperCAmelCase : Optional[Any] = self.python_features_decoder.decode_column(UpperCamelCase_ , pa_table.column_names[0] )
        __UpperCAmelCase : List[Any] = self.recursive_tensorize(UpperCamelCase_ )
        __UpperCAmelCase : List[str] = self._consolidate(UpperCamelCase_ )
        return column

    def _snake_case ( self , UpperCamelCase_ ):
        __UpperCAmelCase : int = self.numpy_arrow_extractor().extract_batch(UpperCamelCase_ )
        __UpperCAmelCase : Any = self.python_features_decoder.decode_batch(UpperCamelCase_ )
        __UpperCAmelCase : Optional[int] = self.recursive_tensorize(UpperCamelCase_ )
        for column_name in batch:
            __UpperCAmelCase : Tuple = self._consolidate(batch[column_name] )
        return batch
10
1
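A usage sketch for the Jacobi solver above on a strictly diagonally dominant system; `jacobi_iteration_method` is an assumed readable name for the obfuscated `_lowercase`:

import numpy as np

coefficient = np.array([[4.0, 1.0, 1.0], [1.0, 5.0, 2.0], [1.0, 2.0, 4.0]])
constant = np.array([[2.0], [-6.0], [-4.0]])
init_val = [0.5, -0.5, -0.5]
print(jacobi_iteration_method(coefficient, constant, init_val, iterations=50))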
'''simple docstring'''
_a : List[str] = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
_a : int = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}


def _lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> list[int]:
    """simple docstring"""
    __UpperCAmelCase : int = True
    __UpperCAmelCase : Dict = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
    order.append(lowerCamelCase__ )
    return order


def _lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> list[int]:
    """simple docstring"""
    __UpperCAmelCase : List[str] = True
    __UpperCAmelCase : Tuple = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
    return component


def _lowercase ( lowerCamelCase__ ) -> list[list[int]]:
    """simple docstring"""
    __UpperCAmelCase : Dict = len(lowerCamelCase__ ) * [False]
    __UpperCAmelCase : dict[int, list[int]] = {vert: [] for vert in range(len(lowerCamelCase__ ) )}
    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(lowerCamelCase__ )

    __UpperCAmelCase : List[str] = []
    for i, was_visited in enumerate(lowerCamelCase__ ):
        if not was_visited:
            order += topology_sort(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )

    __UpperCAmelCase : Dict = []
    __UpperCAmelCase : int = len(lowerCamelCase__ ) * [False]

    for i in range(len(lowerCamelCase__ ) ):
        __UpperCAmelCase : Optional[int] = order[len(lowerCamelCase__ ) - i - 1]
        if not visited[vert]:
            __UpperCAmelCase : Dict = find_components(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
            components_list.append(lowerCamelCase__ )

    return components_list
10
'''simple docstring'''
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> bool:
    """simple docstring"""
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(lowerCamelCase__ ) )


def _lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> bool:
    """simple docstring"""
    if index == len(lowerCamelCase__ ):
        return True

    # Recursive Step
    for i in range(lowerCamelCase__ ):
        if valid_coloring(graph[index] , lowerCamelCase__ , lowerCamelCase__ ):
            # Color current vertex
            __UpperCAmelCase : List[str] = i
            # Validate coloring
            if util_color(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , index + 1 ):
                return True
            # Backtrack
            __UpperCAmelCase : Any = -1
    return False


def _lowercase ( lowerCamelCase__ , lowerCamelCase__ ) -> list[int]:
    """simple docstring"""
    __UpperCAmelCase : Optional[Any] = [-1] * len(lowerCamelCase__ )

    if util_color(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , 0 ):
        return colored_vertices

    return []
10
1
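A usage sketch for the backtracking m-coloring routine above on a triangle graph given as an adjacency matrix; `color` is an assumed readable name for the obfuscated entry point:

graph = [[0, 1, 1], [1, 0, 1], [1, 1, 0]]
print(color(graph, 3))  # a valid assignment such as [0, 1, 2]
print(color(graph, 2))  # [] -- a triangle cannot be 2-colored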
'''simple docstring'''
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# this script dumps information about the environment
import os
import platform
import sys


_a : List[str] = "3"

print("Python version:", sys.version)

print("OS platform:", platform.platform())
print("OS architecture:", platform.machine())

try:
    import torch

    print("Torch version:", torch.__version__)
    print("Cuda available:", torch.cuda.is_available())
    print("Cuda version:", torch.version.cuda)
    print("CuDNN version:", torch.backends.cudnn.version())
    print("Number of GPUs available:", torch.cuda.device_count())
except ImportError:
    print("Torch version:", None)

try:
    import transformers

    print("transformers version:", transformers.__version__)
except ImportError:
    print("transformers version:", None)
10
'''simple docstring'''
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ ) -> int:
    """simple docstring"""
    return number | (1 << position)


def _lowercase ( lowerCamelCase__ , lowerCamelCase__ ) -> int:
    """simple docstring"""
    return number & ~(1 << position)


def _lowercase ( lowerCamelCase__ , lowerCamelCase__ ) -> int:
    """simple docstring"""
    return number ^ (1 << position)


def _lowercase ( lowerCamelCase__ , lowerCamelCase__ ) -> bool:
    """simple docstring"""
    return ((number >> position) & 1) == 1


def _lowercase ( lowerCamelCase__ , lowerCamelCase__ ) -> int:
    """simple docstring"""
    return int((number & (1 << position)) != 0 )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
10
1
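A quick demonstration of the bit helpers above; the readable names are assumptions for the five obfuscated `_lowercase` functions, in definition order (set, clear, flip, test, get):

print(set_bit(0b1101, 1))     # 15 (0b1111)
print(clear_bit(0b1111, 1))   # 13 (0b1101)
print(flip_bit(0b1101, 1))    # 15 (0b1111)
print(is_bit_set(0b1010, 3))  # True
print(get_bit(0b1010, 1))     # 1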
"""Fast tokenization classes for DPR."""
import collections
from typing import List, Optional, Union

from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-ctx_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-ctx_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-ctx_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-ctx_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-question_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-question_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-question_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-question_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-reader-single-nq-base": (
            "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-reader-multiset-base": (
            "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-reader-single-nq-base": (
            "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-reader-multiset-base": (
            "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}

CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-ctx_encoder-single-nq-base": 512,
    "facebook/dpr-ctx_encoder-multiset-base": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-question_encoder-single-nq-base": 512,
    "facebook/dpr-question_encoder-multiset-base": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-reader-single-nq-base": 512,
    "facebook/dpr-reader-multiset-base": 512,
}

CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}


class DPRContextEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer


class DPRQuestionEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer


DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])


CUSTOM_DPR_READER_DOCSTRING = r"""
    Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
    It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
    using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
    with the format:

    [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>

    Args:
        questions (`str` or `List[str]`):
            The questions to be encoded. You can specify one question for many passages. In this case, the question
            will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
            `titles` or `texts`.
        titles (`str` or `List[str]`):
            The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
        texts (`str` or `List[str]`):
            The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
        padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
            Activates and controls padding. Accepts the following values:

            - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
              if provided).
            - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided.
            - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
              lengths).
        truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
            Activates and controls truncation. Accepts the following values:

            - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
              the maximum acceptable input length for the model if that argument is not provided. This will truncate
              token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a
              batch of pairs) is provided.
            - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided. This will only truncate the
              first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided. This will only truncate the
              second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
              greater than the model maximum admissible input size).
        max_length (`int`, *optional*):
            Controls the maximum length to use by one of the truncation/padding parameters.

            If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
            is required by one of the truncation/padding parameters. If the model has no specific maximum input
            length (like XLNet) truncation/padding to a maximum length will be deactivated.
        return_tensors (`str` or [`~utils.TensorType`], *optional*):
            If set, will return tensors instead of list of python integers. Acceptable values are:

            - `'tf'`: Return TensorFlow `tf.constant` objects.
            - `'pt'`: Return PyTorch `torch.Tensor` objects.
            - `'np'`: Return Numpy `np.ndarray` objects.
        return_attention_mask (`bool`, *optional*):
            Whether or not to return the attention mask. If not set, will return the attention mask according to the
            specific tokenizer's default, defined by the `return_outputs` attribute.

            [What are attention masks?](../glossary#attention-mask)

    Return:
        `Dict[str, List[List[int]]]`: A dictionary with the following keys:

        - `input_ids`: List of token ids to be fed to a model.
        - `attention_mask`: List of indices specifying which tokens should be attended to by the model.
    """


@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        assert len(titles) == len(
            texts
        ), f"There should be as many titles as texts but got {len(titles)} titles and {len(texts)} texts."
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)

    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: DPRReaderOutput,
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRSpanPrediction] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]

    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ) -> List[DPRSpanPrediction]:
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, f"Wrong span indices: [{start_index}:{end_index}]"
            length = end_index - start_index + 1
            assert length <= max_answer_length, f"Span is too long: {length} > {max_answer_length}"
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals


@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizerFast(CustomDPRReaderTokenizerMixin, BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DPRReaderTokenizer
10
"""k-nearest-neighbours classification of the iris dataset."""
from collections import Counter

import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split

data = datasets.load_iris()

X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]

X_train, X_test, y_train, y_test = train_test_split(X, y)


def euclidean_distance(a, b) -> float:
    """Euclidean distance between two points `a` and `b`."""
    return np.linalg.norm(np.array(a) - np.array(b))


def classifier(train_data, train_target, classes, point, k=5) -> str:
    """Classify `point` by majority vote among its `k` nearest training points."""
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]


if __name__ == "__main__":
    print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
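# Illustrative follow-up (an addition for clarity, reusing the names defined
# above): score the classifier on the held-out split that train_test_split
# produced, instead of a single hand-picked point.
correct = sum(
    classes[target] == classifier(X_train, y_train, classes, point)
    for point, target in zip(X_test, y_test)
)
print(f"held-out accuracy: {correct / len(y_test):.2f}")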
10
1
"""BLEURT metric."""
import os

from bleurt import score  # From: git+https://github.com/google-research/bleurt.git

import datasets

logger = datasets.logging.get_logger(__name__)

_CITATION = """\
@inproceedings{bleurt,
  title={BLEURT: Learning Robust Metrics for Text Generation},
  author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},
  booktitle={ACL},
  year={2020},
  url={https://arxiv.org/abs/2004.04696}
}
"""

_DESCRIPTION = """\
BLEURT is a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer
learning starting from a pretrained BERT model (Devlin et al. 2018) and then employing another pre-training phase
using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune
it for your specific application (the latter is expected to perform better).

See the project's README at https://github.com/google-research/bleurt#readme for more information.
"""

_KWARGS_DESCRIPTION = """
BLEURT score.

Args:
    `predictions` (list of str): prediction/candidate sentences
    `references` (list of str): reference sentences
    `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.

Returns:
    'scores': List of scores.
Examples:

    >>> predictions = ["hello there", "general kenobi"]
    >>> references = ["hello there", "general kenobi"]
    >>> bleurt = datasets.load_metric("bleurt")
    >>> results = bleurt.compute(predictions=predictions, references=references)
    >>> print([round(v, 2) for v in results["scores"]])
    [1.03, 1.04]
"""

CHECKPOINT_URLS = {
    "bleurt-tiny-128": "https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip",
    "bleurt-tiny-512": "https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip",
    "bleurt-base-128": "https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip",
    "bleurt-base-512": "https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip",
    "bleurt-large-128": "https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip",
    "bleurt-large-512": "https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip",
    "BLEURT-20-D3": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip",
    "BLEURT-20-D6": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip",
    "BLEURT-20-D12": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip",
    "BLEURT-20": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip",
}


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class BLEURT(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/google-research/bleurt",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/google-research/bleurt"],
            reference_urls=["https://github.com/google-research/bleurt", "https://arxiv.org/abs/2004.04696"],
        )

    def _download_and_prepare(self, dl_manager):
        # check that config name specifies a valid BLEURT model
        if self.config_name == "default":
            logger.warning(
                "Using default BLEURT-Base checkpoint for sequence maximum length 128. "
                "You can use a bigger model for better results with e.g.: datasets.load_metric('bleurt', 'bleurt-large-512')."
            )
            self.config_name = "bleurt-base-128"

        if self.config_name.lower() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.lower()
        elif self.config_name.upper() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.upper()
        else:
            raise KeyError(
                f"{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}"
            )

        # download the model checkpoint specified by self.config_name and set up the scorer
        model_path = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name])
        self.scorer = score.BleurtScorer(os.path.join(model_path, checkpoint_name))

    def _compute(self, predictions, references):
        scores = self.scorer.score(references=references, candidates=predictions)
        return {"scores": scores}
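# Illustrative usage sketch (assumes the `datasets` library plus the `bleurt` pip
# package are installed; the checkpoint below is one of the CHECKPOINT_URLS keys
# and is downloaded on first use).
import datasets

bleurt = datasets.load_metric("bleurt", "bleurt-tiny-128")
results = bleurt.compute(
    predictions=["hello there", "general kenobi"],
    references=["hello there", "general kenobi"],
)
print(results["scores"])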
10
"""Disjoint-set (union-find) that also tracks the size of the largest set."""


class DisjointSet:
    def __init__(self, set_counts: list) -> None:
        """`set_counts` holds the initial size of each set."""
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))

    def merge(self, src: int, dst: int) -> bool:
        """Union-by-rank of the sets containing `src` and `dst`; returns False
        if they were already the same set."""
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)

        if src_parent == dst_parent:
            return False

        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]

        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set: int) -> int:
        """Find the set representative, compressing the path along the way."""
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
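# Quick illustrative run (made-up sizes; `DisjointSet` is the name restored in
# the definition above): three singleton sets, merge the first two, and confirm
# that the largest-set tracker updates.
ds = DisjointSet([1, 1, 1])
assert ds.merge(0, 1) is True
assert ds.get_parent(0) == ds.get_parent(1)
assert ds.max_set == 2
assert ds.merge(0, 1) is False  # already in the same set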
10
1
"""Tokenization classes for BART."""
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple

import regex as re

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
    },
    "merges_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/bart-base": 1024,
    "facebook/bart-large": 1024,
    "facebook/bart-large-mnli": 1024,
    "facebook/bart-large-cnn": 1024,
    "facebook/bart-large-xsum": 1024,
    "yjernite/bart_eli5": 1024,
}


@lru_cache()
def bytes_to_unicode():
    """Return a mapping from utf-8 bytes to printable unicode strings, so that
    byte-level BPE never has to operate on whitespace/control characters."""
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """Return the set of adjacent symbol pairs in `word` (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


class BartTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
10
"""Numerical integration via the extended trapezoidal rule."""


def method_2(boundary: list, steps: float) -> float:
    """Approximate the integral of f over `boundary` using `steps` panels of width h."""
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        # print(i)
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y


def make_points(a: float, b: float, h: float):
    """Yield the interior sample points a + h, a + 2h, ... strictly inside (a, b)."""
    x = a + h
    while x < (b - h):
        yield x
        x = x + h


def f(x: float) -> float:  # enter your function here
    y = (x - 0) * (x - 0)
    return y


def main():
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_2(boundary, steps)
    print(f"y = {y}")


if __name__ == "__main__":
    main()
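# Illustrative convergence check (step counts are made-up examples): for
# f(x) = x * x on [0.0, 1.0] the exact integral is 1/3, so the printed
# estimates should move toward 0.3333... as the step count grows.
for n in (10.0, 100.0, 1000.0):
    print(n, method_2([0.0, 1.0], n))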
10
1