code
stringlengths
87
55.2k
code_codestyle
int64
0
349
style_context
stringlengths
135
49.1k
style_context_codestyle
int64
0
349
label
int64
0
1
import logging import os import sys from dataclasses import dataclass, field from typing import Optional import torch from datasets import load_dataset from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor from torchvision.transforms.functional import InterpolationMode import transformers from transformers import ( HfArgumentParser, Trainer, TrainingArguments, ViTImageProcessor, ViTMAEConfig, ViTMAEForPreTraining, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version lowerCamelCase__ = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("""4.31.0""") require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt""") @dataclass class SCREAMING_SNAKE_CASE : __lowerCamelCase : Any =field( default='cifar10' , metadata={'help': 'Name of a dataset from the datasets package'} ) __lowerCamelCase : Optional[int] =field( default=UpperCAmelCase_ , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} ) __lowerCamelCase : Union[str, Any] =field( default=UpperCAmelCase_ , metadata={'help': 'The column name of the images in the files.'} ) __lowerCamelCase : Tuple =field(default=UpperCAmelCase_ , metadata={'help': 'A folder containing the training data.'} ) __lowerCamelCase : int =field(default=UpperCAmelCase_ , metadata={'help': 'A folder containing the validation data.'} ) __lowerCamelCase : Optional[Any] =field( default=0.15 , metadata={'help': 'Percent to split off of train for validation.'} ) __lowerCamelCase : str =field( default=UpperCAmelCase_ , metadata={ 'help': ( 'For debugging purposes or quicker training, truncate the number of training examples to this ' 'value if set.' ) } , ) __lowerCamelCase : Optional[int] =field( default=UpperCAmelCase_ , metadata={ 'help': ( 'For debugging purposes or quicker training, truncate the number of evaluation examples to this ' 'value if set.' ) } , ) def UpperCamelCase_ ( self : Tuple ): '''simple docstring''' __a = {} if self.train_dir is not None: __a = self.train_dir if self.validation_dir is not None: __a = self.validation_dir __a = data_files if data_files else None @dataclass class SCREAMING_SNAKE_CASE : __lowerCamelCase : Union[str, Any] =field( default=UpperCAmelCase_ , metadata={ 'help': ( 'The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.' ) } , ) __lowerCamelCase : Union[str, Any] =field( default=UpperCAmelCase_ , metadata={'help': 'Pretrained config name or path if not the same as model_name_or_path'} ) __lowerCamelCase : Dict =field( default=UpperCAmelCase_ , metadata={ 'help': ( 'Override some existing default config settings when a model is trained from scratch. Example: ' 'n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index' ) } , ) __lowerCamelCase : Dict =field( default=UpperCAmelCase_ , metadata={'help': 'Where do you want to store the pretrained models downloaded from s3'} ) __lowerCamelCase : Tuple =field( default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , ) __lowerCamelCase : Tuple =field(default=UpperCAmelCase_ , metadata={'help': 'Name or path of preprocessor config.'} ) __lowerCamelCase : List[Any] =field( default=UpperCAmelCase_ , metadata={ 'help': ( 'Will use the token generated when running `huggingface-cli login` (necessary to use this script ' 'with private models).' ) } , ) __lowerCamelCase : Optional[Any] =field( default=0.75 , metadata={'help': 'The ratio of the number of masked tokens in the input sequence.'} ) __lowerCamelCase : Union[str, Any] =field( default=UpperCAmelCase_ , metadata={'help': 'Whether or not to train with normalized pixel values as target.'} ) @dataclass class SCREAMING_SNAKE_CASE ( UpperCAmelCase_ ): __lowerCamelCase : Tuple =field( default=1e-3 , metadata={'help': 'Base learning rate: absolute_lr = base_lr * total_batch_size / 256.'} ) def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : Dict ): """simple docstring""" __a = torch.stack([example["""pixel_values"""] for example in examples] ) return {"pixel_values": pixel_values} def lowerCAmelCase__ ( ): """simple docstring""" __a = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. __a , __a , __a = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: __a , __a , __a = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("""run_mae""" , __A , __A ) # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() __a = training_args.get_process_log_level() logger.setLevel(__A ) transformers.utils.logging.set_verbosity(__A ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + f"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" ) logger.info(f"Training/evaluation parameters {training_args}" ) # Detecting last checkpoint. __a = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: __a = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. " """Use --overwrite_output_dir to overcome.""" ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " """the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" ) # Initialize our dataset. __a = load_dataset( data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # If we don't have a validation split, split off a percentage of train as validation. __a = None if """validation""" in ds.keys() else data_args.train_val_split if isinstance(data_args.train_val_split , __A ) and data_args.train_val_split > 0.0: __a = ds["""train"""].train_test_split(data_args.train_val_split ) __a = split["""train"""] __a = split["""test"""] # Load pretrained model and image processor # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. __a = { """cache_dir""": model_args.cache_dir, """revision""": model_args.model_revision, """use_auth_token""": True if model_args.use_auth_token else None, } if model_args.config_name: __a = ViTMAEConfig.from_pretrained(model_args.config_name , **__A ) elif model_args.model_name_or_path: __a = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **__A ) else: __a = ViTMAEConfig() logger.warning("""You are instantiating a new config instance from scratch.""" ) if model_args.config_overrides is not None: logger.info(f"Overriding config: {model_args.config_overrides}" ) config.update_from_string(model_args.config_overrides ) logger.info(f"New config: {config}" ) # adapt config config.update( { """mask_ratio""": model_args.mask_ratio, """norm_pix_loss""": model_args.norm_pix_loss, } ) # create image processor if model_args.image_processor_name: __a = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **__A ) elif model_args.model_name_or_path: __a = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **__A ) else: __a = ViTImageProcessor() # create model if model_args.model_name_or_path: __a = ViTMAEForPreTraining.from_pretrained( model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=__A , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) else: logger.info("""Training new model from scratch""" ) __a = ViTMAEForPreTraining(__A ) if training_args.do_train: __a = ds["""train"""].column_names else: __a = ds["""validation"""].column_names if data_args.image_column_name is not None: __a = data_args.image_column_name elif "image" in column_names: __a = """image""" elif "img" in column_names: __a = """img""" else: __a = column_names[0] # transformations as done in original MAE paper # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py if "shortest_edge" in image_processor.size: __a = image_processor.size["""shortest_edge"""] else: __a = (image_processor.size["""height"""], image_processor.size["""width"""]) __a = Compose( [ Lambda(lambda _SCREAMING_SNAKE_CASE : img.convert("""RGB""" ) if img.mode != "RGB" else img ), RandomResizedCrop(__A , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ), RandomHorizontalFlip(), ToTensor(), Normalize(mean=image_processor.image_mean , std=image_processor.image_std ), ] ) def preprocess_images(_SCREAMING_SNAKE_CASE : Optional[int] ): __a = [transforms(__A ) for image in examples[image_column_name]] return examples if training_args.do_train: if "train" not in ds: raise ValueError("""--do_train requires a train dataset""" ) if data_args.max_train_samples is not None: __a = ds["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) # Set the training transforms ds["train"].set_transform(__A ) if training_args.do_eval: if "validation" not in ds: raise ValueError("""--do_eval requires a validation dataset""" ) if data_args.max_eval_samples is not None: __a = ( ds["""validation"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms ds["validation"].set_transform(__A ) # Compute absolute learning rate __a = ( training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size ) if training_args.base_learning_rate is not None: __a = training_args.base_learning_rate * total_train_batch_size / 256 # Initialize our trainer __a = Trainer( model=__A , args=__A , train_dataset=ds["""train"""] if training_args.do_train else None , eval_dataset=ds["""validation"""] if training_args.do_eval else None , tokenizer=__A , data_collator=__A , ) # Training if training_args.do_train: __a = None if training_args.resume_from_checkpoint is not None: __a = training_args.resume_from_checkpoint elif last_checkpoint is not None: __a = last_checkpoint __a = trainer.train(resume_from_checkpoint=__A ) trainer.save_model() trainer.log_metrics("""train""" , train_result.metrics ) trainer.save_metrics("""train""" , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: __a = trainer.evaluate() trainer.log_metrics("""eval""" , __A ) trainer.save_metrics("""eval""" , __A ) # Write model card and (optionally) push to hub __a = { """tasks""": """masked-auto-encoding""", """dataset""": data_args.dataset_name, """tags""": ["""masked-auto-encoding"""], } if training_args.push_to_hub: trainer.push_to_hub(**__A ) else: trainer.create_model_card(**__A ) def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : str ): """simple docstring""" main() if __name__ == "__main__": main()
302
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig a__ : Union[str, Any] = { 'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/config.json', 'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/config.json', 'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/config.json', 'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json', 'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/config.json', 'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/config.json', 'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/config.json', 'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json', } class UpperCAmelCase__ ( UpperCAmelCase_): __SCREAMING_SNAKE_CASE = '''albert''' def __init__( self , lowercase=3_0_0_0_0 , lowercase=1_2_8 , lowercase=4_0_9_6 , lowercase=1_2 , lowercase=1 , lowercase=6_4 , lowercase=1_6_3_8_4 , lowercase=1 , lowercase="gelu_new" , lowercase=0 , lowercase=0 , lowercase=5_1_2 , lowercase=2 , lowercase=0.02 , lowercase=1E-12 , lowercase=0.1 , lowercase="absolute" , lowercase=0 , lowercase=2 , lowercase=3 , **lowercase , ) -> Any: super().__init__(pad_token_id=lowercase , bos_token_id=lowercase , eos_token_id=lowercase , **lowercase ) __UpperCamelCase = vocab_size __UpperCamelCase = embedding_size __UpperCamelCase = hidden_size __UpperCamelCase = num_hidden_layers __UpperCamelCase = num_hidden_groups __UpperCamelCase = num_attention_heads __UpperCamelCase = inner_group_num __UpperCamelCase = hidden_act __UpperCamelCase = intermediate_size __UpperCamelCase = hidden_dropout_prob __UpperCamelCase = attention_probs_dropout_prob __UpperCamelCase = max_position_embeddings __UpperCamelCase = type_vocab_size __UpperCamelCase = initializer_range __UpperCamelCase = layer_norm_eps __UpperCamelCase = classifier_dropout_prob __UpperCamelCase = position_embedding_type class UpperCAmelCase__ ( UpperCAmelCase_): @property def __lowerCamelCase ( self ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": __UpperCamelCase = {0: """batch""", 1: """choice""", 2: """sequence"""} else: __UpperCamelCase = {0: """batch""", 1: """sequence"""} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis), ] )
349
0
"""simple docstring""" import unittest from transformers import XLMConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( XLMForMultipleChoice, XLMForQuestionAnswering, XLMForQuestionAnsweringSimple, XLMForSequenceClassification, XLMForTokenClassification, XLMModel, XLMWithLMHeadModel, ) from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST class __lowerCamelCase : '''simple docstring''' def __init__( self : Optional[Any] , a_ : List[str] , a_ : List[Any]=13 , a_ : List[Any]=7 , a_ : Any=True , a_ : str=True , a_ : Optional[int]=True , a_ : str=True , a_ : Any=True , a_ : Any=False , a_ : str=False , a_ : str=False , a_ : int=2 , a_ : Optional[Any]=99 , a_ : Optional[Any]=0 , a_ : Optional[int]=32 , a_ : int=5 , a_ : int=4 , a_ : Tuple=0.1 , a_ : List[Any]=0.1 , a_ : List[str]=5_12 , a_ : Optional[Any]=2 , a_ : Tuple=0.02 , a_ : Dict=2 , a_ : Any=4 , a_ : str="last" , a_ : Optional[Any]=True , a_ : Tuple=None , a_ : List[str]=0 , ): lowerCAmelCase_ : List[str] = parent lowerCAmelCase_ : Union[str, Any] = batch_size lowerCAmelCase_ : str = seq_length lowerCAmelCase_ : Union[str, Any] = is_training lowerCAmelCase_ : Tuple = use_input_lengths lowerCAmelCase_ : Optional[Any] = use_token_type_ids lowerCAmelCase_ : Union[str, Any] = use_labels lowerCAmelCase_ : List[str] = gelu_activation lowerCAmelCase_ : List[str] = sinusoidal_embeddings lowerCAmelCase_ : Dict = causal lowerCAmelCase_ : Optional[int] = asm lowerCAmelCase_ : Any = n_langs lowerCAmelCase_ : Optional[int] = vocab_size lowerCAmelCase_ : Dict = n_special lowerCAmelCase_ : List[str] = hidden_size lowerCAmelCase_ : Dict = num_hidden_layers lowerCAmelCase_ : str = num_attention_heads lowerCAmelCase_ : Any = hidden_dropout_prob lowerCAmelCase_ : List[Any] = attention_probs_dropout_prob lowerCAmelCase_ : Tuple = max_position_embeddings lowerCAmelCase_ : Dict = type_sequence_label_size lowerCAmelCase_ : List[str] = initializer_range lowerCAmelCase_ : Optional[int] = num_labels lowerCAmelCase_ : str = num_choices lowerCAmelCase_ : Optional[int] = summary_type lowerCAmelCase_ : Dict = use_proj lowerCAmelCase_ : str = scope lowerCAmelCase_ : Dict = bos_token_id def lowerCamelCase ( self : Tuple ): lowerCAmelCase_ : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCAmelCase_ : int = random_attention_mask([self.batch_size, self.seq_length] ) lowerCAmelCase_ : int = None if self.use_input_lengths: lowerCAmelCase_ : Any = ( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length lowerCAmelCase_ : List[str] = None if self.use_token_type_ids: lowerCAmelCase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) lowerCAmelCase_ : str = None lowerCAmelCase_ : List[Any] = None lowerCAmelCase_ : Optional[int] = None if self.use_labels: lowerCAmelCase_ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCAmelCase_ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCAmelCase_ : Optional[Any] = ids_tensor([self.batch_size] , 2 ).float() lowerCAmelCase_ : Tuple = ids_tensor([self.batch_size] , self.num_choices ) lowerCAmelCase_ : Dict = self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def lowerCamelCase ( self : Union[str, Any] ): return XLMConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , ) def lowerCamelCase ( self : Any , a_ : Optional[int] , a_ : Union[str, Any] , a_ : str , a_ : Dict , a_ : List[Any] , a_ : Optional[Any] , a_ : Any , a_ : Any , a_ : List[str] , ): lowerCAmelCase_ : Any = XLMModel(config=a_ ) model.to(a_ ) model.eval() lowerCAmelCase_ : Any = model(a_ , lengths=a_ , langs=a_ ) lowerCAmelCase_ : Tuple = model(a_ , langs=a_ ) lowerCAmelCase_ : Optional[Any] = model(a_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase ( self : Dict , a_ : Tuple , a_ : int , a_ : List[Any] , a_ : Optional[Any] , a_ : List[str] , a_ : Optional[Any] , a_ : Optional[Any] , a_ : List[Any] , a_ : Optional[Any] , ): lowerCAmelCase_ : Any = XLMWithLMHeadModel(a_ ) model.to(a_ ) model.eval() lowerCAmelCase_ : str = model(a_ , token_type_ids=a_ , labels=a_ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCamelCase ( self : str , a_ : List[str] , a_ : List[Any] , a_ : str , a_ : Tuple , a_ : Union[str, Any] , a_ : Optional[Any] , a_ : int , a_ : List[str] , a_ : List[Any] , ): lowerCAmelCase_ : int = XLMForQuestionAnsweringSimple(a_ ) model.to(a_ ) model.eval() lowerCAmelCase_ : Any = model(a_ ) lowerCAmelCase_ : Any = model(a_ , start_positions=a_ , end_positions=a_ ) lowerCAmelCase_ : Optional[Any] = outputs self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCamelCase ( self : Tuple , a_ : Any , a_ : str , a_ : Optional[int] , a_ : Dict , a_ : Any , a_ : int , a_ : List[str] , a_ : int , a_ : List[str] , ): lowerCAmelCase_ : int = XLMForQuestionAnswering(a_ ) model.to(a_ ) model.eval() lowerCAmelCase_ : int = model(a_ ) lowerCAmelCase_ : List[Any] = model( a_ , start_positions=a_ , end_positions=a_ , cls_index=a_ , is_impossible=a_ , p_mask=a_ , ) lowerCAmelCase_ : List[str] = model( a_ , start_positions=a_ , end_positions=a_ , cls_index=a_ , is_impossible=a_ , ) ((lowerCAmelCase_ ) , ) : Optional[Any] = result_with_labels.to_tuple() lowerCAmelCase_ : Optional[int] = model(a_ , start_positions=a_ , end_positions=a_ ) ((lowerCAmelCase_ ) , ) : Tuple = result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape , () ) self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual( result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) ) def lowerCamelCase ( self : str , a_ : Union[str, Any] , a_ : Any , a_ : int , a_ : int , a_ : Union[str, Any] , a_ : str , a_ : str , a_ : int , a_ : Optional[Any] , ): lowerCAmelCase_ : int = XLMForSequenceClassification(a_ ) model.to(a_ ) model.eval() lowerCAmelCase_ : List[str] = model(a_ ) lowerCAmelCase_ : Any = model(a_ , labels=a_ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def lowerCamelCase ( self : Optional[Any] , a_ : Tuple , a_ : Optional[Any] , a_ : int , a_ : Optional[Any] , a_ : str , a_ : Union[str, Any] , a_ : str , a_ : Any , a_ : Optional[Any] , ): lowerCAmelCase_ : Dict = self.num_labels lowerCAmelCase_ : Optional[Any] = XLMForTokenClassification(a_ ) model.to(a_ ) model.eval() lowerCAmelCase_ : Any = model(a_ , attention_mask=a_ , labels=a_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCamelCase ( self : List[Any] , a_ : Dict , a_ : Any , a_ : List[str] , a_ : Tuple , a_ : Any , a_ : int , a_ : Optional[Any] , a_ : str , a_ : Dict , ): lowerCAmelCase_ : List[Any] = self.num_choices lowerCAmelCase_ : Union[str, Any] = XLMForMultipleChoice(config=a_ ) model.to(a_ ) model.eval() lowerCAmelCase_ : Tuple = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCAmelCase_ : Optional[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCAmelCase_ : Union[str, Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCAmelCase_ : Any = model( a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCamelCase ( self : Dict ): lowerCAmelCase_ : Dict = self.prepare_config_and_inputs() ( ( lowerCAmelCase_ ) , ( lowerCAmelCase_ ) , ( lowerCAmelCase_ ) , ( lowerCAmelCase_ ) , ( lowerCAmelCase_ ) , ( lowerCAmelCase_ ) , ( lowerCAmelCase_ ) , ( lowerCAmelCase_ ) , ( lowerCAmelCase_ ) , ) : Any = config_and_inputs lowerCAmelCase_ : Union[str, Any] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths} return config, inputs_dict @require_torch class __lowerCamelCase ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ): '''simple docstring''' a_ : Tuple = ( ( XLMModel, XLMWithLMHeadModel, XLMForQuestionAnswering, XLMForSequenceClassification, XLMForQuestionAnsweringSimple, XLMForTokenClassification, XLMForMultipleChoice, ) if is_torch_available() else () ) a_ : Any = ( (XLMWithLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Check other models whether language generation is also applicable a_ : Optional[int] = ( { """feature-extraction""": XLMModel, """fill-mask""": XLMWithLMHeadModel, """question-answering""": XLMForQuestionAnsweringSimple, """text-classification""": XLMForSequenceClassification, """text-generation""": XLMWithLMHeadModel, """token-classification""": XLMForTokenClassification, """zero-shot""": XLMForSequenceClassification, } if is_torch_available() else {} ) def lowerCamelCase ( self : Dict , a_ : int , a_ : int , a_ : int , a_ : Tuple , a_ : List[Any] ): if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith("Fast" ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def lowerCamelCase ( self : str , a_ : Optional[int] , a_ : Optional[Any] , a_ : int=False ): lowerCAmelCase_ : List[Any] = super()._prepare_for_class(a_ , a_ , return_labels=a_ ) if return_labels: if model_class.__name__ == "XLMForQuestionAnswering": lowerCAmelCase_ : Union[str, Any] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=a_ ) lowerCAmelCase_ : Optional[Any] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=a_ ) return inputs_dict def lowerCamelCase ( self : List[Any] ): lowerCAmelCase_ : Any = XLMModelTester(self ) lowerCAmelCase_ : Dict = ConfigTester(self , config_class=a_ , emb_dim=37 ) def lowerCamelCase ( self : int ): self.config_tester.run_common_tests() def lowerCamelCase ( self : Tuple ): lowerCAmelCase_ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_model(*a_ ) def lowerCamelCase ( self : str ): lowerCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_lm_head(*a_ ) def lowerCamelCase ( self : int ): lowerCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_simple_qa(*a_ ) def lowerCamelCase ( self : Union[str, Any] ): lowerCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_qa(*a_ ) def lowerCamelCase ( self : Union[str, Any] ): lowerCAmelCase_ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_sequence_classif(*a_ ) def lowerCamelCase ( self : List[Any] ): lowerCAmelCase_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_token_classif(*a_ ) def lowerCamelCase ( self : List[Any] ): lowerCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_for_multiple_choice(*a_ ) def lowerCamelCase ( self : Any , a_ : Any , a_ : Dict , a_ : Any , a_ : str , a_ : str , a_ : Optional[Any]=False , a_ : Optional[Any]=1 ): self.assertIsInstance(a_ , a_ ) self.assertListEqual( [isinstance(a_ , a_ ) for iter_attentions in attentions] , [True] * len(a_ ) ) self.assertEqual(len(a_ ) , (max_length - min_length) * num_beam_groups ) for idx, iter_attentions in enumerate(a_ ): # adds PAD dummy token lowerCAmelCase_ : Optional[int] = min_length + idx + 1 lowerCAmelCase_ : int = min_length + idx + 1 lowerCAmelCase_ : List[Any] = ( batch_size * num_beam_groups, config.num_attention_heads, tgt_len, src_len, ) # check attn size self.assertListEqual( [layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(a_ ) ) def lowerCamelCase ( self : int , a_ : Union[str, Any] , a_ : Any , a_ : Union[str, Any] , a_ : str , a_ : Optional[int] , a_ : Dict=False , a_ : Optional[int]=1 ): self.assertIsInstance(a_ , a_ ) self.assertListEqual( [isinstance(a_ , a_ ) for iter_hidden_states in hidden_states] , [True] * len(a_ ) , ) self.assertEqual(len(a_ ) , (max_length - min_length) * num_beam_groups ) for idx, iter_hidden_states in enumerate(a_ ): # adds PAD dummy token lowerCAmelCase_ : int = min_length + idx + 1 lowerCAmelCase_ : List[str] = (batch_size * num_beam_groups, seq_len, config.hidden_size) # check hidden size self.assertListEqual( [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(a_ ) , ) pass @slow def lowerCamelCase ( self : Union[str, Any] ): for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCAmelCase_ : int = XLMModel.from_pretrained(a_ ) self.assertIsNotNone(a_ ) @require_torch class __lowerCamelCase ( unittest.TestCase ): '''simple docstring''' @slow def lowerCamelCase ( self : Tuple ): lowerCAmelCase_ : Dict = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048" ) model.to(a_ ) lowerCAmelCase_ : str = torch.tensor([[14, 4_47]] , dtype=torch.long , device=a_ ) # the president lowerCAmelCase_ : List[Any] = [ 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, ] # the president the president the president the president the president the president the president the president the president the president # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference lowerCAmelCase_ : str = model.generate(a_ , do_sample=a_ ) self.assertListEqual(output_ids[0].cpu().numpy().tolist() , a_ )
241
'''simple docstring''' import numpy as np from sklearn.datasets import fetch_california_housing from sklearn.metrics import mean_absolute_error, mean_squared_error from sklearn.model_selection import train_test_split from xgboost import XGBRegressor def _lowercase ( __A ): '''simple docstring''' return (data["data"], data["target"]) def _lowercase ( __A ,__A ,__A ): '''simple docstring''' __UpperCamelCase = XGBRegressor(verbosity=0 ,random_state=42 ) xgb.fit(__A ,__A ) # Predict target for test data __UpperCamelCase = xgb.predict(__A ) __UpperCamelCase = predictions.reshape(len(__A ) ,1 ) return predictions def _lowercase ( ): '''simple docstring''' __UpperCamelCase = fetch_california_housing() __UpperCamelCase , __UpperCamelCase = data_handling(__A ) __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = train_test_split( __A ,__A ,test_size=0.25 ,random_state=1 ) __UpperCamelCase = xgboost(__A ,__A ,__A ) # Error printing print(f"Mean Absolute Error : {mean_absolute_error(__A ,__A )}" ) print(f"Mean Square Error : {mean_squared_error(__A ,__A )}" ) if __name__ == "__main__": import doctest doctest.testmod(verbose=True) main()
349
0
from __future__ import annotations def lowerCAmelCase__( lowercase : Dict , lowercase : Union[str, Any] , lowercase : Optional[int] ) -> Dict: if (voltage, current, resistance).count(0 ) != 1: raise ValueError("One and only one argument must be 0" ) if resistance < 0: raise ValueError("Resistance cannot be negative" ) if voltage == 0: return {"voltage": float(current * resistance )} elif current == 0: return {"current": voltage / resistance} elif resistance == 0: return {"resistance": voltage / current} else: raise ValueError("Exactly one argument must be 0" ) if __name__ == "__main__": import doctest doctest.testmod()
326
'''simple docstring''' from __future__ import annotations import unittest from transformers import AutoTokenizer, PegasusConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel @require_tf class UpperCAmelCase__ : __SCREAMING_SNAKE_CASE = PegasusConfig __SCREAMING_SNAKE_CASE = {} __SCREAMING_SNAKE_CASE = '''gelu''' def __init__( self , lowercase , lowercase=1_3 , lowercase=7 , lowercase=True , lowercase=False , lowercase=9_9 , lowercase=3_2 , lowercase=2 , lowercase=4 , lowercase=3_7 , lowercase=0.1 , lowercase=0.1 , lowercase=4_0 , lowercase=2 , lowercase=1 , lowercase=0 , ) -> Any: __UpperCamelCase = parent __UpperCamelCase = batch_size __UpperCamelCase = seq_length __UpperCamelCase = is_training __UpperCamelCase = use_labels __UpperCamelCase = vocab_size __UpperCamelCase = hidden_size __UpperCamelCase = num_hidden_layers __UpperCamelCase = num_attention_heads __UpperCamelCase = intermediate_size __UpperCamelCase = hidden_dropout_prob __UpperCamelCase = attention_probs_dropout_prob __UpperCamelCase = max_position_embeddings __UpperCamelCase = eos_token_id __UpperCamelCase = pad_token_id __UpperCamelCase = bos_token_id def __lowerCamelCase ( self ) -> Dict: __UpperCamelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) __UpperCamelCase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) __UpperCamelCase = tf.concat([input_ids, eos_tensor] , axis=1 ) __UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCamelCase = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) __UpperCamelCase = prepare_pegasus_inputs_dict(lowercase , lowercase , lowercase ) return config, inputs_dict def __lowerCamelCase ( self , lowercase , lowercase ) -> Union[str, Any]: __UpperCamelCase = TFPegasusModel(config=lowercase ).get_decoder() __UpperCamelCase = inputs_dict["""input_ids"""] __UpperCamelCase = input_ids[:1, :] __UpperCamelCase = inputs_dict["""attention_mask"""][:1, :] __UpperCamelCase = inputs_dict["""head_mask"""] __UpperCamelCase = 1 # first forward pass __UpperCamelCase = model(lowercase , attention_mask=lowercase , head_mask=lowercase , use_cache=lowercase ) __UpperCamelCase , __UpperCamelCase = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids __UpperCamelCase = ids_tensor((self.batch_size, 3) , config.vocab_size ) __UpperCamelCase = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and __UpperCamelCase = tf.concat([input_ids, next_tokens] , axis=-1 ) __UpperCamelCase = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) __UpperCamelCase = model(lowercase , attention_mask=lowercase )[0] __UpperCamelCase = model(lowercase , attention_mask=lowercase , past_key_values=lowercase )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice __UpperCamelCase = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) __UpperCamelCase = output_from_no_past[:, -3:, random_slice_idx] __UpperCamelCase = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(lowercase , lowercase , rtol=1E-3 ) def _lowercase ( __A ,__A ,__A ,__A=None ,__A=None ,__A=None ,__A=None ,__A=None ,): '''simple docstring''' if attention_mask is None: __UpperCamelCase = tf.cast(tf.math.not_equal(__A ,config.pad_token_id ) ,tf.inta ) if decoder_attention_mask is None: __UpperCamelCase = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape ,dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] ,config.pad_token_id ) ,tf.inta ), ] ,axis=-1 ,) if head_mask is None: __UpperCamelCase = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: __UpperCamelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: __UpperCamelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class UpperCAmelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase): __SCREAMING_SNAKE_CASE = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else () __SCREAMING_SNAKE_CASE = (TFPegasusForConditionalGeneration,) if is_tf_available() else () __SCREAMING_SNAKE_CASE = ( { '''conversational''': TFPegasusForConditionalGeneration, '''feature-extraction''': TFPegasusModel, '''summarization''': TFPegasusForConditionalGeneration, '''text2text-generation''': TFPegasusForConditionalGeneration, '''translation''': TFPegasusForConditionalGeneration, } if is_tf_available() else {} ) __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = False def __lowerCamelCase ( self ) -> str: __UpperCamelCase = TFPegasusModelTester(self ) __UpperCamelCase = ConfigTester(self , config_class=lowercase ) def __lowerCamelCase ( self ) -> str: self.config_tester.run_common_tests() def __lowerCamelCase ( self ) -> Tuple: __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*lowercase ) @require_sentencepiece @require_tokenizers @require_tf class UpperCAmelCase__ ( unittest.TestCase): __SCREAMING_SNAKE_CASE = [ ''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''', ''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''', ] __SCREAMING_SNAKE_CASE = [ '''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to''' ''' reduce the risk of wildfires.''', '''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''', ] # differs slightly from pytorch, likely due to numerical differences in linear layers __SCREAMING_SNAKE_CASE = '''google/pegasus-xsum''' @cached_property def __lowerCamelCase ( self ) -> int: return AutoTokenizer.from_pretrained(self.model_name ) @cached_property def __lowerCamelCase ( self ) -> str: __UpperCamelCase = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model def __lowerCamelCase ( self , **lowercase ) -> Optional[int]: __UpperCamelCase = self.translate_src_text(**lowercase ) assert self.expected_text == generated_words def __lowerCamelCase ( self , **lowercase ) -> Optional[Any]: __UpperCamelCase = self.tokenizer(self.src_text , **lowercase , padding=lowercase , return_tensors="""tf""" ) __UpperCamelCase = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=lowercase , ) __UpperCamelCase = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=lowercase ) return generated_words @slow def __lowerCamelCase ( self ) -> Dict: self._assert_generated_batch_equal_expected()
349
0
"""simple docstring""" from pickle import UnpicklingError import jax import jax.numpy as jnp import numpy as np from flax.serialization import from_bytes from flax.traverse_util import flatten_dict from ..utils import logging a = logging.get_logger(__name__) def lowercase (snake_case__ : str , snake_case__ : Dict ) -> Dict: '''simple docstring''' try: with open(__A , """rb""" ) as flax_state_f: lowerCAmelCase = from_bytes(__A , flax_state_f.read() ) except UnpicklingError as e: try: with open(__A ) as f: if f.read().startswith("""version""" ): raise OSError( """You seem to have cloned a repository without having git-lfs installed. Please""" """ install git-lfs and run `git lfs install` followed by `git lfs pull` in the""" """ folder you cloned.""" ) else: raise ValueError from e except (UnicodeDecodeError, ValueError): raise EnvironmentError(f'''Unable to convert {model_file} to Flax deserializable object. ''' ) return load_flax_weights_in_pytorch_model(__A , __A ) def lowercase (snake_case__ : Optional[Any] , snake_case__ : Optional[Any] ) -> Optional[Any]: '''simple docstring''' try: import torch # noqa: F401 except ImportError: logger.error( """Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see""" """ https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation""" """ instructions.""" ) raise # check if we have bf16 weights lowerCAmelCase = flatten_dict(jax.tree_util.tree_map(lambda snake_case__ : x.dtype == jnp.bfloataa , __A ) ).values() if any(__A ): # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16 # and bf16 is not fully supported in PT yet. logger.warning( """Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` """ """before loading those in PyTorch model.""" ) lowerCAmelCase = jax.tree_util.tree_map( lambda snake_case__ : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , __A ) lowerCAmelCase = """""" lowerCAmelCase = flatten_dict(__A , sep=""".""" ) lowerCAmelCase = pt_model.state_dict() # keep track of unexpected & missing keys lowerCAmelCase = [] lowerCAmelCase = set(pt_model_dict.keys() ) for flax_key_tuple, flax_tensor in flax_state_dict.items(): lowerCAmelCase = flax_key_tuple.split(""".""" ) if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4: lowerCAmelCase = flax_key_tuple_array[:-1] + ["""weight"""] lowerCAmelCase = jnp.transpose(__A , (3, 2, 0, 1) ) elif flax_key_tuple_array[-1] == "kernel": lowerCAmelCase = flax_key_tuple_array[:-1] + ["""weight"""] lowerCAmelCase = flax_tensor.T elif flax_key_tuple_array[-1] == "scale": lowerCAmelCase = flax_key_tuple_array[:-1] + ["""weight"""] if "time_embedding" not in flax_key_tuple_array: for i, flax_key_tuple_string in enumerate(__A ): lowerCAmelCase = ( flax_key_tuple_string.replace("""_0""" , """.0""" ) .replace("""_1""" , """.1""" ) .replace("""_2""" , """.2""" ) .replace("""_3""" , """.3""" ) .replace("""_4""" , """.4""" ) .replace("""_5""" , """.5""" ) .replace("""_6""" , """.6""" ) .replace("""_7""" , """.7""" ) .replace("""_8""" , """.8""" ) .replace("""_9""" , """.9""" ) ) lowerCAmelCase = """.""".join(__A ) if flax_key in pt_model_dict: if flax_tensor.shape != pt_model_dict[flax_key].shape: raise ValueError( f'''Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected ''' f'''to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.''' ) else: # add weight to pytorch dict lowerCAmelCase = np.asarray(__A ) if not isinstance(__A , np.ndarray ) else flax_tensor lowerCAmelCase = torch.from_numpy(__A ) # remove from missing keys missing_keys.remove(__A ) else: # weight is not expected by PyTorch model unexpected_keys.append(__A ) pt_model.load_state_dict(__A ) # re-transform missing_keys to list lowerCAmelCase = list(__A ) if len(__A ) > 0: logger.warning( """Some weights of the Flax model were not used when initializing the PyTorch model""" f''' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing''' f''' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture''' """ (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This""" f''' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect''' """ to be exactly identical (e.g. initializing a BertForSequenceClassification model from a""" """ FlaxBertForSequenceClassification model).""" ) if len(__A ) > 0: logger.warning( f'''Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly''' f''' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to''' """ use it for predictions and inference.""" ) return pt_model
155
'''simple docstring''' import string def _lowercase ( __A ): '''simple docstring''' for key in range(len(string.ascii_uppercase ) ): __UpperCamelCase = """""" for symbol in message: if symbol in string.ascii_uppercase: __UpperCamelCase = string.ascii_uppercase.find(__A ) __UpperCamelCase = num - key if num < 0: __UpperCamelCase = num + len(string.ascii_uppercase ) __UpperCamelCase = translated + string.ascii_uppercase[num] else: __UpperCamelCase = translated + symbol print(f"Decryption using Key #{key}: {translated}" ) def _lowercase ( ): '''simple docstring''' __UpperCamelCase = input("""Encrypted message: """ ) __UpperCamelCase = message.upper() decrypt(__A ) if __name__ == "__main__": import doctest doctest.testmod() main()
349
0
"""simple docstring""" import copy import re class UpperCamelCase_ : __magic_name__ = '''hp''' __magic_name__ = {} __magic_name__ = None @classmethod def _SCREAMING_SNAKE_CASE ( cls : Any , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Optional[int] ) -> Dict: UpperCAmelCase_ : List[str] = prefix UpperCAmelCase_ : Any = defaults cls.build_naming_info() @staticmethod def _SCREAMING_SNAKE_CASE ( lowerCAmelCase_ : List[str] , lowerCAmelCase_ : List[Any] ) -> Optional[Any]: if len(lowerCAmelCase_ ) == 0: return "" UpperCAmelCase_ : Union[str, Any] = None if any(char.isdigit() for char in word ): raise Exception(f"""Parameters should not contain numbers: '{word}' contains a number""" ) if word in info["short_word"]: return info["short_word"][word] for prefix_len in range(1 , len(lowerCAmelCase_ ) + 1 ): UpperCAmelCase_ : Optional[Any] = word[:prefix_len] if prefix in info["reverse_short_word"]: continue else: UpperCAmelCase_ : int = prefix break if short_word is None: # Paranoid fallback def int_to_alphabetic(lowerCAmelCase_ : int ): UpperCAmelCase_ : List[Any] = "" while integer != 0: UpperCAmelCase_ : int = chr(ord("A" ) + integer % 10 ) + s integer //= 10 return s UpperCAmelCase_ : Dict = 0 while True: UpperCAmelCase_ : List[str] = word + "#" + int_to_alphabetic(lowerCAmelCase_ ) if sword in info["reverse_short_word"]: continue else: UpperCAmelCase_ : str = sword break UpperCAmelCase_ : Tuple = short_word UpperCAmelCase_ : Union[str, Any] = word return short_word @staticmethod def _SCREAMING_SNAKE_CASE ( lowerCAmelCase_ : int , lowerCAmelCase_ : int ) -> Optional[int]: UpperCAmelCase_ : int = param_name.split("_" ) UpperCAmelCase_ : List[Any] = [TrialShortNamer.shortname_for_word(lowerCAmelCase_ , lowerCAmelCase_ ) for word in words] # We try to create a separatorless short name, but if there is a collision we have to fallback # to a separated short name UpperCAmelCase_ : str = ["", "_"] for separator in separators: UpperCAmelCase_ : str = separator.join(lowerCAmelCase_ ) if shortname not in info["reverse_short_param"]: UpperCAmelCase_ : int = shortname UpperCAmelCase_ : Union[str, Any] = param_name return shortname return param_name @staticmethod def _SCREAMING_SNAKE_CASE ( lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : int ) -> Union[str, Any]: UpperCAmelCase_ : int = TrialShortNamer.shortname_for_key(lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : Optional[int] = short_name UpperCAmelCase_ : Dict = param_name @classmethod def _SCREAMING_SNAKE_CASE ( cls : Optional[int] ) -> Any: if cls.NAMING_INFO is not None: return UpperCAmelCase_ : int = { "short_word": {}, "reverse_short_word": {}, "short_param": {}, "reverse_short_param": {}, } UpperCAmelCase_ : List[str] = list(cls.DEFAULTS.keys() ) for k in field_keys: cls.add_new_param_name(lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : Tuple = info @classmethod def _SCREAMING_SNAKE_CASE ( cls : Optional[Any] , lowerCAmelCase_ : Union[str, Any] ) -> str: cls.build_naming_info() assert cls.PREFIX is not None UpperCAmelCase_ : Dict = [copy.copy(cls.PREFIX )] for k, v in params.items(): if k not in cls.DEFAULTS: raise Exception(f"""You should provide a default value for the param name {k} with value {v}""" ) if v == cls.DEFAULTS[k]: # The default value is not added to the name continue UpperCAmelCase_ : Dict = cls.NAMING_INFO["short_param"][k] if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): UpperCAmelCase_ : int = 1 if v else 0 UpperCAmelCase_ : str = "" if isinstance(lowerCAmelCase_ , (int, float) ) else "-" UpperCAmelCase_ : int = f"""{key}{sep}{v}""" name.append(lowerCAmelCase_ ) return "_".join(lowerCAmelCase_ ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : str , lowerCAmelCase_ : Union[str, Any] ) -> List[str]: UpperCAmelCase_ : str = repr[len(cls.PREFIX ) + 1 :] if repr == "": UpperCAmelCase_ : Tuple = [] else: UpperCAmelCase_ : Dict = repr.split("_" ) UpperCAmelCase_ : List[str] = {} for value in values: if "-" in value: UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = value.split("-" ) else: UpperCAmelCase_ : Optional[Any] = re.sub("[0-9.]" , "" , lowerCAmelCase_ ) UpperCAmelCase_ : str = float(re.sub("[^0-9.]" , "" , lowerCAmelCase_ ) ) UpperCAmelCase_ : List[str] = cls.NAMING_INFO["reverse_short_param"][p_k] UpperCAmelCase_ : int = p_v for k in cls.DEFAULTS: if k not in parameters: UpperCAmelCase_ : Union[str, Any] = cls.DEFAULTS[k] return parameters
268
'''simple docstring''' from collections import OrderedDict from typing import Any, List, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import logging a__ : Optional[Any] = logging.get_logger(__name__) a__ : Dict = { 'EleutherAI/gpt-j-6B': 'https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json', # See all GPT-J models at https://huggingface.co/models?filter=gpt_j } class UpperCAmelCase__ ( UpperCAmelCase_): __SCREAMING_SNAKE_CASE = '''gptj''' __SCREAMING_SNAKE_CASE = { '''max_position_embeddings''': '''n_positions''', '''hidden_size''': '''n_embd''', '''num_attention_heads''': '''n_head''', '''num_hidden_layers''': '''n_layer''', } def __init__( self , lowercase=5_0_4_0_0 , lowercase=2_0_4_8 , lowercase=4_0_9_6 , lowercase=2_8 , lowercase=1_6 , lowercase=6_4 , lowercase=None , lowercase="gelu_new" , lowercase=0.0 , lowercase=0.0 , lowercase=0.0 , lowercase=1E-5 , lowercase=0.02 , lowercase=True , lowercase=5_0_2_5_6 , lowercase=5_0_2_5_6 , lowercase=False , **lowercase , ) -> Tuple: __UpperCamelCase = vocab_size __UpperCamelCase = n_positions __UpperCamelCase = n_embd __UpperCamelCase = n_layer __UpperCamelCase = n_head __UpperCamelCase = n_inner __UpperCamelCase = rotary_dim __UpperCamelCase = activation_function __UpperCamelCase = resid_pdrop __UpperCamelCase = embd_pdrop __UpperCamelCase = attn_pdrop __UpperCamelCase = layer_norm_epsilon __UpperCamelCase = initializer_range __UpperCamelCase = use_cache __UpperCamelCase = bos_token_id __UpperCamelCase = eos_token_id super().__init__( bos_token_id=lowercase , eos_token_id=lowercase , tie_word_embeddings=lowercase , **lowercase ) class UpperCAmelCase__ ( UpperCAmelCase_): def __init__( self , lowercase , lowercase = "default" , lowercase = None , lowercase = False , ) -> List[str]: super().__init__(lowercase , task=lowercase , patching_specs=lowercase , use_past=lowercase ) if not getattr(self._config , """pad_token_id""" , lowercase ): # TODO: how to do that better? __UpperCamelCase = 0 @property def __lowerCamelCase ( self ) -> Mapping[str, Mapping[int, str]]: __UpperCamelCase = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} ) if self.use_past: self.fill_with_past_key_values_(lowercase , direction="""inputs""" ) __UpperCamelCase = {0: """batch""", 1: """past_sequence + sequence"""} else: __UpperCamelCase = {0: """batch""", 1: """sequence"""} return common_inputs @property def __lowerCamelCase ( self ) -> int: return self._config.n_layer @property def __lowerCamelCase ( self ) -> int: return self._config.n_head def __lowerCamelCase ( self , lowercase , lowercase = -1 , lowercase = -1 , lowercase = False , lowercase = None , ) -> Mapping[str, Any]: __UpperCamelCase = super(lowercase , self ).generate_dummy_inputs( lowercase , batch_size=lowercase , seq_length=lowercase , is_pair=lowercase , framework=lowercase ) # We need to order the input in the way they appears in the forward() __UpperCamelCase = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" ) else: import torch __UpperCamelCase , __UpperCamelCase = common_inputs["""input_ids"""].shape # Not using the same length for past_key_values __UpperCamelCase = seqlen + 2 __UpperCamelCase = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) __UpperCamelCase = [ (torch.zeros(lowercase ), torch.zeros(lowercase )) for _ in range(self.num_layers ) ] __UpperCamelCase = common_inputs["""attention_mask"""] if self.use_past: __UpperCamelCase = ordered_inputs["""attention_mask"""].dtype __UpperCamelCase = torch.cat( [ordered_inputs["""attention_mask"""], torch.ones(lowercase , lowercase , dtype=lowercase )] , dim=1 ) return ordered_inputs @property def __lowerCamelCase ( self ) -> int: return 1_3
349
0
import importlib import sys from argparse import REMAINDER, ArgumentParser from pathlib import Path import torch_xla.distributed.xla_multiprocessing as xmp def __UpperCamelCase ( ) -> Dict: """simple docstring""" A : Optional[int] = ArgumentParser( description=( """PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes""" ) ) # Optional arguments for the launch helper parser.add_argument("""--num_cores""" , type=__A , default=1 , help="""Number of TPU cores to use (1 or 8).""" ) # positional parser.add_argument( """training_script""" , type=__A , help=( """The full path to the single TPU training """ """program/script to be launched in parallel, """ """followed by all the arguments for the """ """training script""" ) , ) # rest from the training program parser.add_argument("""training_script_args""" , nargs=__A ) return parser.parse_args() def __UpperCamelCase ( ) -> List[Any]: """simple docstring""" A : Union[str, Any] = parse_args() # Import training_script as a module. A : int = Path(args.training_script ) sys.path.append(str(script_fpath.parent.resolve() ) ) A : Optional[Any] = script_fpath.stem A : Dict = importlib.import_module(__A ) # Patch sys.argv A : Dict = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores )] xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores ) if __name__ == "__main__": main()
116
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, is_vision_available, ) a__ : int = { 'configuration_layoutlmv3': [ 'LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LayoutLMv3Config', 'LayoutLMv3OnnxConfig', ], 'processing_layoutlmv3': ['LayoutLMv3Processor'], 'tokenization_layoutlmv3': ['LayoutLMv3Tokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : Dict = ['LayoutLMv3TokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : Any = [ 'LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST', 'LayoutLMv3ForQuestionAnswering', 'LayoutLMv3ForSequenceClassification', 'LayoutLMv3ForTokenClassification', 'LayoutLMv3Model', 'LayoutLMv3PreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : str = [ 'TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFLayoutLMv3ForQuestionAnswering', 'TFLayoutLMv3ForSequenceClassification', 'TFLayoutLMv3ForTokenClassification', 'TFLayoutLMv3Model', 'TFLayoutLMv3PreTrainedModel', ] try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : List[Any] = ['LayoutLMv3FeatureExtractor'] a__ : str = ['LayoutLMv3ImageProcessor'] if TYPE_CHECKING: from .configuration_layoutlmva import ( LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig, LayoutLMvaOnnxConfig, ) from .processing_layoutlmva import LayoutLMvaProcessor from .tokenization_layoutlmva import LayoutLMvaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_layoutlmva import ( LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST, LayoutLMvaForQuestionAnswering, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaModel, LayoutLMvaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_layoutlmva import ( TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, TFLayoutLMvaModel, TFLayoutLMvaPreTrainedModel, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor from .image_processing_layoutlmva import LayoutLMvaImageProcessor else: import sys a__ : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
349
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_torch_available, ) lowerCAmelCase__ : str = { 'configuration_trocr': ['TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TrOCRConfig'], 'processing_trocr': ['TrOCRProcessor'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ : int = [ 'TROCR_PRETRAINED_MODEL_ARCHIVE_LIST', 'TrOCRForCausalLM', 'TrOCRPreTrainedModel', ] if TYPE_CHECKING: from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig from .processing_trocr import TrOCRProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel else: import sys lowerCAmelCase__ : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
143
'''simple docstring''' def _lowercase ( __A ,__A ): '''simple docstring''' __UpperCamelCase = len(__A ) __UpperCamelCase = [[False] * (required_sum + 1) for _ in range(arr_len + 1 )] # for each arr value, a sum of zero(0) can be formed by not taking any element # hence True/1 for i in range(arr_len + 1 ): __UpperCamelCase = True # sum is not zero and set is empty then false for i in range(1 ,required_sum + 1 ): __UpperCamelCase = False for i in range(1 ,arr_len + 1 ): for j in range(1 ,required_sum + 1 ): if arr[i - 1] > j: __UpperCamelCase = subset[i - 1][j] if arr[i - 1] <= j: __UpperCamelCase = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]] return subset[arr_len][required_sum] if __name__ == "__main__": import doctest doctest.testmod()
349
0
import argparse import glob import logging import os import sys import time from collections import defaultdict from pathlib import Path from typing import Dict, List, Tuple import numpy as np import pytorch_lightning as pl import torch from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback from torch import nn from torch.utils.data import DataLoader from transformers import MBartTokenizer, TaForConditionalGeneration from transformers.models.bart.modeling_bart import shift_tokens_right from utils import ( ROUGE_KEYS, LegacySeqaSeqDataset, SeqaSeqDataset, assert_all_frozen, calculate_bleu, calculate_rouge, check_output_dir, flatten_list, freeze_embeds, freeze_params, get_git_info, label_smoothed_nll_loss, lmap, pickle_save, save_git_info, save_json, use_task_specific_params, ) # need the parent dir module sys.path.insert(2, str(Path(__file__).resolve().parents[1])) from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa snake_case : str = logging.getLogger(__name__) class _snake_case ( UpperCAmelCase_ ): SCREAMING_SNAKE_CASE__ = 'summarization' SCREAMING_SNAKE_CASE__ = ['loss'] SCREAMING_SNAKE_CASE__ = ROUGE_KEYS SCREAMING_SNAKE_CASE__ = 'rouge2' def __init__( self , _lowerCamelCase , **_lowerCamelCase ): if hparams.sortish_sampler and hparams.gpus > 1: a :Optional[Any] = False elif hparams.max_tokens_per_batch is not None: if hparams.gpus > 1: raise NotImplementedError('''Dynamic Batch size does not work for multi-gpu training''' ) if hparams.sortish_sampler: raise ValueError('''--sortish_sampler and --max_tokens_per_batch may not be used simultaneously''' ) super().__init__(_lowerCamelCase , num_labels=_lowerCamelCase , mode=self.mode , **_lowerCamelCase ) use_task_specific_params(self.model , '''summarization''' ) save_git_info(self.hparams.output_dir ) a :Tuple = Path(self.output_dir ) / '''metrics.json''' a :Optional[Any] = Path(self.output_dir ) / '''hparams.pkl''' pickle_save(self.hparams , self.hparams_save_path ) a :Optional[Any] = 0 a :Union[str, Any] = defaultdict(_lowerCamelCase ) a :Dict = self.config.model_type a :Dict = self.config.tgt_vocab_size if self.model_type == '''fsmt''' else self.config.vocab_size a :Optional[Any] = { '''data_dir''': self.hparams.data_dir, '''max_source_length''': self.hparams.max_source_length, '''prefix''': self.model.config.prefix or '''''', } a :Tuple = { '''train''': self.hparams.n_train, '''val''': self.hparams.n_val, '''test''': self.hparams.n_test, } a :List[Any] = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()} a :Union[str, Any] = { '''train''': self.hparams.max_target_length, '''val''': self.hparams.val_max_target_length, '''test''': self.hparams.test_max_target_length, } assert self.target_lens["train"] <= self.target_lens["val"], F'''target_lens: {self.target_lens}''' assert self.target_lens["train"] <= self.target_lens["test"], F'''target_lens: {self.target_lens}''' if self.hparams.freeze_embeds: freeze_embeds(self.model ) if self.hparams.freeze_encoder: freeze_params(self.model.get_encoder() ) assert_all_frozen(self.model.get_encoder() ) a :Tuple = get_git_info()['''repo_sha'''] a :Any = hparams.num_workers a :Any = None # default to config if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , _lowerCamelCase ): a :int = self.tokenizer.lang_code_to_id[hparams.tgt_lang] a :Union[str, Any] = self.decoder_start_token_id a :Union[str, Any] = ( SeqaSeqDataset if hasattr(self.tokenizer , '''prepare_seq2seq_batch''' ) else LegacySeqaSeqDataset ) a :Union[str, Any] = False a :List[Any] = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams if self.hparams.eval_max_gen_length is not None: a :Optional[Any] = self.hparams.eval_max_gen_length else: a :Optional[int] = self.model.config.max_length a :Dict = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ): a :Union[str, Any] = { k: self.tokenizer.batch_decode(v.tolist() ) if '''mask''' not in k else v.shape for k, v in batch.items() } save_json(_lowerCamelCase , Path(self.output_dir ) / '''text_batch.json''' ) save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / '''tok_batch.json''' ) a :Union[str, Any] = True return readable_batch def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , **_lowerCamelCase ): return self.model(_lowerCamelCase , **_lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ): a :Dict = self.tokenizer.batch_decode( _lowerCamelCase , skip_special_tokens=_lowerCamelCase , clean_up_tokenization_spaces=_lowerCamelCase ) return lmap(str.strip , _lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ): a :Optional[int] = self.tokenizer.pad_token_id a , a :List[Any] = batch['''input_ids'''], batch['''attention_mask'''] a :Optional[Any] = batch['''labels'''] if isinstance(self.model , _lowerCamelCase ): a :Union[str, Any] = self.model._shift_right(_lowerCamelCase ) else: a :int = shift_tokens_right(_lowerCamelCase , _lowerCamelCase ) if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero a :int = decoder_input_ids self.save_readable_batch(_lowerCamelCase ) a :Tuple = self(_lowerCamelCase , attention_mask=_lowerCamelCase , decoder_input_ids=_lowerCamelCase , use_cache=_lowerCamelCase ) a :Optional[int] = outputs['''logits'''] if self.hparams.label_smoothing == 0: # Same behavior as modeling_bart.py, besides ignoring pad_token_id a :Union[str, Any] = nn.CrossEntropyLoss(ignore_index=_lowerCamelCase ) assert lm_logits.shape[-1] == self.vocab_size a :Tuple = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) ) else: a :List[Any] = nn.functional.log_softmax(_lowerCamelCase , dim=-1 ) a , a :str = label_smoothed_nll_loss( _lowerCamelCase , _lowerCamelCase , self.hparams.label_smoothing , ignore_index=_lowerCamelCase ) return (loss,) @property def SCREAMING_SNAKE_CASE__ ( self ): return self.tokenizer.pad_token_id def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase ): a :Tuple = self._step(_lowerCamelCase ) a :Any = dict(zip(self.loss_names , _lowerCamelCase ) ) # tokens per batch a :Tuple = batch['''input_ids'''].ne(self.pad ).sum() + batch['''labels'''].ne(self.pad ).sum() a :Any = batch['''input_ids'''].shape[0] a :Optional[int] = batch['''input_ids'''].eq(self.pad ).sum() a :int = batch['''input_ids'''].eq(self.pad ).float().mean() # TODO(SS): make a wandb summary metric for this return {"loss": loss_tensors[0], "log": logs} def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase ): return self._generative_step(_lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase="val" ): self.step_count += 1 a :List[Any] = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names} a :Any = losses['''loss'''] a :Tuple = { k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ['''gen_time''', '''gen_len'''] } a :Any = ( generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric] ) a :Dict = torch.tensor(_lowerCamelCase ).type_as(_lowerCamelCase ) generative_metrics.update({k: v.item() for k, v in losses.items()} ) losses.update(_lowerCamelCase ) a :List[Any] = {F'''{prefix}_avg_{k}''': x for k, x in losses.items()} a :Tuple = self.step_count self.metrics[prefix].append(_lowerCamelCase ) # callback writes this to self.metrics_save_path a :Dict = flatten_list([x['''preds'''] for x in outputs] ) return { "log": all_metrics, "preds": preds, F'''{prefix}_loss''': loss, F'''{prefix}_{self.val_metric}''': metric_tensor, } def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase ): return calculate_rouge(_lowerCamelCase , _lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ): a :List[Any] = time.time() # parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens') a :Any = self.model.generate( batch['''input_ids'''] , attention_mask=batch['''attention_mask'''] , use_cache=_lowerCamelCase , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , ) a :Tuple = (time.time() - ta) / batch['''input_ids'''].shape[0] a :Union[str, Any] = self.ids_to_clean_text(_lowerCamelCase ) a :Optional[Any] = self.ids_to_clean_text(batch['''labels'''] ) a :List[str] = self._step(_lowerCamelCase ) a :Optional[Any] = dict(zip(self.loss_names , _lowerCamelCase ) ) a :List[Any] = self.calc_generative_metrics(_lowerCamelCase , _lowerCamelCase ) a :str = np.mean(lmap(_lowerCamelCase , _lowerCamelCase ) ) base_metrics.update(gen_time=_lowerCamelCase , gen_len=_lowerCamelCase , preds=_lowerCamelCase , target=_lowerCamelCase , **_lowerCamelCase ) return base_metrics def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase ): return self._generative_step(_lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ): return self.validation_epoch_end(_lowerCamelCase , prefix='''test''' ) def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ): a :int = self.n_obs[type_path] a :int = self.target_lens[type_path] a :Union[str, Any] = self.dataset_class( self.tokenizer , type_path=_lowerCamelCase , n_obs=_lowerCamelCase , max_target_length=_lowerCamelCase , **self.dataset_kwargs , ) return dataset def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = False ): a :Union[str, Any] = self.get_dataset(_lowerCamelCase ) if self.hparams.sortish_sampler and type_path != "test" and type_path != "val": a :Optional[int] = dataset.make_sortish_sampler(_lowerCamelCase , distributed=self.hparams.gpus > 1 ) return DataLoader( _lowerCamelCase , batch_size=_lowerCamelCase , collate_fn=dataset.collate_fn , shuffle=_lowerCamelCase , num_workers=self.num_workers , sampler=_lowerCamelCase , ) elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val": a :List[Any] = dataset.make_dynamic_sampler( self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1 ) return DataLoader( _lowerCamelCase , batch_sampler=_lowerCamelCase , collate_fn=dataset.collate_fn , num_workers=self.num_workers , ) else: return DataLoader( _lowerCamelCase , batch_size=_lowerCamelCase , collate_fn=dataset.collate_fn , shuffle=_lowerCamelCase , num_workers=self.num_workers , sampler=_lowerCamelCase , ) def SCREAMING_SNAKE_CASE__ ( self ): a :str = self.get_dataloader('''train''' , batch_size=self.hparams.train_batch_size , shuffle=_lowerCamelCase ) return dataloader def SCREAMING_SNAKE_CASE__ ( self ): return self.get_dataloader('''val''' , batch_size=self.hparams.eval_batch_size ) def SCREAMING_SNAKE_CASE__ ( self ): return self.get_dataloader('''test''' , batch_size=self.hparams.eval_batch_size ) @staticmethod def SCREAMING_SNAKE_CASE__ ( _lowerCamelCase , _lowerCamelCase ): BaseTransformer.add_model_specific_args(_lowerCamelCase , _lowerCamelCase ) add_generic_args(_lowerCamelCase , _lowerCamelCase ) parser.add_argument( '''--max_source_length''' , default=1024 , type=_lowerCamelCase , help=( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) , ) parser.add_argument( '''--max_target_length''' , default=56 , type=_lowerCamelCase , help=( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) , ) parser.add_argument( '''--val_max_target_length''' , default=142 , type=_lowerCamelCase , help=( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) , ) parser.add_argument( '''--test_max_target_length''' , default=142 , type=_lowerCamelCase , help=( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) , ) parser.add_argument('''--freeze_encoder''' , action='''store_true''' ) parser.add_argument('''--freeze_embeds''' , action='''store_true''' ) parser.add_argument('''--sortish_sampler''' , action='''store_true''' , default=_lowerCamelCase ) parser.add_argument('''--overwrite_output_dir''' , action='''store_true''' , default=_lowerCamelCase ) parser.add_argument('''--max_tokens_per_batch''' , type=_lowerCamelCase , default=_lowerCamelCase ) parser.add_argument('''--logger_name''' , type=_lowerCamelCase , choices=['''default''', '''wandb''', '''wandb_shared'''] , default='''default''' ) parser.add_argument('''--n_train''' , type=_lowerCamelCase , default=-1 , required=_lowerCamelCase , help='''# examples. -1 means use all.''' ) parser.add_argument('''--n_val''' , type=_lowerCamelCase , default=500 , required=_lowerCamelCase , help='''# examples. -1 means use all.''' ) parser.add_argument('''--n_test''' , type=_lowerCamelCase , default=-1 , required=_lowerCamelCase , help='''# examples. -1 means use all.''' ) parser.add_argument( '''--task''' , type=_lowerCamelCase , default='''summarization''' , required=_lowerCamelCase , help='''# examples. -1 means use all.''' ) parser.add_argument('''--label_smoothing''' , type=_lowerCamelCase , default=0.0 , required=_lowerCamelCase ) parser.add_argument('''--src_lang''' , type=_lowerCamelCase , default='''''' , required=_lowerCamelCase ) parser.add_argument('''--tgt_lang''' , type=_lowerCamelCase , default='''''' , required=_lowerCamelCase ) parser.add_argument('''--eval_beams''' , type=_lowerCamelCase , default=_lowerCamelCase , required=_lowerCamelCase ) parser.add_argument( '''--val_metric''' , type=_lowerCamelCase , default=_lowerCamelCase , required=_lowerCamelCase , choices=['''bleu''', '''rouge2''', '''loss''', None] ) parser.add_argument('''--eval_max_gen_length''' , type=_lowerCamelCase , default=_lowerCamelCase , help='''never generate more than n tokens''' ) parser.add_argument('''--save_top_k''' , type=_lowerCamelCase , default=1 , required=_lowerCamelCase , help='''How many checkpoints to save''' ) parser.add_argument( '''--early_stopping_patience''' , type=_lowerCamelCase , default=-1 , required=_lowerCamelCase , help=( '''-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So''' ''' val_check_interval will effect it.''' ) , ) return parser class _snake_case ( UpperCAmelCase_ ): SCREAMING_SNAKE_CASE__ = 'translation' SCREAMING_SNAKE_CASE__ = ['loss'] SCREAMING_SNAKE_CASE__ = ['bleu'] SCREAMING_SNAKE_CASE__ = 'bleu' def __init__( self , _lowerCamelCase , **_lowerCamelCase ): super().__init__(_lowerCamelCase , **_lowerCamelCase ) a :List[Any] = hparams.src_lang a :str = hparams.tgt_lang def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase ): return calculate_bleu(_lowerCamelCase , _lowerCamelCase ) def __lowerCamelCase ( UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[Any]=None ): """simple docstring""" Path(args.output_dir ).mkdir(exist_ok=__A ) check_output_dir(__A , expected_items=3 ) if model is None: if "summarization" in args.task: a :Optional[Any] = SummarizationModule(__A ) else: a :str = TranslationModule(__A ) a :List[str] = Path(args.data_dir ).name if ( args.logger_name == "default" or args.fast_dev_run or str(args.output_dir ).startswith('''/tmp''' ) or str(args.output_dir ).startswith('''/var''' ) ): a :Tuple = True # don't pollute wandb logs unnecessarily elif args.logger_name == "wandb": from pytorch_lightning.loggers import WandbLogger a :Any = os.environ.get('''WANDB_PROJECT''' , __A ) a :Dict = WandbLogger(name=model.output_dir.name , project=__A ) elif args.logger_name == "wandb_shared": from pytorch_lightning.loggers import WandbLogger a :Dict = WandbLogger(name=model.output_dir.name , project=F'''hf_{dataset}''' ) if args.early_stopping_patience >= 0: a :List[Any] = get_early_stopping_callback(model.val_metric , args.early_stopping_patience ) else: a :str = False a :Any = args.val_metric == '''loss''' a :List[str] = generic_train( __A , __A , logging_callback=SeqaSeqLoggingCallback() , checkpoint_callback=get_checkpoint_callback( args.output_dir , model.val_metric , args.save_top_k , __A ) , early_stopping_callback=__A , logger=__A , ) pickle_save(model.hparams , model.output_dir / '''hparams.pkl''' ) if not args.do_predict: return model a :str = '''''' a :Tuple = sorted(glob.glob(os.path.join(args.output_dir , '''*.ckpt''' ) , recursive=__A ) ) if checkpoints: a :List[Any] = checkpoints[-1] a :Optional[Any] = checkpoints[-1] trainer.logger.log_hyperparams(model.hparams ) # test() without a model tests using the best checkpoint automatically trainer.test() return model if __name__ == "__main__": snake_case : Any = argparse.ArgumentParser() snake_case : Tuple = pl.Trainer.add_argparse_args(parser) snake_case : Optional[int] = SummarizationModule.add_model_specific_args(parser, os.getcwd()) snake_case : str = parser.parse_args() main(args)
94
'''simple docstring''' import bza import gzip import lzma import os import shutil import struct import tarfile import warnings import zipfile from abc import ABC, abstractmethod from pathlib import Path from typing import Dict, List, Optional, Type, Union from .. import config from .filelock import FileLock from .logging import get_logger a__ : Any = get_logger(__name__) class UpperCAmelCase__ : def __init__( self , lowercase = None ) -> List[str]: __UpperCamelCase = ( os.path.join(lowercase , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH ) __UpperCamelCase = Extractor def __lowerCamelCase ( self , lowercase ) -> str: from .file_utils import hash_url_to_filename # Path where we extract compressed archives # We extract in the cache dir, and get the extracted path name by hashing the original path" __UpperCamelCase = os.path.abspath(lowercase ) return os.path.join(self.extract_dir , hash_url_to_filename(lowercase ) ) def __lowerCamelCase ( self , lowercase , lowercase ) -> bool: return force_extract or ( not os.path.isfile(lowercase ) and not (os.path.isdir(lowercase ) and os.listdir(lowercase )) ) def __lowerCamelCase ( self , lowercase , lowercase = False ) -> str: __UpperCamelCase = self.extractor.infer_extractor_format(lowercase ) if not extractor_format: return input_path __UpperCamelCase = self._get_output_path(lowercase ) if self._do_extract(lowercase , lowercase ): self.extractor.extract(lowercase , lowercase , lowercase ) return output_path class UpperCAmelCase__ ( UpperCAmelCase_): @classmethod @abstractmethod def __lowerCamelCase ( cls , lowercase , **lowercase ) -> bool: ... @staticmethod @abstractmethod def __lowerCamelCase ( lowercase , lowercase ) -> None: ... class UpperCAmelCase__ ( UpperCAmelCase_ , UpperCAmelCase_): __SCREAMING_SNAKE_CASE = [] @staticmethod def __lowerCamelCase ( lowercase , lowercase ) -> int: with open(lowercase , """rb""" ) as f: return f.read(lowercase ) @classmethod def __lowerCamelCase ( cls , lowercase , lowercase = b"" ) -> bool: if not magic_number: __UpperCamelCase = max(len(lowercase ) for cls_magic_number in cls.magic_numbers ) try: __UpperCamelCase = cls.read_magic_number(lowercase , lowercase ) except OSError: return False return any(magic_number.startswith(lowercase ) for cls_magic_number in cls.magic_numbers ) class UpperCAmelCase__ ( UpperCAmelCase_): @classmethod def __lowerCamelCase ( cls , lowercase , **lowercase ) -> bool: return tarfile.is_tarfile(lowercase ) @staticmethod def __lowerCamelCase ( lowercase , lowercase ) -> str: def resolved(lowercase ) -> str: return os.path.realpath(os.path.abspath(lowercase ) ) def badpath(lowercase , lowercase ) -> bool: # joinpath will ignore base if path is absolute return not resolved(os.path.join(lowercase , lowercase ) ).startswith(lowercase ) def badlink(lowercase , lowercase ) -> bool: # Links are interpreted relative to the directory containing the link __UpperCamelCase = resolved(os.path.join(lowercase , os.path.dirname(info.name ) ) ) return badpath(info.linkname , base=lowercase ) __UpperCamelCase = resolved(lowercase ) for finfo in members: if badpath(finfo.name , lowercase ): logger.error(f"Extraction of {finfo.name} is blocked (illegal path)" ) elif finfo.issym() and badlink(lowercase , lowercase ): logger.error(f"Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}" ) elif finfo.islnk() and badlink(lowercase , lowercase ): logger.error(f"Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}" ) else: yield finfo @staticmethod def __lowerCamelCase ( lowercase , lowercase ) -> None: os.makedirs(lowercase , exist_ok=lowercase ) __UpperCamelCase = tarfile.open(lowercase ) tar_file.extractall(lowercase , members=TarExtractor.safemembers(lowercase , lowercase ) ) tar_file.close() class UpperCAmelCase__ ( UpperCAmelCase_): __SCREAMING_SNAKE_CASE = [B'''\x1F\x8B'''] @staticmethod def __lowerCamelCase ( lowercase , lowercase ) -> None: with gzip.open(lowercase , """rb""" ) as gzip_file: with open(lowercase , """wb""" ) as extracted_file: shutil.copyfileobj(lowercase , lowercase ) class UpperCAmelCase__ ( UpperCAmelCase_): __SCREAMING_SNAKE_CASE = [ B'''PK\x03\x04''', B'''PK\x05\x06''', # empty archive B'''PK\x07\x08''', # spanned archive ] @classmethod def __lowerCamelCase ( cls , lowercase , lowercase = b"" ) -> bool: if super().is_extractable(lowercase , magic_number=lowercase ): return True try: # Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives. # From: https://github.com/python/cpython/pull/5053 from zipfile import ( _CD_SIGNATURE, _ECD_DISK_NUMBER, _ECD_DISK_START, _ECD_ENTRIES_TOTAL, _ECD_OFFSET, _ECD_SIZE, _EndRecData, sizeCentralDir, stringCentralDir, structCentralDir, ) with open(lowercase , """rb""" ) as fp: __UpperCamelCase = _EndRecData(lowercase ) if endrec: if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0: return True # Empty zipfiles are still zipfiles elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]: fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir: __UpperCamelCase = fp.read(lowercase ) # CD is where we expect it to be if len(lowercase ) == sizeCentralDir: __UpperCamelCase = struct.unpack(lowercase , lowercase ) # CD is the right size if centdir[_CD_SIGNATURE] == stringCentralDir: return True # First central directory entry has correct magic number return False except Exception: # catch all errors in case future python versions change the zipfile internals return False @staticmethod def __lowerCamelCase ( lowercase , lowercase ) -> None: os.makedirs(lowercase , exist_ok=lowercase ) with zipfile.ZipFile(lowercase , """r""" ) as zip_file: zip_file.extractall(lowercase ) zip_file.close() class UpperCAmelCase__ ( UpperCAmelCase_): __SCREAMING_SNAKE_CASE = [B'''\xFD\x37\x7A\x58\x5A\x00'''] @staticmethod def __lowerCamelCase ( lowercase , lowercase ) -> None: with lzma.open(lowercase ) as compressed_file: with open(lowercase , """wb""" ) as extracted_file: shutil.copyfileobj(lowercase , lowercase ) class UpperCAmelCase__ ( UpperCAmelCase_): __SCREAMING_SNAKE_CASE = [B'''Rar!\x1a\x07\x00''', B'''Rar!\x1a\x07\x01\x00'''] # RAR_ID # RAR5_ID @staticmethod def __lowerCamelCase ( lowercase , lowercase ) -> None: if not config.RARFILE_AVAILABLE: raise ImportError("""Please pip install rarfile""" ) import rarfile os.makedirs(lowercase , exist_ok=lowercase ) __UpperCamelCase = rarfile.RarFile(lowercase ) rf.extractall(lowercase ) rf.close() class UpperCAmelCase__ ( UpperCAmelCase_): __SCREAMING_SNAKE_CASE = [B'''\x28\xb5\x2F\xFD'''] @staticmethod def __lowerCamelCase ( lowercase , lowercase ) -> None: if not config.ZSTANDARD_AVAILABLE: raise ImportError("""Please pip install zstandard""" ) import zstandard as zstd __UpperCamelCase = zstd.ZstdDecompressor() with open(lowercase , """rb""" ) as ifh, open(lowercase , """wb""" ) as ofh: dctx.copy_stream(lowercase , lowercase ) class UpperCAmelCase__ ( UpperCAmelCase_): __SCREAMING_SNAKE_CASE = [B'''\x42\x5A\x68'''] @staticmethod def __lowerCamelCase ( lowercase , lowercase ) -> None: with bza.open(lowercase , """rb""" ) as compressed_file: with open(lowercase , """wb""" ) as extracted_file: shutil.copyfileobj(lowercase , lowercase ) class UpperCAmelCase__ ( UpperCAmelCase_): __SCREAMING_SNAKE_CASE = [B'''\x37\x7A\xBC\xAF\x27\x1C'''] @staticmethod def __lowerCamelCase ( lowercase , lowercase ) -> None: if not config.PY7ZR_AVAILABLE: raise ImportError("""Please pip install py7zr""" ) import pyazr os.makedirs(lowercase , exist_ok=lowercase ) with pyazr.SevenZipFile(lowercase , """r""" ) as archive: archive.extractall(lowercase ) class UpperCAmelCase__ ( UpperCAmelCase_): __SCREAMING_SNAKE_CASE = [B'''\x04\x22\x4D\x18'''] @staticmethod def __lowerCamelCase ( lowercase , lowercase ) -> None: if not config.LZ4_AVAILABLE: raise ImportError("""Please pip install lz4""" ) import lza.frame with lza.frame.open(lowercase , """rb""" ) as compressed_file: with open(lowercase , """wb""" ) as extracted_file: shutil.copyfileobj(lowercase , lowercase ) class UpperCAmelCase__ : # Put zip file to the last, b/c it is possible wrongly detected as zip (I guess it means: as tar or gzip) __SCREAMING_SNAKE_CASE = { "tar": TarExtractor, "gzip": GzipExtractor, "zip": ZipExtractor, "xz": XzExtractor, "rar": RarExtractor, "zstd": ZstdExtractor, "bz2": BzipaExtractor, "7z": SevenZipExtractor, # <Added version="2.4.0"/> "lz4": LzaExtractor, # <Added version="2.4.0"/> } @classmethod def __lowerCamelCase ( cls ) -> Union[str, Any]: return max( len(lowercase ) for extractor in cls.extractors.values() if issubclass(lowercase , lowercase ) for extractor_magic_number in extractor.magic_numbers ) @staticmethod def __lowerCamelCase ( lowercase , lowercase ) -> str: try: return MagicNumberBaseExtractor.read_magic_number(lowercase , magic_number_length=lowercase ) except OSError: return b"" @classmethod def __lowerCamelCase ( cls , lowercase , lowercase = False ) -> bool: warnings.warn( """Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. """ """Use 'infer_extractor_format' instead.""" , category=lowercase , ) __UpperCamelCase = cls.infer_extractor_format(lowercase ) if extractor_format: return True if not return_extractor else (True, cls.extractors[extractor_format]) return False if not return_extractor else (False, None) @classmethod def __lowerCamelCase ( cls , lowercase ) -> str: # <Added version="2.4.0"/> __UpperCamelCase = cls._get_magic_number_max_length() __UpperCamelCase = cls._read_magic_number(lowercase , lowercase ) for extractor_format, extractor in cls.extractors.items(): if extractor.is_extractable(lowercase , magic_number=lowercase ): return extractor_format @classmethod def __lowerCamelCase ( cls , lowercase , lowercase , lowercase = None , lowercase = "deprecated" , ) -> None: os.makedirs(os.path.dirname(lowercase ) , exist_ok=lowercase ) # Prevent parallel extractions __UpperCamelCase = str(Path(lowercase ).with_suffix(""".lock""" ) ) with FileLock(lowercase ): shutil.rmtree(lowercase , ignore_errors=lowercase ) if extractor_format or extractor != "deprecated": if extractor != "deprecated" or not isinstance(lowercase , lowercase ): # passed as positional arg warnings.warn( """Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. """ """Use 'extractor_format' instead.""" , category=lowercase , ) __UpperCamelCase = extractor if extractor != """deprecated""" else extractor_format else: __UpperCamelCase = cls.extractors[extractor_format] return extractor.extract(lowercase , lowercase ) else: warnings.warn( """Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an """ """exception in 3.0.0.""" , category=lowercase , ) for extractor in cls.extractors.values(): if extractor.is_extractable(lowercase ): return extractor.extract(lowercase , lowercase )
349
0
'''simple docstring''' import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..bit import BitConfig lowercase : List[Any] = logging.get_logger(__name__) lowercase : Optional[Any] = { 'Intel/dpt-large': 'https://huggingface.co/Intel/dpt-large/resolve/main/config.json', # See all DPT models at https://huggingface.co/models?filter=dpt } class A ( UpperCAmelCase_ ): __magic_name__ = '''dpt''' def __init__( self , SCREAMING_SNAKE_CASE=768 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=3072 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=1e-12 , SCREAMING_SNAKE_CASE=384 , SCREAMING_SNAKE_CASE=16 , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=[2, 5, 8, 11] , SCREAMING_SNAKE_CASE="project" , SCREAMING_SNAKE_CASE=[4, 2, 1, 0.5] , SCREAMING_SNAKE_CASE=[96, 192, 384, 768] , SCREAMING_SNAKE_CASE=256 , SCREAMING_SNAKE_CASE=-1 , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=0.4 , SCREAMING_SNAKE_CASE=255 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=[1, 1024, 24, 24] , SCREAMING_SNAKE_CASE=[0, 1] , SCREAMING_SNAKE_CASE=None , **SCREAMING_SNAKE_CASE , ) -> Any: """simple docstring""" super().__init__(**SCREAMING_SNAKE_CASE ) A : str = hidden_size A : Union[str, Any] = is_hybrid if self.is_hybrid: if backbone_config is None: logger.info('''Initializing the config with a `BiT` backbone.''' ) A : Union[str, Any] = { '''global_padding''': '''same''', '''layer_type''': '''bottleneck''', '''depths''': [3, 4, 9], '''out_features''': ['''stage1''', '''stage2''', '''stage3'''], '''embedding_dynamic_padding''': True, } A : Tuple = BitConfig(**SCREAMING_SNAKE_CASE ) elif isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): logger.info('''Initializing the config with a `BiT` backbone.''' ) A : Any = BitConfig(**SCREAMING_SNAKE_CASE ) elif isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): A : List[Any] = backbone_config else: raise ValueError( F'backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.' ) A : Optional[Any] = backbone_featmap_shape A : Dict = neck_ignore_stages if readout_type != "project": raise ValueError('''Readout type must be \'project\' when using `DPT-hybrid` mode.''' ) else: A : Optional[int] = None A : List[str] = None A : Tuple = [] A : List[Any] = num_hidden_layers A : Optional[Any] = num_attention_heads A : Union[str, Any] = intermediate_size A : str = hidden_act A : Union[str, Any] = hidden_dropout_prob A : int = attention_probs_dropout_prob A : str = initializer_range A : Any = layer_norm_eps A : Any = image_size A : List[str] = patch_size A : Dict = num_channels A : int = qkv_bias A : Tuple = backbone_out_indices if readout_type not in ["ignore", "add", "project"]: raise ValueError('''Readout_type must be one of [\'ignore\', \'add\', \'project\']''' ) A : List[Any] = readout_type A : List[str] = reassemble_factors A : List[Any] = neck_hidden_sizes A : Tuple = fusion_hidden_size A : Any = head_in_index A : Tuple = use_batch_norm_in_fusion_residual # auxiliary head attributes (semantic segmentation) A : List[Any] = use_auxiliary_head A : Union[str, Any] = auxiliary_loss_weight A : Dict = semantic_loss_ignore_index A : Union[str, Any] = semantic_classifier_dropout def __lowerCAmelCase ( self ) -> str: """simple docstring""" A : Tuple = copy.deepcopy(self.__dict__ ) if output["backbone_config"] is not None: A : List[str] = self.backbone_config.to_dict() A : List[Any] = self.__class__.model_type return output
3
'''simple docstring''' import unittest from transformers import PegasusConfig, PegasusTokenizer, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html a__ : List[str] = 'platform' import jax import jax.numpy as jnp import numpy as np from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel @require_flax class UpperCAmelCase__ : __SCREAMING_SNAKE_CASE = PegasusConfig __SCREAMING_SNAKE_CASE = {} __SCREAMING_SNAKE_CASE = '''gelu''' def __init__( self , lowercase , lowercase=1_3 , lowercase=7 , lowercase=True , lowercase=False , lowercase=9_9 , lowercase=3_2 , lowercase=5 , lowercase=4 , lowercase=3_7 , lowercase=0.1 , lowercase=0.1 , lowercase=2_0 , lowercase=2 , lowercase=1 , lowercase=0 , ) -> Optional[Any]: __UpperCamelCase = parent __UpperCamelCase = batch_size __UpperCamelCase = seq_length __UpperCamelCase = is_training __UpperCamelCase = use_labels __UpperCamelCase = vocab_size __UpperCamelCase = hidden_size __UpperCamelCase = num_hidden_layers __UpperCamelCase = num_attention_heads __UpperCamelCase = intermediate_size __UpperCamelCase = hidden_dropout_prob __UpperCamelCase = attention_probs_dropout_prob __UpperCamelCase = max_position_embeddings __UpperCamelCase = eos_token_id __UpperCamelCase = pad_token_id __UpperCamelCase = bos_token_id def __lowerCamelCase ( self ) -> str: __UpperCamelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size ) __UpperCamelCase = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 ) __UpperCamelCase = np.concatenate([input_ids, eos_tensor] , axis=1 ) __UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCamelCase = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) __UpperCamelCase = prepare_pegasus_inputs_dict(lowercase , lowercase , lowercase ) return config, inputs_dict def __lowerCamelCase ( self , lowercase , lowercase , lowercase ) -> Dict: __UpperCamelCase = 2_0 __UpperCamelCase = model_class_name(lowercase ) __UpperCamelCase = model.encode(inputs_dict["""input_ids"""] ) __UpperCamelCase , __UpperCamelCase = ( inputs_dict["""decoder_input_ids"""], inputs_dict["""decoder_attention_mask"""], ) __UpperCamelCase = model.init_cache(decoder_input_ids.shape[0] , lowercase , lowercase ) __UpperCamelCase = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""" ) __UpperCamelCase = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) __UpperCamelCase = model.decode( decoder_input_ids[:, :-1] , lowercase , decoder_attention_mask=lowercase , past_key_values=lowercase , decoder_position_ids=lowercase , ) __UpperCamelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" ) __UpperCamelCase = model.decode( decoder_input_ids[:, -1:] , lowercase , decoder_attention_mask=lowercase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=lowercase , ) __UpperCamelCase = model.decode(lowercase , lowercase ) __UpperCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=f"Max diff is {diff}" ) def __lowerCamelCase ( self , lowercase , lowercase , lowercase ) -> Any: __UpperCamelCase = 2_0 __UpperCamelCase = model_class_name(lowercase ) __UpperCamelCase = model.encode(inputs_dict["""input_ids"""] ) __UpperCamelCase , __UpperCamelCase = ( inputs_dict["""decoder_input_ids"""], inputs_dict["""decoder_attention_mask"""], ) __UpperCamelCase = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ), ] , axis=-1 , ) __UpperCamelCase = model.init_cache(decoder_input_ids.shape[0] , lowercase , lowercase ) __UpperCamelCase = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) __UpperCamelCase = model.decode( decoder_input_ids[:, :-1] , lowercase , decoder_attention_mask=lowercase , past_key_values=lowercase , decoder_position_ids=lowercase , ) __UpperCamelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" ) __UpperCamelCase = model.decode( decoder_input_ids[:, -1:] , lowercase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=lowercase , decoder_position_ids=lowercase , ) __UpperCamelCase = model.decode(lowercase , lowercase , decoder_attention_mask=lowercase ) __UpperCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=f"Max diff is {diff}" ) def _lowercase ( __A ,__A ,__A ,__A=None ,__A=None ,): '''simple docstring''' if attention_mask is None: __UpperCamelCase = np.not_equal(__A ,config.pad_token_id ).astype(np.inta ) if decoder_attention_mask is None: __UpperCamelCase = np.concatenate( [ np.ones(decoder_input_ids[:, :1].shape ,dtype=np.inta ), np.not_equal(decoder_input_ids[:, 1:] ,config.pad_token_id ).astype(np.inta ), ] ,axis=-1 ,) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, } @require_flax class UpperCAmelCase__ ( UpperCAmelCase_ , unittest.TestCase): __SCREAMING_SNAKE_CASE = ( ( FlaxPegasusForConditionalGeneration, FlaxPegasusModel, ) if is_flax_available() else () ) __SCREAMING_SNAKE_CASE = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else () __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = False def __lowerCamelCase ( self ) -> Optional[Any]: __UpperCamelCase = FlaxPegasusModelTester(self ) __UpperCamelCase = ConfigTester(self , config_class=lowercase ) def __lowerCamelCase ( self ) -> List[Any]: self.config_tester.run_common_tests() def __lowerCamelCase ( self ) -> List[Any]: __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(lowercase , lowercase , lowercase ) def __lowerCamelCase ( self ) -> List[Any]: __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(lowercase , lowercase , lowercase ) def __lowerCamelCase ( self ) -> List[str]: __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): __UpperCamelCase = self._prepare_for_class(lowercase , lowercase ) __UpperCamelCase = model_class(lowercase ) @jax.jit def encode_jitted(lowercase , lowercase=None , **lowercase ): return model.encode(input_ids=lowercase , attention_mask=lowercase ) with self.subTest("""JIT Enabled""" ): __UpperCamelCase = encode_jitted(**lowercase ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): __UpperCamelCase = encode_jitted(**lowercase ).to_tuple() self.assertEqual(len(lowercase ) , len(lowercase ) ) for jitted_output, output in zip(lowercase , lowercase ): self.assertEqual(jitted_output.shape , output.shape ) def __lowerCamelCase ( self ) -> List[Any]: __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): __UpperCamelCase = model_class(lowercase ) __UpperCamelCase = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""] ) __UpperCamelCase = { """decoder_input_ids""": inputs_dict["""decoder_input_ids"""], """decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""], """encoder_outputs""": encoder_outputs, } @jax.jit def decode_jitted(lowercase , lowercase , lowercase ): return model.decode( decoder_input_ids=lowercase , decoder_attention_mask=lowercase , encoder_outputs=lowercase , ) with self.subTest("""JIT Enabled""" ): __UpperCamelCase = decode_jitted(**lowercase ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): __UpperCamelCase = decode_jitted(**lowercase ).to_tuple() self.assertEqual(len(lowercase ) , len(lowercase ) ) for jitted_output, output in zip(lowercase , lowercase ): self.assertEqual(jitted_output.shape , output.shape ) @slow def __lowerCamelCase ( self ) -> Dict: for model_class_name in self.all_model_classes: __UpperCamelCase = model_class_name.from_pretrained("""google/pegasus-large""" , from_pt=lowercase ) __UpperCamelCase = np.ones((1, 1) ) __UpperCamelCase = model(lowercase ) self.assertIsNotNone(lowercase ) @slow def __lowerCamelCase ( self ) -> str: __UpperCamelCase = FlaxPegasusForConditionalGeneration.from_pretrained("""google/pegasus-xsum""" ) __UpperCamelCase = PegasusTokenizer.from_pretrained("""google/pegasus-xsum""" ) __UpperCamelCase = [ """ PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""", """ The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """, ] __UpperCamelCase = [ """California's largest electricity provider has turned off power to hundreds of thousands of customers.""", """Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""", ] __UpperCamelCase = tokenizer(lowercase , return_tensors="""np""" , truncation=lowercase , max_length=5_1_2 , padding=lowercase ) __UpperCamelCase = model.generate(**lowercase , num_beams=2 ).sequences __UpperCamelCase = tokenizer.batch_decode(lowercase , skip_special_tokens=lowercase ) assert tgt_text == decoded
349
0
import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast @require_vision class a_ ( unittest.TestCase ): """simple docstring""" def _lowerCAmelCase ( self : Dict ): SCREAMING_SNAKE_CASE =tempfile.mkdtemp() SCREAMING_SNAKE_CASE =BlipImageProcessor() SCREAMING_SNAKE_CASE =BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-BertModel' ) SCREAMING_SNAKE_CASE =BlipProcessor(snake_case ,snake_case ) processor.save_pretrained(self.tmpdirname ) def _lowerCAmelCase ( self : List[str] ,**snake_case : Tuple ): return AutoProcessor.from_pretrained(self.tmpdirname ,**snake_case ).tokenizer def _lowerCAmelCase ( self : Optional[Any] ,**snake_case : Optional[Any] ): return AutoProcessor.from_pretrained(self.tmpdirname ,**snake_case ).image_processor def _lowerCAmelCase ( self : List[str] ): shutil.rmtree(self.tmpdirname ) def _lowerCAmelCase ( self : Tuple ): SCREAMING_SNAKE_CASE =[np.random.randint(255 ,size=(3, 30, 400) ,dtype=np.uinta )] SCREAMING_SNAKE_CASE =[Image.fromarray(np.moveaxis(snake_case ,0 ,-1 ) ) for x in image_inputs] return image_inputs def _lowerCAmelCase ( self : Union[str, Any] ): SCREAMING_SNAKE_CASE =BlipProcessor(tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) SCREAMING_SNAKE_CASE =self.get_tokenizer(bos_token='(BOS)' ,eos_token='(EOS)' ) SCREAMING_SNAKE_CASE =self.get_image_processor(do_normalize=snake_case ,padding_value=1.0 ) SCREAMING_SNAKE_CASE =BlipProcessor.from_pretrained( self.tmpdirname ,bos_token='(BOS)' ,eos_token='(EOS)' ,do_normalize=snake_case ,padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer ,snake_case ) self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor ,snake_case ) def _lowerCAmelCase ( self : Dict ): SCREAMING_SNAKE_CASE =self.get_image_processor() SCREAMING_SNAKE_CASE =self.get_tokenizer() SCREAMING_SNAKE_CASE =BlipProcessor(tokenizer=snake_case ,image_processor=snake_case ) SCREAMING_SNAKE_CASE =self.prepare_image_inputs() SCREAMING_SNAKE_CASE =image_processor(snake_case ,return_tensors='np' ) SCREAMING_SNAKE_CASE =processor(images=snake_case ,return_tensors='np' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1e-2 ) def _lowerCAmelCase ( self : Tuple ): SCREAMING_SNAKE_CASE =self.get_image_processor() SCREAMING_SNAKE_CASE =self.get_tokenizer() SCREAMING_SNAKE_CASE =BlipProcessor(tokenizer=snake_case ,image_processor=snake_case ) SCREAMING_SNAKE_CASE ='lower newer' SCREAMING_SNAKE_CASE =processor(text=snake_case ) SCREAMING_SNAKE_CASE =tokenizer(snake_case ,return_token_type_ids=snake_case ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] ,encoded_processor[key] ) def _lowerCAmelCase ( self : Optional[Any] ): SCREAMING_SNAKE_CASE =self.get_image_processor() SCREAMING_SNAKE_CASE =self.get_tokenizer() SCREAMING_SNAKE_CASE =BlipProcessor(tokenizer=snake_case ,image_processor=snake_case ) SCREAMING_SNAKE_CASE ='lower newer' SCREAMING_SNAKE_CASE =self.prepare_image_inputs() SCREAMING_SNAKE_CASE =processor(text=snake_case ,images=snake_case ) self.assertListEqual(list(inputs.keys() ) ,['pixel_values', 'input_ids', 'attention_mask'] ) # test if it raises when no input is passed with pytest.raises(snake_case ): processor() def _lowerCAmelCase ( self : Dict ): SCREAMING_SNAKE_CASE =self.get_image_processor() SCREAMING_SNAKE_CASE =self.get_tokenizer() SCREAMING_SNAKE_CASE =BlipProcessor(tokenizer=snake_case ,image_processor=snake_case ) SCREAMING_SNAKE_CASE =[[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] SCREAMING_SNAKE_CASE =processor.batch_decode(snake_case ) SCREAMING_SNAKE_CASE =tokenizer.batch_decode(snake_case ) self.assertListEqual(snake_case ,snake_case ) def _lowerCAmelCase ( self : List[str] ): SCREAMING_SNAKE_CASE =self.get_image_processor() SCREAMING_SNAKE_CASE =self.get_tokenizer() SCREAMING_SNAKE_CASE =BlipProcessor(tokenizer=snake_case ,image_processor=snake_case ) SCREAMING_SNAKE_CASE ='lower newer' SCREAMING_SNAKE_CASE =self.prepare_image_inputs() SCREAMING_SNAKE_CASE =processor(text=snake_case ,images=snake_case ) # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask'] self.assertListEqual(list(inputs.keys() ) ,['pixel_values', 'input_ids', 'attention_mask'] )
334
'''simple docstring''' import pytest a__ : List[str] = '__dummy_dataset1__' a__ : Optional[int] = '\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"\nURLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n "tokens": datasets.Sequence(datasets.Value("string")),\n "ner_tags": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n "O",\n "B-PER",\n "I-PER",\n "B-ORG",\n "I-ORG",\n "B-LOC",\n "I-LOC",\n ]\n )\n ),\n "langs": datasets.Sequence(datasets.Value("string")),\n "spans": datasets.Sequence(datasets.Value("string")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, "r", encoding="utf-8") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n' @pytest.fixture def _lowercase ( ): '''simple docstring''' return DATASET_LOADING_SCRIPT_NAME @pytest.fixture def _lowercase ( ): '''simple docstring''' return DATASET_LOADING_SCRIPT_CODE @pytest.fixture def _lowercase ( __A ,__A ,__A ): '''simple docstring''' __UpperCamelCase = dataset_loading_script_name __UpperCamelCase = tmp_path / """datasets""" / script_name script_dir.mkdir(parents=__A ) __UpperCamelCase = script_dir / f"{script_name}.py" with open(__A ,"""w""" ) as f: f.write(__A ) return str(__A )
349
0
from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import PIL from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available from .timesteps import ( fastaa_timesteps, smartaa_timesteps, smartaa_timesteps, smartaaa_timesteps, smartaaa_timesteps, superaa_timesteps, superaa_timesteps, superaaa_timesteps, ) @dataclass class A_ (UpperCAmelCase_ ): UpperCAmelCase__ = 4_2 UpperCAmelCase__ = 4_2 UpperCAmelCase__ = 4_2 try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipeline_if import IFPipeline from .pipeline_if_imgaimg import IFImgaImgPipeline from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline from .pipeline_if_inpainting import IFInpaintingPipeline from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline from .pipeline_if_superresolution import IFSuperResolutionPipeline from .safety_checker import IFSafetyChecker from .watermark import IFWatermarker
273
'''simple docstring''' import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConfig, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaForCTC, WavaVecaForPreTraining, WavaVecaProcessor, logging, ) from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification logging.set_verbosity_info() a__ : Any = logging.get_logger(__name__) a__ : Optional[int] = { 'post_extract_proj': 'feature_projection.projection', 'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv', 'self_attn.k_proj': 'encoder.layers.*.attention.k_proj', 'self_attn.v_proj': 'encoder.layers.*.attention.v_proj', 'self_attn.q_proj': 'encoder.layers.*.attention.q_proj', 'self_attn.out_proj': 'encoder.layers.*.attention.out_proj', 'self_attn_layer_norm': 'encoder.layers.*.layer_norm', 'fc1': 'encoder.layers.*.feed_forward.intermediate_dense', 'fc2': 'encoder.layers.*.feed_forward.output_dense', 'final_layer_norm': 'encoder.layers.*.final_layer_norm', 'encoder.layer_norm': 'encoder.layer_norm', 'adapter_layer': 'encoder.layers.*.adapter_layer', 'w2v_model.layer_norm': 'feature_projection.layer_norm', 'quantizer.weight_proj': 'quantizer.weight_proj', 'quantizer.vars': 'quantizer.codevectors', 'project_q': 'project_q', 'final_proj': 'project_hid', 'w2v_encoder.proj': 'lm_head', 'mask_emb': 'masked_spec_embed', 'pooling_layer.linear': 'projector', 'pooling_layer.projection': 'classifier', } a__ : List[str] = [ 'lm_head', 'quantizer.weight_proj', 'quantizer.codevectors', 'project_q', 'project_hid', 'projector', 'classifier', ] def _lowercase ( __A ): '''simple docstring''' __UpperCamelCase = {} with open(__A ,"""r""" ) as file: for line_number, line in enumerate(__A ): __UpperCamelCase = line.strip() if line: __UpperCamelCase = line.split() __UpperCamelCase = line_number __UpperCamelCase = words[0] __UpperCamelCase = value return result def _lowercase ( __A ,__A ,__A ,__A ,__A ): '''simple docstring''' for attribute in key.split(""".""" ): __UpperCamelCase = getattr(__A ,__A ) __UpperCamelCase = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(__A ): __UpperCamelCase = PARAM_MAPPING[full_name.split(""".""" )[-1]] __UpperCamelCase = """param""" if weight_type is not None and weight_type != "param": __UpperCamelCase = getattr(__A ,__A ).shape elif weight_type is not None and weight_type == "param": __UpperCamelCase = hf_pointer for attribute in hf_param_name.split(""".""" ): __UpperCamelCase = getattr(__A ,__A ) __UpperCamelCase = shape_pointer.shape # let's reduce dimension __UpperCamelCase = value[0] else: __UpperCamelCase = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be" f" {value.shape} for {full_name}" ) if weight_type == "weight": __UpperCamelCase = value elif weight_type == "weight_g": __UpperCamelCase = value elif weight_type == "weight_v": __UpperCamelCase = value elif weight_type == "bias": __UpperCamelCase = value elif weight_type == "param": for attribute in hf_param_name.split(""".""" ): __UpperCamelCase = getattr(__A ,__A ) __UpperCamelCase = value else: __UpperCamelCase = value logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." ) def _lowercase ( __A ,__A ,__A ,__A ,__A ): '''simple docstring''' __UpperCamelCase = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(__A ): __UpperCamelCase = PARAM_MAPPING[full_name.split(""".""" )[-1]] __UpperCamelCase = """param""" if weight_type is not None and weight_type != "param": __UpperCamelCase = """.""".join([key, weight_type] ) elif weight_type is not None and weight_type == "param": __UpperCamelCase = """.""".join([key, hf_param_name] ) else: __UpperCamelCase = key __UpperCamelCase = value if """lm_head""" in full_key else value[0] a__ : Dict = { 'W_a': 'linear_1.weight', 'W_b': 'linear_2.weight', 'b_a': 'linear_1.bias', 'b_b': 'linear_2.bias', 'ln_W': 'norm.weight', 'ln_b': 'norm.bias', } def _lowercase ( __A ,__A ,__A=None ,__A=None ): '''simple docstring''' __UpperCamelCase = False for key, mapped_key in MAPPING.items(): __UpperCamelCase = """wav2vec2.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]: __UpperCamelCase = True if "*" in mapped_key: __UpperCamelCase = name.split(__A )[0].split(""".""" )[-2] __UpperCamelCase = mapped_key.replace("""*""" ,__A ) if "weight_g" in name: __UpperCamelCase = """weight_g""" elif "weight_v" in name: __UpperCamelCase = """weight_v""" elif "bias" in name: __UpperCamelCase = """bias""" elif "weight" in name: # TODO: don't match quantizer.weight_proj __UpperCamelCase = """weight""" else: __UpperCamelCase = None if hf_dict is not None: rename_dict(__A ,__A ,__A ,__A ,__A ) else: set_recursively(__A ,__A ,__A ,__A ,__A ) return is_used return is_used def _lowercase ( __A ,__A ,__A ): '''simple docstring''' __UpperCamelCase = [] __UpperCamelCase = fairseq_model.state_dict() __UpperCamelCase = hf_model.wavaveca.feature_extractor for name, value in fairseq_dict.items(): __UpperCamelCase = False if "conv_layers" in name: load_conv_layer( __A ,__A ,__A ,__A ,hf_model.config.feat_extract_norm == """group""" ,) __UpperCamelCase = True else: __UpperCamelCase = load_wavaveca_layer(__A ,__A ,__A ) if not is_used: unused_weights.append(__A ) logger.warning(f"Unused weights: {unused_weights}" ) def _lowercase ( __A ,__A ,__A ,__A ,__A ): '''simple docstring''' __UpperCamelCase = full_name.split("""conv_layers.""" )[-1] __UpperCamelCase = name.split(""".""" ) __UpperCamelCase = int(items[0] ) __UpperCamelCase = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f"{full_name} has size {value.shape}, but" f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." ) __UpperCamelCase = value logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f"{full_name} has size {value.shape}, but" f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." ) __UpperCamelCase = value logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f"{full_name} has size {value.shape}, but" f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found." ) __UpperCamelCase = value logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f"{full_name} has size {value.shape}, but" f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found." ) __UpperCamelCase = value logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." ) else: unused_weights.append(__A ) @torch.no_grad() def _lowercase ( __A ,__A ,__A=None ,__A=None ,__A=True ,__A=False ): '''simple docstring''' if config_path is not None: __UpperCamelCase = WavaVecaConfig.from_pretrained(__A ) else: __UpperCamelCase = WavaVecaConfig() if is_seq_class: __UpperCamelCase = read_txt_into_dict(__A ) __UpperCamelCase = idalabel __UpperCamelCase = WavaVecaForSequenceClassification(__A ) __UpperCamelCase = WavaVecaFeatureExtractor( feature_size=1 ,sampling_rate=16_000 ,padding_value=0 ,do_normalize=__A ,return_attention_mask=__A ,) feature_extractor.save_pretrained(__A ) elif is_finetuned: if dict_path: __UpperCamelCase = Dictionary.load(__A ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq __UpperCamelCase = target_dict.pad_index __UpperCamelCase = target_dict.bos_index __UpperCamelCase = target_dict.eos_index __UpperCamelCase = len(target_dict.symbols ) __UpperCamelCase = os.path.join(__A ,"""vocab.json""" ) if not os.path.isdir(__A ): logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(__A ) ) return os.makedirs(__A ,exist_ok=__A ) __UpperCamelCase = target_dict.indices # fairseq has the <pad> and <s> switched __UpperCamelCase = 0 __UpperCamelCase = 1 with open(__A ,"""w""" ,encoding="""utf-8""" ) as vocab_handle: json.dump(__A ,__A ) __UpperCamelCase = WavaVecaCTCTokenizer( __A ,unk_token=target_dict.unk_word ,pad_token=target_dict.pad_word ,bos_token=target_dict.bos_word ,eos_token=target_dict.eos_word ,word_delimiter_token="""|""" ,do_lower_case=__A ,) __UpperCamelCase = True if config.feat_extract_norm == """layer""" else False __UpperCamelCase = WavaVecaFeatureExtractor( feature_size=1 ,sampling_rate=16_000 ,padding_value=0 ,do_normalize=__A ,return_attention_mask=__A ,) __UpperCamelCase = WavaVecaProcessor(feature_extractor=__A ,tokenizer=__A ) processor.save_pretrained(__A ) __UpperCamelCase = WavaVecaForCTC(__A ) else: __UpperCamelCase = WavaVecaForPreTraining(__A ) if is_finetuned or is_seq_class: __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] ,arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} ) else: __UpperCamelCase = argparse.Namespace(task="""audio_pretraining""" ) __UpperCamelCase = fairseq.tasks.setup_task(__A ) __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ,task=__A ) __UpperCamelCase = model[0].eval() recursively_load_weights(__A ,__A ,not is_finetuned ) hf_wavavec.save_pretrained(__A ) if __name__ == "__main__": a__ : int = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') parser.add_argument( '--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not' ) parser.add_argument( '--is_seq_class', action='store_true', help='Whether the model to convert is a fine-tuned sequence classification model or not', ) a__ : Optional[int] = parser.parse_args() a__ : str = not args.not_finetuned and not args.is_seq_class convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, is_finetuned, args.is_seq_class, )
349
0
import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'} lowerCamelCase__ = { 'tokenizer_file': { 'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json', }, } lowerCamelCase__ = { 'gpt-neox-20b': 2048, } class SCREAMING_SNAKE_CASE ( UpperCAmelCase_ ): __lowerCamelCase : Any =VOCAB_FILES_NAMES __lowerCamelCase : int =PRETRAINED_VOCAB_FILES_MAP __lowerCamelCase : int =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowerCamelCase : int =['input_ids', 'attention_mask'] def __init__( self : int , __lowercase : Any=None , __lowercase : Tuple=None , __lowercase : Dict=None , __lowercase : Tuple="<|endoftext|>" , __lowercase : Optional[Any]="<|endoftext|>" , __lowercase : str="<|endoftext|>" , __lowercase : List[str]=False , **__lowercase : Optional[Any] , ): '''simple docstring''' super().__init__( __lowercase , __lowercase , tokenizer_file=__lowercase , unk_token=__lowercase , bos_token=__lowercase , eos_token=__lowercase , add_prefix_space=__lowercase , **__lowercase , ) __a = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("""add_prefix_space""" , __lowercase ) != add_prefix_space: __a = getattr(__lowercase , pre_tok_state.pop("""type""" ) ) __a = add_prefix_space __a = pre_tok_class(**__lowercase ) __a = add_prefix_space def UpperCamelCase_ ( self : int , __lowercase : Optional[int] , __lowercase : Optional[Any] = None ): '''simple docstring''' __a = self._tokenizer.model.save(__lowercase , name=__lowercase ) return tuple(__lowercase ) def UpperCamelCase_ ( self : Optional[Any] , __lowercase : Any ): '''simple docstring''' __a = [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(__lowercase , add_special_tokens=__lowercase ) + [self.eos_token_id] ) if len(__lowercase ) > self.model_max_length: __a = input_ids[-self.model_max_length :] return input_ids
302
'''simple docstring''' from __future__ import annotations import unittest from transformers import DistilBertConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.distilbert.modeling_tf_distilbert import ( TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDistilBertForMaskedLM, TFDistilBertForMultipleChoice, TFDistilBertForQuestionAnswering, TFDistilBertForSequenceClassification, TFDistilBertForTokenClassification, TFDistilBertModel, ) class UpperCAmelCase__ : def __init__( self , lowercase , ) -> Union[str, Any]: __UpperCamelCase = parent __UpperCamelCase = 1_3 __UpperCamelCase = 7 __UpperCamelCase = True __UpperCamelCase = True __UpperCamelCase = False __UpperCamelCase = True __UpperCamelCase = 9_9 __UpperCamelCase = 3_2 __UpperCamelCase = 2 __UpperCamelCase = 4 __UpperCamelCase = 3_7 __UpperCamelCase = """gelu""" __UpperCamelCase = 0.1 __UpperCamelCase = 0.1 __UpperCamelCase = 5_1_2 __UpperCamelCase = 1_6 __UpperCamelCase = 2 __UpperCamelCase = 0.02 __UpperCamelCase = 3 __UpperCamelCase = 4 __UpperCamelCase = None def __lowerCamelCase ( self ) -> List[str]: __UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCamelCase = None if self.use_input_mask: __UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] ) __UpperCamelCase = None __UpperCamelCase = None __UpperCamelCase = None if self.use_labels: __UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices ) __UpperCamelCase = DistilBertConfig( vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , ) return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def __lowerCamelCase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Dict: __UpperCamelCase = TFDistilBertModel(config=lowercase ) __UpperCamelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask} __UpperCamelCase = model(lowercase ) __UpperCamelCase = [input_ids, input_mask] __UpperCamelCase = model(lowercase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __lowerCamelCase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Optional[Any]: __UpperCamelCase = TFDistilBertForMaskedLM(config=lowercase ) __UpperCamelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask} __UpperCamelCase = model(lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __lowerCamelCase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Tuple: __UpperCamelCase = TFDistilBertForQuestionAnswering(config=lowercase ) __UpperCamelCase = { """input_ids""": input_ids, """attention_mask""": input_mask, } __UpperCamelCase = model(lowercase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __lowerCamelCase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Tuple: __UpperCamelCase = self.num_labels __UpperCamelCase = TFDistilBertForSequenceClassification(lowercase ) __UpperCamelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask} __UpperCamelCase = model(lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __lowerCamelCase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> int: __UpperCamelCase = self.num_choices __UpperCamelCase = TFDistilBertForMultipleChoice(lowercase ) __UpperCamelCase = tf.tile(tf.expand_dims(lowercase , 1 ) , (1, self.num_choices, 1) ) __UpperCamelCase = tf.tile(tf.expand_dims(lowercase , 1 ) , (1, self.num_choices, 1) ) __UpperCamelCase = { """input_ids""": multiple_choice_inputs_ids, """attention_mask""": multiple_choice_input_mask, } __UpperCamelCase = model(lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __lowerCamelCase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Optional[int]: __UpperCamelCase = self.num_labels __UpperCamelCase = TFDistilBertForTokenClassification(lowercase ) __UpperCamelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask} __UpperCamelCase = model(lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __lowerCamelCase ( self ) -> Dict: __UpperCamelCase = self.prepare_config_and_inputs() ((__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase)) = config_and_inputs __UpperCamelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_tf class UpperCAmelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase): __SCREAMING_SNAKE_CASE = ( ( TFDistilBertModel, TFDistilBertForMaskedLM, TFDistilBertForQuestionAnswering, TFDistilBertForSequenceClassification, TFDistilBertForTokenClassification, TFDistilBertForMultipleChoice, ) if is_tf_available() else None ) __SCREAMING_SNAKE_CASE = ( { '''feature-extraction''': TFDistilBertModel, '''fill-mask''': TFDistilBertForMaskedLM, '''question-answering''': TFDistilBertForQuestionAnswering, '''text-classification''': TFDistilBertForSequenceClassification, '''token-classification''': TFDistilBertForTokenClassification, '''zero-shot''': TFDistilBertForSequenceClassification, } if is_tf_available() else {} ) __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = False def __lowerCamelCase ( self ) -> Optional[Any]: __UpperCamelCase = TFDistilBertModelTester(self ) __UpperCamelCase = ConfigTester(self , config_class=lowercase , dim=3_7 ) def __lowerCamelCase ( self ) -> Any: self.config_tester.run_common_tests() def __lowerCamelCase ( self ) -> Dict: __UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_model(*lowercase ) def __lowerCamelCase ( self ) -> Union[str, Any]: __UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_masked_lm(*lowercase ) def __lowerCamelCase ( self ) -> int: __UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_question_answering(*lowercase ) def __lowerCamelCase ( self ) -> Any: __UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_sequence_classification(*lowercase ) def __lowerCamelCase ( self ) -> Optional[Any]: __UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_multiple_choice(*lowercase ) def __lowerCamelCase ( self ) -> Union[str, Any]: __UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_token_classification(*lowercase ) @slow def __lowerCamelCase ( self ) -> Tuple: for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ): __UpperCamelCase = TFDistilBertModel.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) @require_tf class UpperCAmelCase__ ( unittest.TestCase): @slow def __lowerCamelCase ( self ) -> Optional[int]: __UpperCamelCase = TFDistilBertModel.from_pretrained("""distilbert-base-uncased""" ) __UpperCamelCase = tf.constant([[0, 1, 2, 3, 4, 5]] ) __UpperCamelCase = model(lowercase )[0] __UpperCamelCase = [1, 6, 7_6_8] self.assertEqual(output.shape , lowercase ) __UpperCamelCase = tf.constant( [ [ [0.19_261_885, -0.13_732_955, 0.4_119_799], [0.22_150_156, -0.07_422_661, 0.39_037_204], [0.22_756_018, -0.0_896_414, 0.3_701_467], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , lowercase , atol=1E-4 )
349
0
"""simple docstring""" import asyncio import os import re import sys import tempfile import unittest from contextlib import contextmanager from copy import deepcopy from distutils.util import strtobool from enum import Enum from importlib.util import find_spec from pathlib import Path from unittest.mock import patch import pyarrow as pa import pytest import requests from packaging import version from datasets import config if config.PY_VERSION < version.parse("""3.8"""): import importlib_metadata else: import importlib.metadata as importlib_metadata def __lowerCamelCase ( __UpperCamelCase , __UpperCamelCase=False ) -> List[Any]: """simple docstring""" try: lowerCAmelCase_ : Optional[int] = os.environ[key] except KeyError: # KEY isn't set, default to `default`. lowerCAmelCase_ : Dict = default else: # KEY is set, convert it to True or False. try: lowerCAmelCase_ : Tuple = strtobool(__A ) except ValueError: # More values are supported, but let's keep the message simple. raise ValueError(f'''If set, {key} must be yes or no.''' ) return _value lowercase__ = parse_flag_from_env("""RUN_SLOW""", default=False) lowercase__ = parse_flag_from_env("""RUN_REMOTE""", default=False) lowercase__ = parse_flag_from_env("""RUN_LOCAL""", default=True) lowercase__ = parse_flag_from_env("""RUN_PACKAGED""", default=True) # Compression lowercase__ = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="""test requires lz4""") lowercase__ = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="""test requires py7zr""") lowercase__ = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="""test requires zstandard""") # Audio lowercase__ = pytest.mark.skipif( # On Windows and OS X, soundfile installs sndfile find_spec("""soundfile""") is None or version.parse(importlib_metadata.version("""soundfile""")) < version.parse("""0.12.0"""), reason="""test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; """, ) # Beam lowercase__ = pytest.mark.skipif( not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("""0.3.2"""), reason="""test requires apache-beam and a compatible dill version""", ) # Dill-cloudpickle compatibility lowercase__ = pytest.mark.skipif( config.DILL_VERSION <= version.parse("""0.3.2"""), reason="""test requires dill>0.3.2 for cloudpickle compatibility""", ) # Windows lowercase__ = pytest.mark.skipif( sys.platform == """win32""", reason="""test should not be run on Windows""", ) def __lowerCamelCase ( __UpperCamelCase ) -> Dict: """simple docstring""" try: import faiss # noqa except ImportError: lowerCAmelCase_ : Optional[Any] = unittest.skip("test requires faiss" )(__A ) return test_case def __lowerCamelCase ( __UpperCamelCase ) -> Dict: """simple docstring""" try: import regex # noqa except ImportError: lowerCAmelCase_ : Optional[Any] = unittest.skip("test requires regex" )(__A ) return test_case def __lowerCamelCase ( __UpperCamelCase ) -> Optional[Any]: """simple docstring""" try: import elasticsearch # noqa except ImportError: lowerCAmelCase_ : Any = unittest.skip("test requires elasticsearch" )(__A ) return test_case def __lowerCamelCase ( __UpperCamelCase ) -> List[Any]: """simple docstring""" try: import sqlalchemy # noqa except ImportError: lowerCAmelCase_ : Any = unittest.skip("test requires sqlalchemy" )(__A ) return test_case def __lowerCamelCase ( __UpperCamelCase ) -> Optional[int]: """simple docstring""" if not config.TORCH_AVAILABLE: lowerCAmelCase_ : List[str] = unittest.skip("test requires PyTorch" )(__A ) return test_case def __lowerCamelCase ( __UpperCamelCase ) -> Tuple: """simple docstring""" if not config.TF_AVAILABLE: lowerCAmelCase_ : Optional[int] = unittest.skip("test requires TensorFlow" )(__A ) return test_case def __lowerCamelCase ( __UpperCamelCase ) -> List[Any]: """simple docstring""" if not config.JAX_AVAILABLE: lowerCAmelCase_ : int = unittest.skip("test requires JAX" )(__A ) return test_case def __lowerCamelCase ( __UpperCamelCase ) -> Any: """simple docstring""" if not config.PIL_AVAILABLE: lowerCAmelCase_ : int = unittest.skip("test requires Pillow" )(__A ) return test_case def __lowerCamelCase ( __UpperCamelCase ) -> str: """simple docstring""" try: import transformers # noqa F401 except ImportError: return unittest.skip("test requires transformers" )(__A ) else: return test_case def __lowerCamelCase ( __UpperCamelCase ) -> List[str]: """simple docstring""" try: import tiktoken # noqa F401 except ImportError: return unittest.skip("test requires tiktoken" )(__A ) else: return test_case def __lowerCamelCase ( __UpperCamelCase ) -> Optional[Any]: """simple docstring""" try: import spacy # noqa F401 except ImportError: return unittest.skip("test requires spacy" )(__A ) else: return test_case def __lowerCamelCase ( __UpperCamelCase ) -> List[str]: """simple docstring""" def _require_spacy_model(__UpperCamelCase ): try: import spacy # noqa F401 spacy.load(__A ) except ImportError: return unittest.skip("test requires spacy" )(__A ) except OSError: return unittest.skip("test requires spacy model '{}'".format(__A ) )(__A ) else: return test_case return _require_spacy_model def __lowerCamelCase ( __UpperCamelCase ) -> Tuple: """simple docstring""" try: import pyspark # noqa F401 except ImportError: return unittest.skip("test requires pyspark" )(__A ) else: return test_case def __lowerCamelCase ( __UpperCamelCase ) -> Any: """simple docstring""" try: import joblibspark # noqa F401 except ImportError: return unittest.skip("test requires joblibspark" )(__A ) else: return test_case def __lowerCamelCase ( __UpperCamelCase ) -> str: """simple docstring""" if not _run_slow_tests or _run_slow_tests == 0: lowerCAmelCase_ : Any = unittest.skip("test is slow" )(__A ) return test_case def __lowerCamelCase ( __UpperCamelCase ) -> Any: """simple docstring""" if not _run_local_tests or _run_local_tests == 0: lowerCAmelCase_ : Dict = unittest.skip("test is local" )(__A ) return test_case def __lowerCamelCase ( __UpperCamelCase ) -> Union[str, Any]: """simple docstring""" if not _run_packaged_tests or _run_packaged_tests == 0: lowerCAmelCase_ : Tuple = unittest.skip("test is packaged" )(__A ) return test_case def __lowerCamelCase ( __UpperCamelCase ) -> str: """simple docstring""" if not _run_remote_tests or _run_remote_tests == 0: lowerCAmelCase_ : Tuple = unittest.skip("test requires remote" )(__A ) return test_case def __lowerCamelCase ( *__UpperCamelCase ) -> Any: """simple docstring""" def decorate(cls ): for name, fn in cls.__dict__.items(): if callable(__A ) and name.startswith("test" ): for decorator in decorators: lowerCAmelCase_ : List[Any] = decorator(__A ) setattr(cls , __A , __A ) return cls return decorate class __lowerCamelCase ( UpperCAmelCase_ ): '''simple docstring''' pass class __lowerCamelCase ( UpperCAmelCase_ ): '''simple docstring''' a_ : Any = 0 a_ : Tuple = 1 a_ : Optional[Any] = 2 @contextmanager def __lowerCamelCase ( __UpperCamelCase=OfflineSimulationMode.CONNECTION_FAILS , __UpperCamelCase=1e-16 ) -> Optional[int]: """simple docstring""" lowerCAmelCase_ : Optional[Any] = requests.Session().request def timeout_request(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ): # Change the url to an invalid url so that the connection hangs lowerCAmelCase_ : int = "https://10.255.255.1" if kwargs.get("timeout" ) is None: raise RequestWouldHangIndefinitelyError( f'''Tried a call to {url} in offline mode with no timeout set. Please set a timeout.''' ) lowerCAmelCase_ : int = timeout try: return online_request(__A , __A , **__A ) except Exception as e: # The following changes in the error are just here to make the offline timeout error prettier lowerCAmelCase_ : List[Any] = url lowerCAmelCase_ : List[str] = e.args[0] lowerCAmelCase_ : Optional[int] = (max_retry_error.args[0].replace("10.255.255.1" , f'''OfflineMock[{url}]''' ),) lowerCAmelCase_ : Union[str, Any] = (max_retry_error,) raise def raise_connection_error(__UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ): raise requests.ConnectionError("Offline mode is enabled." , request=__A ) if mode is OfflineSimulationMode.CONNECTION_FAILS: with patch("requests.Session.send" , __A ): yield elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT: # inspired from https://stackoverflow.com/a/904609 with patch("requests.Session.request" , __A ): yield elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1: with patch("datasets.config.HF_DATASETS_OFFLINE" , __A ): yield else: raise ValueError("Please use a value from the OfflineSimulationMode enum." ) @contextmanager def __lowerCamelCase ( *__UpperCamelCase , **__UpperCamelCase ) -> str: """simple docstring""" lowerCAmelCase_ : Optional[int] = str(Path().resolve() ) with tempfile.TemporaryDirectory(*__A , **__A ) as tmp_dir: try: os.chdir(__A ) yield finally: os.chdir(__A ) @contextmanager def __lowerCamelCase ( ) -> int: """simple docstring""" import gc gc.collect() lowerCAmelCase_ : List[Any] = pa.total_allocated_bytes() yield assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase." @contextmanager def __lowerCamelCase ( ) -> List[str]: """simple docstring""" import gc gc.collect() lowerCAmelCase_ : Tuple = pa.total_allocated_bytes() yield assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase." def __lowerCamelCase ( __UpperCamelCase , __UpperCamelCase ) -> str: """simple docstring""" return deepcopy(__A ).integers(0 , 100 , 10 ).tolist() == deepcopy(__A ).integers(0 , 100 , 10 ).tolist() def __lowerCamelCase ( __UpperCamelCase ) -> Dict: """simple docstring""" import decorator from requests.exceptions import HTTPError def _wrapper(__UpperCamelCase , *__UpperCamelCase , **__UpperCamelCase ): try: return func(*__A , **__A ) except HTTPError as err: if str(__A ).startswith("500" ) or str(__A ).startswith("502" ): pytest.xfail(str(__A ) ) raise err return decorator.decorator(_wrapper , __A ) class __lowerCamelCase : '''simple docstring''' def __init__( self : str , a_ : List[Any] , a_ : int , a_ : Optional[int] ): lowerCAmelCase_ : Optional[int] = returncode lowerCAmelCase_ : str = stdout lowerCAmelCase_ : List[str] = stderr async def __lowerCamelCase ( __UpperCamelCase , __UpperCamelCase ) -> int: """simple docstring""" while True: lowerCAmelCase_ : Any = await stream.readline() if line: callback(__A ) else: break async def __lowerCamelCase ( __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=False , __UpperCamelCase=False ) -> Optional[int]: """simple docstring""" if echo: print("\nRunning: " , " ".join(__A ) ) lowerCAmelCase_ : Dict = await asyncio.create_subprocess_exec( cmd[0] , *cmd[1:] , stdin=__A , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=__A , ) # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait # # If it starts hanging, will need to switch to the following code. The problem is that no data # will be seen until it's done and if it hangs for example there will be no debug info. # out, err = await p.communicate() # return _RunOutput(p.returncode, out, err) lowerCAmelCase_ : List[Any] = [] lowerCAmelCase_ : int = [] def tee(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase="" ): lowerCAmelCase_ : List[Any] = line.decode("utf-8" ).rstrip() sink.append(__A ) if not quiet: print(__A , __A , file=__A ) # XXX: the timeout doesn't seem to make any difference here await asyncio.wait( [ _read_stream(p.stdout , lambda __UpperCamelCase : tee(__A , __A , sys.stdout , label="stdout:" ) ), _read_stream(p.stderr , lambda __UpperCamelCase : tee(__A , __A , sys.stderr , label="stderr:" ) ), ] , timeout=__A , ) return _RunOutput(await p.wait() , __A , __A ) def __lowerCamelCase ( __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=180 , __UpperCamelCase=False , __UpperCamelCase=True ) -> Union[str, Any]: """simple docstring""" lowerCAmelCase_ : Tuple = asyncio.get_event_loop() lowerCAmelCase_ : Any = loop.run_until_complete( _stream_subprocess(__A , env=__A , stdin=__A , timeout=__A , quiet=__A , echo=__A ) ) lowerCAmelCase_ : Union[str, Any] = " ".join(__A ) if result.returncode > 0: lowerCAmelCase_ : str = "\n".join(result.stderr ) raise RuntimeError( f'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n''' f'''The combined stderr from workers follows:\n{stderr}''' ) # check that the subprocess actually did run and produced some output, should the test rely on # the remote side to do the testing if not result.stdout and not result.stderr: raise RuntimeError(f'''\'{cmd_str}\' produced no output.''' ) return result def __lowerCamelCase ( ) -> Optional[int]: """simple docstring""" lowerCAmelCase_ : List[Any] = os.environ.get("PYTEST_XDIST_WORKER" , "gw0" ) lowerCAmelCase_ : Dict = re.sub(r"^gw" , "" , __A , 0 , re.M ) return int(__A ) def __lowerCamelCase ( ) -> Optional[Any]: """simple docstring""" lowerCAmelCase_ : Union[str, Any] = 29500 lowerCAmelCase_ : List[Any] = pytest_xdist_worker_id() return port + uniq_delta
241
'''simple docstring''' from __future__ import annotations import math import numpy as np from numpy.linalg import norm def _lowercase ( __A ,__A ): '''simple docstring''' return math.sqrt(sum(pow(a - b ,2 ) for a, b in zip(__A ,__A ) ) ) def _lowercase ( __A ,__A ): '''simple docstring''' if dataset.ndim != value_array.ndim: __UpperCamelCase = ( """Wrong input data's dimensions... """ f"dataset : {dataset.ndim}, value_array : {value_array.ndim}" ) raise ValueError(__A ) try: if dataset.shape[1] != value_array.shape[1]: __UpperCamelCase = ( """Wrong input data's shape... """ f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}" ) raise ValueError(__A ) except IndexError: if dataset.ndim != value_array.ndim: raise TypeError("""Wrong shape""" ) if dataset.dtype != value_array.dtype: __UpperCamelCase = ( """Input data have different datatype... """ f"dataset : {dataset.dtype}, value_array : {value_array.dtype}" ) raise TypeError(__A ) __UpperCamelCase = [] for value in value_array: __UpperCamelCase = euclidean(__A ,dataset[0] ) __UpperCamelCase = dataset[0].tolist() for dataset_value in dataset[1:]: __UpperCamelCase = euclidean(__A ,__A ) if dist > temp_dist: __UpperCamelCase = temp_dist __UpperCamelCase = dataset_value.tolist() answer.append([vector, dist] ) return answer def _lowercase ( __A ,__A ): '''simple docstring''' return np.dot(__A ,__A ) / (norm(__A ) * norm(__A )) if __name__ == "__main__": import doctest doctest.testmod()
349
0
from bisect import bisect from itertools import accumulate def lowerCAmelCase__( lowercase : Tuple , lowercase : str , lowercase : Union[str, Any] , lowercase : Dict ) -> str: __snake_case : Optional[int] = sorted(zip(__A , __A ) , key=lambda lowercase : x[0] / x[1] , reverse=__A ) __snake_case , __snake_case : Optional[int] = [i[0] for i in r], [i[1] for i in r] __snake_case : int = list(accumulate(__A ) ) __snake_case : List[str] = bisect(__A , __A ) return ( 0 if k == 0 else sum(vl[:k] ) + (w - acc[k - 1]) * (vl[k]) / (wt[k]) if k != n else sum(vl[:k] ) ) if __name__ == "__main__": import doctest doctest.testmod()
326
'''simple docstring''' from datetime import datetime import requests def _lowercase ( __A ): '''simple docstring''' __UpperCamelCase = """https://downloadgram.net/wp-json/wppress/video-downloader/video?url=""" __UpperCamelCase = requests.get(base_url + url ).json()[0]["""urls"""][0]["""src"""] return requests.get(__A ).content if __name__ == "__main__": a__ : int = input('Enter Video/IGTV url: ').strip() a__ : int = f'''{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4''' with open(file_name, 'wb') as fp: fp.write(download_video(url)) print(f'''Done. Video saved to disk as {file_name}.''')
349
0
"""simple docstring""" print((lambda quine: quine % quine)('print((lambda quine: quine %% quine)(%r))'))
155
'''simple docstring''' import asyncio import os import re import sys import tempfile import unittest from contextlib import contextmanager from copy import deepcopy from distutils.util import strtobool from enum import Enum from importlib.util import find_spec from pathlib import Path from unittest.mock import patch import pyarrow as pa import pytest import requests from packaging import version from datasets import config if config.PY_VERSION < version.parse('3.8'): import importlib_metadata else: import importlib.metadata as importlib_metadata def _lowercase ( __A ,__A=False ): '''simple docstring''' try: __UpperCamelCase = os.environ[key] except KeyError: # KEY isn't set, default to `default`. __UpperCamelCase = default else: # KEY is set, convert it to True or False. try: __UpperCamelCase = strtobool(__A ) except ValueError: # More values are supported, but let's keep the message simple. raise ValueError(f"If set, {key} must be yes or no." ) return _value a__ : Optional[Any] = parse_flag_from_env('RUN_SLOW', default=False) a__ : Union[str, Any] = parse_flag_from_env('RUN_REMOTE', default=False) a__ : Any = parse_flag_from_env('RUN_LOCAL', default=True) a__ : List[Any] = parse_flag_from_env('RUN_PACKAGED', default=True) # Compression a__ : Optional[int] = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='test requires lz4') a__ : Optional[int] = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='test requires py7zr') a__ : Optional[Any] = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='test requires zstandard') # Audio a__ : List[Any] = pytest.mark.skipif( # On Windows and OS X, soundfile installs sndfile find_spec('soundfile') is None or version.parse(importlib_metadata.version('soundfile')) < version.parse('0.12.0'), reason='test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ', ) # Beam a__ : str = pytest.mark.skipif( not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('0.3.2'), reason='test requires apache-beam and a compatible dill version', ) # Dill-cloudpickle compatibility a__ : str = pytest.mark.skipif( config.DILL_VERSION <= version.parse('0.3.2'), reason='test requires dill>0.3.2 for cloudpickle compatibility', ) # Windows a__ : Tuple = pytest.mark.skipif( sys.platform == 'win32', reason='test should not be run on Windows', ) def _lowercase ( __A ): '''simple docstring''' try: import faiss # noqa except ImportError: __UpperCamelCase = unittest.skip("""test requires faiss""" )(__A ) return test_case def _lowercase ( __A ): '''simple docstring''' try: import regex # noqa except ImportError: __UpperCamelCase = unittest.skip("""test requires regex""" )(__A ) return test_case def _lowercase ( __A ): '''simple docstring''' try: import elasticsearch # noqa except ImportError: __UpperCamelCase = unittest.skip("""test requires elasticsearch""" )(__A ) return test_case def _lowercase ( __A ): '''simple docstring''' try: import sqlalchemy # noqa except ImportError: __UpperCamelCase = unittest.skip("""test requires sqlalchemy""" )(__A ) return test_case def _lowercase ( __A ): '''simple docstring''' if not config.TORCH_AVAILABLE: __UpperCamelCase = unittest.skip("""test requires PyTorch""" )(__A ) return test_case def _lowercase ( __A ): '''simple docstring''' if not config.TF_AVAILABLE: __UpperCamelCase = unittest.skip("""test requires TensorFlow""" )(__A ) return test_case def _lowercase ( __A ): '''simple docstring''' if not config.JAX_AVAILABLE: __UpperCamelCase = unittest.skip("""test requires JAX""" )(__A ) return test_case def _lowercase ( __A ): '''simple docstring''' if not config.PIL_AVAILABLE: __UpperCamelCase = unittest.skip("""test requires Pillow""" )(__A ) return test_case def _lowercase ( __A ): '''simple docstring''' try: import transformers # noqa F401 except ImportError: return unittest.skip("""test requires transformers""" )(__A ) else: return test_case def _lowercase ( __A ): '''simple docstring''' try: import tiktoken # noqa F401 except ImportError: return unittest.skip("""test requires tiktoken""" )(__A ) else: return test_case def _lowercase ( __A ): '''simple docstring''' try: import spacy # noqa F401 except ImportError: return unittest.skip("""test requires spacy""" )(__A ) else: return test_case def _lowercase ( __A ): '''simple docstring''' def _require_spacy_model(__A ): try: import spacy # noqa F401 spacy.load(__A ) except ImportError: return unittest.skip("""test requires spacy""" )(__A ) except OSError: return unittest.skip("""test requires spacy model '{}'""".format(__A ) )(__A ) else: return test_case return _require_spacy_model def _lowercase ( __A ): '''simple docstring''' try: import pyspark # noqa F401 except ImportError: return unittest.skip("""test requires pyspark""" )(__A ) else: return test_case def _lowercase ( __A ): '''simple docstring''' try: import joblibspark # noqa F401 except ImportError: return unittest.skip("""test requires joblibspark""" )(__A ) else: return test_case def _lowercase ( __A ): '''simple docstring''' if not _run_slow_tests or _run_slow_tests == 0: __UpperCamelCase = unittest.skip("""test is slow""" )(__A ) return test_case def _lowercase ( __A ): '''simple docstring''' if not _run_local_tests or _run_local_tests == 0: __UpperCamelCase = unittest.skip("""test is local""" )(__A ) return test_case def _lowercase ( __A ): '''simple docstring''' if not _run_packaged_tests or _run_packaged_tests == 0: __UpperCamelCase = unittest.skip("""test is packaged""" )(__A ) return test_case def _lowercase ( __A ): '''simple docstring''' if not _run_remote_tests or _run_remote_tests == 0: __UpperCamelCase = unittest.skip("""test requires remote""" )(__A ) return test_case def _lowercase ( *__A ): '''simple docstring''' def decorate(cls ): for name, fn in cls.__dict__.items(): if callable(__A ) and name.startswith("""test""" ): for decorator in decorators: __UpperCamelCase = decorator(__A ) setattr(cls ,__A ,__A ) return cls return decorate class UpperCAmelCase__ ( UpperCAmelCase_): pass class UpperCAmelCase__ ( UpperCAmelCase_): __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = 1 __SCREAMING_SNAKE_CASE = 2 @contextmanager def _lowercase ( __A=OfflineSimulationMode.CONNECTION_FAILS ,__A=1E-16 ): '''simple docstring''' __UpperCamelCase = requests.Session().request def timeout_request(__A ,__A ,__A ,**__A ): # Change the url to an invalid url so that the connection hangs __UpperCamelCase = """https://10.255.255.1""" if kwargs.get("""timeout""" ) is None: raise RequestWouldHangIndefinitelyError( f"Tried a call to {url} in offline mode with no timeout set. Please set a timeout." ) __UpperCamelCase = timeout try: return online_request(__A ,__A ,**__A ) except Exception as e: # The following changes in the error are just here to make the offline timeout error prettier __UpperCamelCase = url __UpperCamelCase = e.args[0] __UpperCamelCase = (max_retry_error.args[0].replace("""10.255.255.1""" ,f"OfflineMock[{url}]" ),) __UpperCamelCase = (max_retry_error,) raise def raise_connection_error(__A ,__A ,**__A ): raise requests.ConnectionError("""Offline mode is enabled.""" ,request=__A ) if mode is OfflineSimulationMode.CONNECTION_FAILS: with patch("""requests.Session.send""" ,__A ): yield elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT: # inspired from https://stackoverflow.com/a/904609 with patch("""requests.Session.request""" ,__A ): yield elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1: with patch("""datasets.config.HF_DATASETS_OFFLINE""" ,__A ): yield else: raise ValueError("""Please use a value from the OfflineSimulationMode enum.""" ) @contextmanager def _lowercase ( *__A ,**__A ): '''simple docstring''' __UpperCamelCase = str(Path().resolve() ) with tempfile.TemporaryDirectory(*__A ,**__A ) as tmp_dir: try: os.chdir(__A ) yield finally: os.chdir(__A ) @contextmanager def _lowercase ( ): '''simple docstring''' import gc gc.collect() __UpperCamelCase = pa.total_allocated_bytes() yield assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase." @contextmanager def _lowercase ( ): '''simple docstring''' import gc gc.collect() __UpperCamelCase = pa.total_allocated_bytes() yield assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase." def _lowercase ( __A ,__A ): '''simple docstring''' return deepcopy(__A ).integers(0 ,100 ,10 ).tolist() == deepcopy(__A ).integers(0 ,100 ,10 ).tolist() def _lowercase ( __A ): '''simple docstring''' import decorator from requests.exceptions import HTTPError def _wrapper(__A ,*__A ,**__A ): try: return func(*__A ,**__A ) except HTTPError as err: if str(__A ).startswith("""500""" ) or str(__A ).startswith("""502""" ): pytest.xfail(str(__A ) ) raise err return decorator.decorator(_wrapper ,__A ) class UpperCAmelCase__ : def __init__( self , lowercase , lowercase , lowercase ) -> str: __UpperCamelCase = returncode __UpperCamelCase = stdout __UpperCamelCase = stderr async def _lowercase ( __A ,__A ): '''simple docstring''' while True: __UpperCamelCase = await stream.readline() if line: callback(__A ) else: break async def _lowercase ( __A ,__A=None ,__A=None ,__A=None ,__A=False ,__A=False ): '''simple docstring''' if echo: print("""\nRunning: """ ,""" """.join(__A ) ) __UpperCamelCase = await asyncio.create_subprocess_exec( cmd[0] ,*cmd[1:] ,stdin=__A ,stdout=asyncio.subprocess.PIPE ,stderr=asyncio.subprocess.PIPE ,env=__A ,) # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait # # If it starts hanging, will need to switch to the following code. The problem is that no data # will be seen until it's done and if it hangs for example there will be no debug info. # out, err = await p.communicate() # return _RunOutput(p.returncode, out, err) __UpperCamelCase = [] __UpperCamelCase = [] def tee(__A ,__A ,__A ,__A="" ): __UpperCamelCase = line.decode("""utf-8""" ).rstrip() sink.append(__A ) if not quiet: print(__A ,__A ,file=__A ) # XXX: the timeout doesn't seem to make any difference here await asyncio.wait( [ _read_stream(p.stdout ,lambda __A : tee(__A ,__A ,sys.stdout ,label="""stdout:""" ) ), _read_stream(p.stderr ,lambda __A : tee(__A ,__A ,sys.stderr ,label="""stderr:""" ) ), ] ,timeout=__A ,) return _RunOutput(await p.wait() ,__A ,__A ) def _lowercase ( __A ,__A=None ,__A=None ,__A=180 ,__A=False ,__A=True ): '''simple docstring''' __UpperCamelCase = asyncio.get_event_loop() __UpperCamelCase = loop.run_until_complete( _stream_subprocess(__A ,env=__A ,stdin=__A ,timeout=__A ,quiet=__A ,echo=__A ) ) __UpperCamelCase = """ """.join(__A ) if result.returncode > 0: __UpperCamelCase = """\n""".join(result.stderr ) raise RuntimeError( f"'{cmd_str}' failed with returncode {result.returncode}\n\n" f"The combined stderr from workers follows:\n{stderr}" ) # check that the subprocess actually did run and produced some output, should the test rely on # the remote side to do the testing if not result.stdout and not result.stderr: raise RuntimeError(f"'{cmd_str}' produced no output." ) return result def _lowercase ( ): '''simple docstring''' __UpperCamelCase = os.environ.get("""PYTEST_XDIST_WORKER""" ,"""gw0""" ) __UpperCamelCase = re.sub(R"""^gw""" ,"""""" ,__A ,0 ,re.M ) return int(__A ) def _lowercase ( ): '''simple docstring''' __UpperCamelCase = 29_500 __UpperCamelCase = pytest_xdist_worker_id() return port + uniq_delta
349
0
"""simple docstring""" from dataclasses import dataclass, field from typing import Optional from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser @dataclass class UpperCamelCase_ : __magic_name__ = field( metadata={'''help''': '''The output directory where the model will be written.'''} , ) __magic_name__ = field( metadata={ '''help''': ( '''The encoder model checkpoint for weights initialization.''' '''Don\'t set if you want to train an encoder model from scratch.''' ) } , ) __magic_name__ = field( metadata={ '''help''': ( '''The decoder model checkpoint for weights initialization.''' '''Don\'t set if you want to train a decoder model from scratch.''' ) } , ) __magic_name__ = field( default=UpperCAmelCase_ , metadata={'''help''': '''Pretrained encoder config name or path if not the same as encoder_model_name'''} ) __magic_name__ = field( default=UpperCAmelCase_ , metadata={'''help''': '''Pretrained decoder config name or path if not the same as decoder_model_name'''} ) def snake_case ( ): UpperCAmelCase_ : Union[str, Any] = HfArgumentParser((ModelArguments,) ) ((UpperCAmelCase_ ) , ) : Union[str, Any] = parser.parse_args_into_dataclasses() # Load pretrained model and tokenizer # Use explicit specified encoder config if model_args.encoder_config_name: UpperCAmelCase_ : Optional[int] = AutoConfig.from_pretrained(model_args.encoder_config_name ) # Use pretrained encoder model's config else: UpperCAmelCase_ : Dict = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path ) # Use explicit specified decoder config if model_args.decoder_config_name: UpperCAmelCase_ : Union[str, Any] = AutoConfig.from_pretrained(model_args.decoder_config_name ) # Use pretrained decoder model's config else: UpperCAmelCase_ : int = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path ) # necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed UpperCAmelCase_ : Dict = True UpperCAmelCase_ : int = True UpperCAmelCase_ : List[Any] = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained( encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path ,decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path ,encoder_config=__A ,decoder_config=__A ,) # GPT2 only has bos/eos tokens but not decoder_start/pad tokens UpperCAmelCase_ : Dict = decoder_config.decoder_start_token_id UpperCAmelCase_ : Tuple = decoder_config.pad_token_id if decoder_start_token_id is None: UpperCAmelCase_ : List[str] = decoder_config.bos_token_id if pad_token_id is None: UpperCAmelCase_ : int = decoder_config.eos_token_id # This is necessary to make Flax's generate() work UpperCAmelCase_ : str = decoder_config.eos_token_id UpperCAmelCase_ : Dict = decoder_start_token_id UpperCAmelCase_ : Tuple = pad_token_id UpperCAmelCase_ : Union[str, Any] = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path ) UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path ) UpperCAmelCase_ : Dict = tokenizer.convert_ids_to_tokens(model.config.pad_token_id ) model.save_pretrained(model_args.output_dir ) image_processor.save_pretrained(model_args.output_dir ) tokenizer.save_pretrained(model_args.output_dir ) if __name__ == "__main__": main()
268
'''simple docstring''' import re def _lowercase ( __A ): '''simple docstring''' return [char.split() for char in re.split(R"""[^ a-z A-Z 0-9 \s]""" ,str_ )] def _lowercase ( __A ): '''simple docstring''' __UpperCamelCase = split_input(str_ ) return "".join( ["""""".join([char.capitalize() for char in sub_str] ) for sub_str in string_split] ) def _lowercase ( __A ,__A ,__A ): '''simple docstring''' try: __UpperCamelCase = split_input(__A ) if upper: __UpperCamelCase = """""".join( [ separator.join([char.upper() for char in sub_str] ) for sub_str in string_split ] ) else: __UpperCamelCase = """""".join( [ separator.join([char.lower() for char in sub_str] ) for sub_str in string_split ] ) return res_str except IndexError: return "not valid string" def _lowercase ( __A ): '''simple docstring''' return to_simple_case(__A ) def _lowercase ( __A ): '''simple docstring''' try: __UpperCamelCase = to_simple_case(__A ) return res_str[0].lower() + res_str[1:] except IndexError: return "not valid string" def _lowercase ( __A ,__A ): '''simple docstring''' return to_complex_case(__A ,__A ,"""_""" ) def _lowercase ( __A ,__A ): '''simple docstring''' return to_complex_case(__A ,__A ,"""-""" ) if __name__ == "__main__": __import__('doctest').testmod()
349
0
import argparse import datetime import json import time import warnings from logging import getLogger from pathlib import Path from typing import Dict, List import torch from tqdm import tqdm from transformers import AutoModelForSeqaSeqLM, AutoTokenizer from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params SCREAMING_SNAKE_CASE_:Tuple = getLogger(__name__) SCREAMING_SNAKE_CASE_:Tuple = 'cuda' if torch.cuda.is_available() else 'cpu' def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = 8 , _lowerCAmelCase = DEFAULT_DEVICE , _lowerCAmelCase=False , _lowerCAmelCase="summarization" , _lowerCAmelCase=None , **_lowerCAmelCase , ) -> Tuple: """simple docstring""" A : Optional[int] = Path(__A ).open("""w""" , encoding="""utf-8""" ) A : Tuple = str(__A ) A : int = AutoModelForSeqaSeqLM.from_pretrained(__A ).to(__A ) if fpaa: A : List[Any] = model.half() A : List[str] = AutoTokenizer.from_pretrained(__A ) logger.info(f'''Inferred tokenizer type: {tokenizer.__class__}''' ) # if this is wrong, check config.model_type. A : Dict = time.time() # update config with task specific params use_task_specific_params(__A , __A ) if prefix is None: A : int = prefix or getattr(model.config , """prefix""" , """""" ) or """""" for examples_chunk in tqdm(list(chunks(__A , __A ) ) ): A : Union[str, Any] = [prefix + text for text in examples_chunk] A : Union[str, Any] = tokenizer(__A , return_tensors="""pt""" , truncation=__A , padding="""longest""" ).to(__A ) A : int = model.generate( input_ids=batch.input_ids , attention_mask=batch.attention_mask , **__A , ) A : Optional[Any] = tokenizer.batch_decode(__A , skip_special_tokens=__A , clean_up_tokenization_spaces=__A ) for hypothesis in dec: fout.write(hypothesis + """\n""" ) fout.flush() fout.close() A : str = int(time.time() - start_time ) # seconds A : Tuple = len(__A ) return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs , 4 )} def __UpperCamelCase ( ) -> List[str]: """simple docstring""" return datetime.datetime.now().strftime("""%Y-%m-%d %H:%M:%S""" ) def __UpperCamelCase ( _lowerCAmelCase=True ) -> Tuple: """simple docstring""" A : Optional[int] = argparse.ArgumentParser() parser.add_argument("""model_name""" , type=__A , help="""like facebook/bart-large-cnn,t5-base, etc.""" ) parser.add_argument("""input_path""" , type=__A , help="""like cnn_dm/test.source""" ) parser.add_argument("""save_path""" , type=__A , help="""where to save summaries""" ) parser.add_argument("""--reference_path""" , type=__A , required=__A , help="""like cnn_dm/test.target""" ) parser.add_argument("""--score_path""" , type=__A , required=__A , default="""metrics.json""" , help="""where to save metrics""" ) parser.add_argument("""--device""" , type=__A , required=__A , default=__A , help="""cuda, cuda:1, cpu etc.""" ) parser.add_argument( """--prefix""" , type=__A , required=__A , default=__A , help="""will be added to the begininng of src examples""" ) parser.add_argument("""--task""" , type=__A , default="""summarization""" , help="""used for task_specific_params + metrics""" ) parser.add_argument("""--bs""" , type=__A , default=8 , required=__A , help="""batch size""" ) parser.add_argument( """--n_obs""" , type=__A , default=-1 , required=__A , help="""How many observations. Defaults to all.""" ) parser.add_argument("""--fp16""" , action="""store_true""" ) parser.add_argument("""--dump-args""" , action="""store_true""" , help="""print the custom hparams with the results""" ) parser.add_argument( """--info""" , nargs="""?""" , type=__A , const=datetime_now() , help=( """use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g.""" """ lang=en-ru. If no value is passed, the current datetime string will be used.""" ) , ) # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate A , A : List[Any] = parser.parse_known_args() A : Dict = parse_numeric_n_bool_cl_kwargs(__A ) if parsed_args and verbose: print(f'''parsed the following generate kwargs: {parsed_args}''' ) A : List[Any] = [""" """ + x.rstrip() if """t5""" in args.model_name else x.rstrip() for x in open(args.input_path ).readlines()] if args.n_obs > 0: A : int = examples[: args.n_obs] Path(args.save_path ).parent.mkdir(exist_ok=__A ) if args.reference_path is None and Path(args.score_path ).exists(): warnings.warn(f'''score_path {args.score_path} will be overwritten unless you type ctrl-c.''' ) if args.device == "cpu" and args.fpaa: # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half' raise ValueError("""Can't mix --fp16 and --device cpu""" ) A : Optional[int] = generate_summaries_or_translations( __A , args.save_path , args.model_name , batch_size=args.bs , device=args.device , fpaa=args.fpaa , task=args.task , prefix=args.prefix , **__A , ) if args.reference_path is None: return {} # Compute scores A : List[str] = calculate_bleu if """translation""" in args.task else calculate_rouge A : int = [x.rstrip() for x in open(args.save_path ).readlines()] A : Dict = [x.rstrip() for x in open(args.reference_path ).readlines()][: len(__A )] A : List[Any] = score_fn(__A , __A ) scores.update(__A ) if args.dump_args: scores.update(__A ) if args.info: A : str = args.info if verbose: print(__A ) if args.score_path is not None: json.dump(__A , open(args.score_path , """w""" ) ) return scores if __name__ == "__main__": # Usage for MT: # python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@ run_generate(verbose=True)
116
'''simple docstring''' import gc import random import tempfile import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline from diffusers.utils import floats_tensor, nightly, torch_device from diffusers.utils.testing_utils import require_torch_gpu class UpperCAmelCase__ ( unittest.TestCase): def __lowerCamelCase ( self ) -> List[str]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def __lowerCamelCase ( self ) -> int: __UpperCamelCase = 1 __UpperCamelCase = 3 __UpperCamelCase = (3_2, 3_2) __UpperCamelCase = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(lowercase ) return image @property def __lowerCamelCase ( self ) -> Dict: torch.manual_seed(0 ) __UpperCamelCase = UNetaDConditionModel( block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=3_2 , ) return model @property def __lowerCamelCase ( self ) -> List[str]: torch.manual_seed(0 ) __UpperCamelCase = AutoencoderKL( block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , ) return model @property def __lowerCamelCase ( self ) -> Optional[int]: torch.manual_seed(0 ) __UpperCamelCase = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ) return CLIPTextModel(lowercase ) @property def __lowerCamelCase ( self ) -> Tuple: def extract(*lowercase , **lowercase ): class UpperCAmelCase__ : def __init__( self ) -> Tuple: __UpperCamelCase = torch.ones([0] ) def __lowerCamelCase ( self , lowercase ) -> List[str]: self.pixel_values.to(lowercase ) return self return Out() return extract def __lowerCamelCase ( self ) -> Any: __UpperCamelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator __UpperCamelCase = self.dummy_cond_unet __UpperCamelCase = DDIMScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=lowercase , set_alpha_to_one=lowercase , ) __UpperCamelCase = self.dummy_vae __UpperCamelCase = self.dummy_text_encoder __UpperCamelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) # make sure here that pndm scheduler skips prk __UpperCamelCase = StableDiffusionPipeline( unet=lowercase , scheduler=lowercase , vae=lowercase , text_encoder=lowercase , tokenizer=lowercase , safety_checker=lowercase , feature_extractor=self.dummy_extractor , ) __UpperCamelCase = sd_pipe.to(lowercase ) sd_pipe.set_progress_bar_config(disable=lowercase ) __UpperCamelCase = """A painting of a squirrel eating a burger""" __UpperCamelCase = torch.Generator(device=lowercase ).manual_seed(0 ) __UpperCamelCase = sd_pipe([prompt] , generator=lowercase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" ) __UpperCamelCase = output.images __UpperCamelCase = torch.Generator(device=lowercase ).manual_seed(0 ) __UpperCamelCase = sd_pipe( [prompt] , generator=lowercase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , return_dict=lowercase , )[0] __UpperCamelCase = image[0, -3:, -3:, -1] __UpperCamelCase = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 6_4, 6_4, 3) __UpperCamelCase = np.array([0.5_756, 0.6_118, 0.5_005, 0.5_041, 0.5_471, 0.4_726, 0.4_976, 0.4_865, 0.4_864] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 def __lowerCamelCase ( self ) -> Tuple: __UpperCamelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator __UpperCamelCase = self.dummy_cond_unet __UpperCamelCase = PNDMScheduler(skip_prk_steps=lowercase ) __UpperCamelCase = self.dummy_vae __UpperCamelCase = self.dummy_text_encoder __UpperCamelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) # make sure here that pndm scheduler skips prk __UpperCamelCase = StableDiffusionPipeline( unet=lowercase , scheduler=lowercase , vae=lowercase , text_encoder=lowercase , tokenizer=lowercase , safety_checker=lowercase , feature_extractor=self.dummy_extractor , ) __UpperCamelCase = sd_pipe.to(lowercase ) sd_pipe.set_progress_bar_config(disable=lowercase ) __UpperCamelCase = """A painting of a squirrel eating a burger""" __UpperCamelCase = torch.Generator(device=lowercase ).manual_seed(0 ) __UpperCamelCase = sd_pipe([prompt] , generator=lowercase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" ) __UpperCamelCase = output.images __UpperCamelCase = torch.Generator(device=lowercase ).manual_seed(0 ) __UpperCamelCase = sd_pipe( [prompt] , generator=lowercase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , return_dict=lowercase , )[0] __UpperCamelCase = image[0, -3:, -3:, -1] __UpperCamelCase = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 6_4, 6_4, 3) __UpperCamelCase = np.array([0.5_125, 0.5_716, 0.4_828, 0.5_060, 0.5_650, 0.4_768, 0.5_185, 0.4_895, 0.4_993] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 def __lowerCamelCase ( self ) -> Union[str, Any]: __UpperCamelCase = StableDiffusionPipeline.from_pretrained( """hf-internal-testing/tiny-stable-diffusion-lms-pipe""" , safety_checker=lowercase ) assert isinstance(lowercase , lowercase ) assert isinstance(pipe.scheduler , lowercase ) assert pipe.safety_checker is None __UpperCamelCase = pipe("""example prompt""" , num_inference_steps=2 ).images[0] assert image is not None # check that there's no error when saving a pipeline with one of the models being None with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(lowercase ) __UpperCamelCase = StableDiffusionPipeline.from_pretrained(lowercase ) # sanity check that the pipeline still works assert pipe.safety_checker is None __UpperCamelCase = pipe("""example prompt""" , num_inference_steps=2 ).images[0] assert image is not None @unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" ) def __lowerCamelCase ( self ) -> Optional[int]: __UpperCamelCase = self.dummy_cond_unet __UpperCamelCase = PNDMScheduler(skip_prk_steps=lowercase ) __UpperCamelCase = self.dummy_vae __UpperCamelCase = self.dummy_text_encoder __UpperCamelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) # put models in fp16 __UpperCamelCase = unet.half() __UpperCamelCase = vae.half() __UpperCamelCase = bert.half() # make sure here that pndm scheduler skips prk __UpperCamelCase = StableDiffusionPipeline( unet=lowercase , scheduler=lowercase , vae=lowercase , text_encoder=lowercase , tokenizer=lowercase , safety_checker=lowercase , feature_extractor=self.dummy_extractor , ) __UpperCamelCase = sd_pipe.to(lowercase ) sd_pipe.set_progress_bar_config(disable=lowercase ) __UpperCamelCase = """A painting of a squirrel eating a burger""" __UpperCamelCase = sd_pipe([prompt] , num_inference_steps=2 , output_type="""np""" ).images assert image.shape == (1, 6_4, 6_4, 3) @nightly @require_torch_gpu class UpperCAmelCase__ ( unittest.TestCase): def __lowerCamelCase ( self ) -> Tuple: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowerCamelCase ( self ) -> Dict: __UpperCamelCase = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" , safety_checker=lowercase ) __UpperCamelCase = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config ) __UpperCamelCase = sd_pipe.to(lowercase ) sd_pipe.set_progress_bar_config(disable=lowercase ) __UpperCamelCase = ( """portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle""" """ coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with""" """ anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and""" """ children from bahnhof zoo, detailed """ ) __UpperCamelCase = 4_0_0_3_6_6_0_3_4_6 __UpperCamelCase = 7 # without safety guidance (sld_guidance_scale = 0) __UpperCamelCase = torch.manual_seed(lowercase ) __UpperCamelCase = sd_pipe( [prompt] , generator=lowercase , guidance_scale=lowercase , num_inference_steps=5_0 , output_type="""np""" , width=5_1_2 , height=5_1_2 , sld_guidance_scale=0 , ) __UpperCamelCase = output.images __UpperCamelCase = image[0, -3:, -3:, -1] __UpperCamelCase = [0.2_278, 0.2_231, 0.2_249, 0.2_333, 0.2_303, 0.1_885, 0.2_273, 0.2_144, 0.2_176] assert image.shape == (1, 5_1_2, 5_1_2, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 # without safety guidance (strong configuration) __UpperCamelCase = torch.manual_seed(lowercase ) __UpperCamelCase = sd_pipe( [prompt] , generator=lowercase , guidance_scale=lowercase , num_inference_steps=5_0 , output_type="""np""" , width=5_1_2 , height=5_1_2 , sld_guidance_scale=2_0_0_0 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , ) __UpperCamelCase = output.images __UpperCamelCase = image[0, -3:, -3:, -1] __UpperCamelCase = [0.2_383, 0.2_276, 0.236, 0.2_192, 0.2_186, 0.2_053, 0.1_971, 0.1_901, 0.1_719] assert image.shape == (1, 5_1_2, 5_1_2, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def __lowerCamelCase ( self ) -> Optional[Any]: __UpperCamelCase = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" , safety_checker=lowercase ) __UpperCamelCase = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config ) __UpperCamelCase = sd_pipe.to(lowercase ) sd_pipe.set_progress_bar_config(disable=lowercase ) __UpperCamelCase = """padme amidala taking a bath artwork, safe for work, no nudity""" __UpperCamelCase = 2_7_3_4_9_7_1_7_5_5 __UpperCamelCase = 7 __UpperCamelCase = torch.manual_seed(lowercase ) __UpperCamelCase = sd_pipe( [prompt] , generator=lowercase , guidance_scale=lowercase , num_inference_steps=5_0 , output_type="""np""" , width=5_1_2 , height=5_1_2 , sld_guidance_scale=0 , ) __UpperCamelCase = output.images __UpperCamelCase = image[0, -3:, -3:, -1] __UpperCamelCase = [0.3_502, 0.3_622, 0.3_396, 0.3_642, 0.3_478, 0.3_318, 0.35, 0.3_348, 0.3_297] assert image.shape == (1, 5_1_2, 5_1_2, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 __UpperCamelCase = torch.manual_seed(lowercase ) __UpperCamelCase = sd_pipe( [prompt] , generator=lowercase , guidance_scale=lowercase , num_inference_steps=5_0 , output_type="""np""" , width=5_1_2 , height=5_1_2 , sld_guidance_scale=2_0_0_0 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , ) __UpperCamelCase = output.images __UpperCamelCase = image[0, -3:, -3:, -1] __UpperCamelCase = [0.5_531, 0.5_206, 0.4_895, 0.5_156, 0.5_182, 0.4_751, 0.4_802, 0.4_803, 0.4_443] assert image.shape == (1, 5_1_2, 5_1_2, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def __lowerCamelCase ( self ) -> Optional[Any]: __UpperCamelCase = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" ) __UpperCamelCase = sd_pipe.to(lowercase ) sd_pipe.set_progress_bar_config(disable=lowercase ) __UpperCamelCase = ( """the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c.""" """ leyendecker""" ) __UpperCamelCase = 1_0_4_4_3_5_5_2_3_4 __UpperCamelCase = 1_2 __UpperCamelCase = torch.manual_seed(lowercase ) __UpperCamelCase = sd_pipe( [prompt] , generator=lowercase , guidance_scale=lowercase , num_inference_steps=5_0 , output_type="""np""" , width=5_1_2 , height=5_1_2 , sld_guidance_scale=0 , ) __UpperCamelCase = output.images __UpperCamelCase = image[0, -3:, -3:, -1] __UpperCamelCase = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] ) assert image.shape == (1, 5_1_2, 5_1_2, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-7 __UpperCamelCase = torch.manual_seed(lowercase ) __UpperCamelCase = sd_pipe( [prompt] , generator=lowercase , guidance_scale=lowercase , num_inference_steps=5_0 , output_type="""np""" , width=5_1_2 , height=5_1_2 , sld_guidance_scale=2_0_0_0 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , ) __UpperCamelCase = output.images __UpperCamelCase = image[0, -3:, -3:, -1] __UpperCamelCase = np.array([0.5_818, 0.6_285, 0.6_835, 0.6_019, 0.625, 0.6_754, 0.6_096, 0.6_334, 0.6_561] ) assert image.shape == (1, 5_1_2, 5_1_2, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
349
0
from __future__ import annotations import unittest from transformers import DistilBertConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.distilbert.modeling_tf_distilbert import ( TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDistilBertForMaskedLM, TFDistilBertForMultipleChoice, TFDistilBertForQuestionAnswering, TFDistilBertForSequenceClassification, TFDistilBertForTokenClassification, TFDistilBertModel, ) class __snake_case : def __init__( self , __UpperCamelCase , ) -> Union[str, Any]: '''simple docstring''' snake_case__ : Optional[int] = parent snake_case__ : int = 13 snake_case__ : Tuple = 7 snake_case__ : Union[str, Any] = True snake_case__ : Optional[int] = True snake_case__ : str = False snake_case__ : Optional[Any] = True snake_case__ : Dict = 99 snake_case__ : int = 32 snake_case__ : Dict = 2 snake_case__ : str = 4 snake_case__ : Union[str, Any] = 37 snake_case__ : Tuple = 'gelu' snake_case__ : Union[str, Any] = 0.1 snake_case__ : Union[str, Any] = 0.1 snake_case__ : Optional[int] = 512 snake_case__ : str = 16 snake_case__ : Tuple = 2 snake_case__ : int = 0.0_2 snake_case__ : Optional[int] = 3 snake_case__ : Any = 4 snake_case__ : Optional[int] = None def __a ( self ) -> List[str]: '''simple docstring''' snake_case__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) snake_case__ : Dict = None if self.use_input_mask: snake_case__ : List[str] = random_attention_mask([self.batch_size, self.seq_length] ) snake_case__ : Optional[Any] = None snake_case__ : Any = None snake_case__ : int = None if self.use_labels: snake_case__ : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) snake_case__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) snake_case__ : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices ) snake_case__ : str = DistilBertConfig( vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , ) return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def __a ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Dict: '''simple docstring''' snake_case__ : int = TFDistilBertModel(config=__UpperCamelCase ) snake_case__ : Dict = {'input_ids': input_ids, 'attention_mask': input_mask} snake_case__ : Dict = model(__UpperCamelCase ) snake_case__ : Dict = [input_ids, input_mask] snake_case__ : List[str] = model(__UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __a ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Optional[Any]: '''simple docstring''' snake_case__ : List[str] = TFDistilBertForMaskedLM(config=__UpperCamelCase ) snake_case__ : Union[str, Any] = {'input_ids': input_ids, 'attention_mask': input_mask} snake_case__ : List[Any] = model(__UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __a ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Tuple: '''simple docstring''' snake_case__ : Union[str, Any] = TFDistilBertForQuestionAnswering(config=__UpperCamelCase ) snake_case__ : Tuple = { 'input_ids': input_ids, 'attention_mask': input_mask, } snake_case__ : int = model(__UpperCamelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __a ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Tuple: '''simple docstring''' snake_case__ : Any = self.num_labels snake_case__ : str = TFDistilBertForSequenceClassification(__UpperCamelCase ) snake_case__ : Optional[int] = {'input_ids': input_ids, 'attention_mask': input_mask} snake_case__ : Optional[int] = model(__UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __a ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> int: '''simple docstring''' snake_case__ : int = self.num_choices snake_case__ : Any = TFDistilBertForMultipleChoice(__UpperCamelCase ) snake_case__ : Optional[int] = tf.tile(tf.expand_dims(__UpperCamelCase , 1 ) , (1, self.num_choices, 1) ) snake_case__ : Dict = tf.tile(tf.expand_dims(__UpperCamelCase , 1 ) , (1, self.num_choices, 1) ) snake_case__ : Tuple = { 'input_ids': multiple_choice_inputs_ids, 'attention_mask': multiple_choice_input_mask, } snake_case__ : Optional[int] = model(__UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __a ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Optional[int]: '''simple docstring''' snake_case__ : int = self.num_labels snake_case__ : Any = TFDistilBertForTokenClassification(__UpperCamelCase ) snake_case__ : List[Any] = {'input_ids': input_ids, 'attention_mask': input_mask} snake_case__ : Any = model(__UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __a ( self ) -> Dict: '''simple docstring''' snake_case__ : Union[str, Any] = self.prepare_config_and_inputs() ((snake_case__) , (snake_case__) , (snake_case__) , (snake_case__) , (snake_case__) , (snake_case__)) : Optional[int] = config_and_inputs snake_case__ : Optional[int] = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_tf class __snake_case ( UpperCAmelCase_ ,UpperCAmelCase_ ,unittest.TestCase ): __lowerCamelCase = ( ( TFDistilBertModel, TFDistilBertForMaskedLM, TFDistilBertForQuestionAnswering, TFDistilBertForSequenceClassification, TFDistilBertForTokenClassification, TFDistilBertForMultipleChoice, ) if is_tf_available() else None ) __lowerCamelCase = ( { """feature-extraction""": TFDistilBertModel, """fill-mask""": TFDistilBertForMaskedLM, """question-answering""": TFDistilBertForQuestionAnswering, """text-classification""": TFDistilBertForSequenceClassification, """token-classification""": TFDistilBertForTokenClassification, """zero-shot""": TFDistilBertForSequenceClassification, } if is_tf_available() else {} ) __lowerCamelCase = False __lowerCamelCase = False def __a ( self ) -> Optional[Any]: '''simple docstring''' snake_case__ : Optional[Any] = TFDistilBertModelTester(self ) snake_case__ : List[Any] = ConfigTester(self , config_class=__UpperCamelCase , dim=37 ) def __a ( self ) -> Any: '''simple docstring''' self.config_tester.run_common_tests() def __a ( self ) -> Dict: '''simple docstring''' snake_case__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_model(*__UpperCamelCase ) def __a ( self ) -> Union[str, Any]: '''simple docstring''' snake_case__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_masked_lm(*__UpperCamelCase ) def __a ( self ) -> int: '''simple docstring''' snake_case__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_question_answering(*__UpperCamelCase ) def __a ( self ) -> Any: '''simple docstring''' snake_case__ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_sequence_classification(*__UpperCamelCase ) def __a ( self ) -> Optional[Any]: '''simple docstring''' snake_case__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_multiple_choice(*__UpperCamelCase ) def __a ( self ) -> Union[str, Any]: '''simple docstring''' snake_case__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_token_classification(*__UpperCamelCase ) @slow def __a ( self ) -> Tuple: '''simple docstring''' for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ): snake_case__ : int = TFDistilBertModel.from_pretrained(__UpperCamelCase ) self.assertIsNotNone(__UpperCamelCase ) @require_tf class __snake_case ( unittest.TestCase ): @slow def __a ( self ) -> Optional[int]: '''simple docstring''' snake_case__ : Optional[int] = TFDistilBertModel.from_pretrained('distilbert-base-uncased' ) snake_case__ : Dict = tf.constant([[0, 1, 2, 3, 4, 5]] ) snake_case__ : Union[str, Any] = model(__UpperCamelCase )[0] snake_case__ : str = [1, 6, 768] self.assertEqual(output.shape , __UpperCamelCase ) snake_case__ : Tuple = tf.constant( [ [ [0.1_9_2_6_1_8_8_5, -0.1_3_7_3_2_9_5_5, 0.4_1_1_9_7_9_9], [0.2_2_1_5_0_1_5_6, -0.0_7_4_2_2_6_6_1, 0.3_9_0_3_7_2_0_4], [0.2_2_7_5_6_0_1_8, -0.0_8_9_6_4_1_4, 0.3_7_0_1_4_6_7], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , __UpperCamelCase , atol=1E-4 )
143
'''simple docstring''' from ..utils import DummyObject, requires_backends class UpperCAmelCase__ ( metaclass=UpperCAmelCase_): __SCREAMING_SNAKE_CASE = ['''flax'''] def __init__( self , *lowercase , **lowercase ) -> List[Any]: requires_backends(self , ["""flax"""] ) @classmethod def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Union[str, Any]: requires_backends(cls , ["""flax"""] ) @classmethod def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Tuple: requires_backends(cls , ["""flax"""] ) class UpperCAmelCase__ ( metaclass=UpperCAmelCase_): __SCREAMING_SNAKE_CASE = ['''flax'''] def __init__( self , *lowercase , **lowercase ) -> str: requires_backends(self , ["""flax"""] ) @classmethod def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Any: requires_backends(cls , ["""flax"""] ) @classmethod def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> int: requires_backends(cls , ["""flax"""] ) class UpperCAmelCase__ ( metaclass=UpperCAmelCase_): __SCREAMING_SNAKE_CASE = ['''flax'''] def __init__( self , *lowercase , **lowercase ) -> Dict: requires_backends(self , ["""flax"""] ) @classmethod def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> int: requires_backends(cls , ["""flax"""] ) @classmethod def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> List[Any]: requires_backends(cls , ["""flax"""] ) class UpperCAmelCase__ ( metaclass=UpperCAmelCase_): __SCREAMING_SNAKE_CASE = ['''flax'''] def __init__( self , *lowercase , **lowercase ) -> Optional[Any]: requires_backends(self , ["""flax"""] ) @classmethod def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Optional[int]: requires_backends(cls , ["""flax"""] ) @classmethod def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Optional[int]: requires_backends(cls , ["""flax"""] ) class UpperCAmelCase__ ( metaclass=UpperCAmelCase_): __SCREAMING_SNAKE_CASE = ['''flax'''] def __init__( self , *lowercase , **lowercase ) -> Optional[Any]: requires_backends(self , ["""flax"""] ) @classmethod def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> str: requires_backends(cls , ["""flax"""] ) @classmethod def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> List[str]: requires_backends(cls , ["""flax"""] ) class UpperCAmelCase__ ( metaclass=UpperCAmelCase_): __SCREAMING_SNAKE_CASE = ['''flax'''] def __init__( self , *lowercase , **lowercase ) -> Optional[int]: requires_backends(self , ["""flax"""] ) @classmethod def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> str: requires_backends(cls , ["""flax"""] ) @classmethod def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> int: requires_backends(cls , ["""flax"""] ) class UpperCAmelCase__ ( metaclass=UpperCAmelCase_): __SCREAMING_SNAKE_CASE = ['''flax'''] def __init__( self , *lowercase , **lowercase ) -> List[Any]: requires_backends(self , ["""flax"""] ) @classmethod def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Any: requires_backends(cls , ["""flax"""] ) @classmethod def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Dict: requires_backends(cls , ["""flax"""] ) class UpperCAmelCase__ ( metaclass=UpperCAmelCase_): __SCREAMING_SNAKE_CASE = ['''flax'''] def __init__( self , *lowercase , **lowercase ) -> Union[str, Any]: requires_backends(self , ["""flax"""] ) @classmethod def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Dict: requires_backends(cls , ["""flax"""] ) @classmethod def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Tuple: requires_backends(cls , ["""flax"""] ) class UpperCAmelCase__ ( metaclass=UpperCAmelCase_): __SCREAMING_SNAKE_CASE = ['''flax'''] def __init__( self , *lowercase , **lowercase ) -> int: requires_backends(self , ["""flax"""] ) @classmethod def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Optional[int]: requires_backends(cls , ["""flax"""] ) @classmethod def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> List[str]: requires_backends(cls , ["""flax"""] ) class UpperCAmelCase__ ( metaclass=UpperCAmelCase_): __SCREAMING_SNAKE_CASE = ['''flax'''] def __init__( self , *lowercase , **lowercase ) -> int: requires_backends(self , ["""flax"""] ) @classmethod def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> str: requires_backends(cls , ["""flax"""] ) @classmethod def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> str: requires_backends(cls , ["""flax"""] ) class UpperCAmelCase__ ( metaclass=UpperCAmelCase_): __SCREAMING_SNAKE_CASE = ['''flax'''] def __init__( self , *lowercase , **lowercase ) -> int: requires_backends(self , ["""flax"""] ) @classmethod def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Tuple: requires_backends(cls , ["""flax"""] ) @classmethod def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Tuple: requires_backends(cls , ["""flax"""] ) class UpperCAmelCase__ ( metaclass=UpperCAmelCase_): __SCREAMING_SNAKE_CASE = ['''flax'''] def __init__( self , *lowercase , **lowercase ) -> Optional[int]: requires_backends(self , ["""flax"""] ) @classmethod def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Tuple: requires_backends(cls , ["""flax"""] ) @classmethod def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Union[str, Any]: requires_backends(cls , ["""flax"""] ) class UpperCAmelCase__ ( metaclass=UpperCAmelCase_): __SCREAMING_SNAKE_CASE = ['''flax'''] def __init__( self , *lowercase , **lowercase ) -> Any: requires_backends(self , ["""flax"""] ) @classmethod def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Tuple: requires_backends(cls , ["""flax"""] ) @classmethod def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> List[str]: requires_backends(cls , ["""flax"""] )
349
0
import contextlib import importlib import io import unittest import transformers # Try to import everything from transformers to ensure every object can be loaded. from transformers import * # noqa F406 from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available if is_torch_available(): from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification if is_tf_available(): from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification if is_flax_available(): from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification snake_case : Optional[Any] = DUMMY_UNKNOWN_IDENTIFIER # An actual model hosted on huggingface.co snake_case : Optional[int] = 'main' # Default branch name snake_case : Any = 'f2c752cfc5c0ab6f4bdec59acea69eefbee381c2' # One particular commit (not the top of `main`) snake_case : Tuple = 'aaaaaaa' # This commit does not exist, so we should 404. snake_case : Any = 'd9e9f15bc825e4b2c9249e9578f884bbcb5e3684' # Sha-1 of config.json on the top of `main`, for checking purposes snake_case : Any = '4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3' @contextlib.contextmanager def __lowerCamelCase ( ): """simple docstring""" print('''Welcome!''' ) yield print('''Bye!''' ) @contextlib.contextmanager def __lowerCamelCase ( ): """simple docstring""" print('''Bonjour!''' ) yield print('''Au revoir!''' ) class _snake_case ( unittest.TestCase ): def SCREAMING_SNAKE_CASE__ ( self ): # If the spec is missing, importlib would not be able to import the module dynamically. assert transformers.__spec__ is not None assert importlib.util.find_spec('''transformers''' ) is not None class _snake_case ( unittest.TestCase ): @unittest.mock.patch('''sys.stdout''' , new_callable=io.StringIO ) def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ): with ContextManagers([] ): print('''Transformers are awesome!''' ) # The print statement adds a new line at the end of the output self.assertEqual(mock_stdout.getvalue() , '''Transformers are awesome!\n''' ) @unittest.mock.patch('''sys.stdout''' , new_callable=io.StringIO ) def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ): with ContextManagers([context_en()] ): print('''Transformers are awesome!''' ) # The output should be wrapped with an English welcome and goodbye self.assertEqual(mock_stdout.getvalue() , '''Welcome!\nTransformers are awesome!\nBye!\n''' ) @unittest.mock.patch('''sys.stdout''' , new_callable=io.StringIO ) def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ): with ContextManagers([context_fr(), context_en()] ): print('''Transformers are awesome!''' ) # The output should be wrapped with an English and French welcome and goodbye self.assertEqual(mock_stdout.getvalue() , '''Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n''' ) @require_torch def SCREAMING_SNAKE_CASE__ ( self ): self.assertEqual(find_labels(_lowerCamelCase ) , ['''labels'''] ) self.assertEqual(find_labels(_lowerCamelCase ) , ['''labels''', '''next_sentence_label'''] ) self.assertEqual(find_labels(_lowerCamelCase ) , ['''start_positions''', '''end_positions'''] ) class _snake_case ( UpperCAmelCase_ ): pass self.assertEqual(find_labels(_lowerCamelCase ) , ['''labels'''] ) @require_tf def SCREAMING_SNAKE_CASE__ ( self ): self.assertEqual(find_labels(_lowerCamelCase ) , ['''labels'''] ) self.assertEqual(find_labels(_lowerCamelCase ) , ['''labels''', '''next_sentence_label'''] ) self.assertEqual(find_labels(_lowerCamelCase ) , ['''start_positions''', '''end_positions'''] ) class _snake_case ( UpperCAmelCase_ ): pass self.assertEqual(find_labels(_lowerCamelCase ) , ['''labels'''] ) @require_flax def SCREAMING_SNAKE_CASE__ ( self ): # Flax models don't have labels self.assertEqual(find_labels(_lowerCamelCase ) , [] ) self.assertEqual(find_labels(_lowerCamelCase ) , [] ) self.assertEqual(find_labels(_lowerCamelCase ) , [] ) class _snake_case ( UpperCAmelCase_ ): pass self.assertEqual(find_labels(_lowerCamelCase ) , [] )
94
'''simple docstring''' import logging import os from .state import PartialState class UpperCAmelCase__ ( logging.LoggerAdapter): @staticmethod def __lowerCamelCase ( lowercase ) -> Dict: __UpperCamelCase = PartialState() return not main_process_only or (main_process_only and state.is_main_process) def __lowerCamelCase ( self , lowercase , lowercase , *lowercase , **lowercase ) -> List[str]: if PartialState._shared_state == {}: raise RuntimeError( """You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.""" ) __UpperCamelCase = kwargs.pop("""main_process_only""" , lowercase ) __UpperCamelCase = kwargs.pop("""in_order""" , lowercase ) if self.isEnabledFor(lowercase ): if self._should_log(lowercase ): __UpperCamelCase , __UpperCamelCase = self.process(lowercase , lowercase ) self.logger.log(lowercase , lowercase , *lowercase , **lowercase ) elif in_order: __UpperCamelCase = PartialState() for i in range(state.num_processes ): if i == state.process_index: __UpperCamelCase , __UpperCamelCase = self.process(lowercase , lowercase ) self.logger.log(lowercase , lowercase , *lowercase , **lowercase ) state.wait_for_everyone() def _lowercase ( __A ,__A = None ): '''simple docstring''' if log_level is None: __UpperCamelCase = os.environ.get("""ACCELERATE_LOG_LEVEL""" ,__A ) __UpperCamelCase = logging.getLogger(__A ) if log_level is not None: logger.setLevel(log_level.upper() ) logger.root.setLevel(log_level.upper() ) return MultiProcessAdapter(__A ,{} )
349
0
'''simple docstring''' lowercase : int = [0, 2, 4, 6, 8] lowercase : Optional[Any] = [1, 3, 5, 7, 9] def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' if remaining_length == 0: if digits[0] == 0 or digits[-1] == 0: return 0 for i in range(length // 2 - 1 , -1 , -1 ): remainder += digits[i] + digits[length - i - 1] if remainder % 2 == 0: return 0 remainder //= 10 return 1 if remaining_length == 1: if remainder % 2 == 0: return 0 A : Optional[Any] = 0 for digit in range(10 ): A : Dict = digit result += reversible_numbers( 0 , (remainder + 2 * digit) // 10 , __A , __A ) return result A : Any = 0 for digita in range(10 ): A : List[Any] = digita if (remainder + digita) % 2 == 0: A : int = ODD_DIGITS else: A : Optional[Any] = EVEN_DIGITS for digita in other_parity_digits: A : Dict = digita result += reversible_numbers( remaining_length - 2 , (remainder + digita + digita) // 10 , __A , __A , ) return result def lowerCAmelCase_ ( snake_case__ = 9 ): '''simple docstring''' A : List[str] = 0 for length in range(1 , max_power + 1 ): result += reversible_numbers(__A , 0 , [0] * length , __A ) return result if __name__ == "__main__": print(f'''{solution() = }''')
3
'''simple docstring''' import logging import random import ray from transformers import RagConfig, RagRetriever, RagTokenizer from transformers.models.rag.retrieval_rag import CustomHFIndex a__ : Optional[Any] = logging.getLogger(__name__) class UpperCAmelCase__ : def __init__( self ) -> Any: __UpperCamelCase = False def __lowerCamelCase ( self , lowercase , lowercase , lowercase , lowercase ) -> str: if not self.initialized: __UpperCamelCase = RagRetriever( lowercase , question_encoder_tokenizer=lowercase , generator_tokenizer=lowercase , index=lowercase , init_retrieval=lowercase , ) __UpperCamelCase = True def __lowerCamelCase ( self ) -> Optional[Any]: self.retriever.index.init_index() def __lowerCamelCase ( self , lowercase , lowercase ) -> Dict: __UpperCamelCase , __UpperCamelCase = self.retriever._main_retrieve(lowercase , lowercase ) return doc_ids, retrieved_doc_embeds class UpperCAmelCase__ ( UpperCAmelCase_): def __init__( self , lowercase , lowercase , lowercase , lowercase , lowercase=None ) -> List[Any]: if index is not None and index.is_initialized() and len(lowercase ) > 0: raise ValueError( """When using Ray for distributed fine-tuning, """ """you'll need to provide the paths instead, """ """as the dataset and the index are loaded """ """separately. More info in examples/rag/use_own_knowledge_dataset.py """ ) super().__init__( lowercase , question_encoder_tokenizer=lowercase , generator_tokenizer=lowercase , index=lowercase , init_retrieval=lowercase , ) __UpperCamelCase = retrieval_workers if len(self.retrieval_workers ) > 0: ray.get( [ worker.create_rag_retriever.remote(lowercase , lowercase , lowercase , lowercase ) for worker in self.retrieval_workers ] ) def __lowerCamelCase ( self ) -> Dict: logger.info("""initializing retrieval""" ) if len(self.retrieval_workers ) > 0: ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers] ) else: # Non-distributed training. Load index into this same process. self.index.init_index() def __lowerCamelCase ( self , lowercase , lowercase ) -> List[str]: if len(self.retrieval_workers ) > 0: # Select a random retrieval actor. __UpperCamelCase = self.retrieval_workers[random.randint(0 , len(self.retrieval_workers ) - 1 )] __UpperCamelCase , __UpperCamelCase = ray.get(random_worker.retrieve.remote(lowercase , lowercase ) ) else: __UpperCamelCase , __UpperCamelCase = self._main_retrieve(lowercase , lowercase ) return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(lowercase ) @classmethod def __lowerCamelCase ( cls , lowercase , lowercase=None , **lowercase ) -> Any: return super(lowercase , cls ).get_tokenizers(lowercase , lowercase , **lowercase ) @classmethod def __lowerCamelCase ( cls , lowercase , lowercase , lowercase=None , **lowercase ) -> int: __UpperCamelCase = kwargs.pop("""config""" , lowercase ) or RagConfig.from_pretrained(lowercase , **lowercase ) __UpperCamelCase = RagTokenizer.from_pretrained(lowercase , config=lowercase ) __UpperCamelCase = rag_tokenizer.question_encoder __UpperCamelCase = rag_tokenizer.generator if indexed_dataset is not None: __UpperCamelCase = """custom""" __UpperCamelCase = CustomHFIndex(config.retrieval_vector_size , lowercase ) else: __UpperCamelCase = cls._build_index(lowercase ) return cls( lowercase , question_encoder_tokenizer=lowercase , generator_tokenizer=lowercase , retrieval_workers=lowercase , index=lowercase , )
349
0
import gc import random import tempfile import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, ControlNetModel, DDIMScheduler, StableDiffusionControlNetImgaImgPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, ) enable_full_determinism() class a_ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ): """simple docstring""" __UpperCAmelCase = StableDiffusionControlNetImgaImgPipeline __UpperCAmelCase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width'} __UpperCAmelCase = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS __UpperCAmelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({'control_image'} ) __UpperCAmelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS def _lowerCAmelCase ( self : List[str] ): torch.manual_seed(0 ) SCREAMING_SNAKE_CASE =UNetaDConditionModel( block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') ,up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') ,cross_attention_dim=32 ,) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE =ControlNetModel( block_out_channels=(32, 64) ,layers_per_block=2 ,in_channels=4 ,down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') ,cross_attention_dim=32 ,conditioning_embedding_out_channels=(16, 32) ,) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE =DDIMScheduler( beta_start=0.00_085 ,beta_end=0.012 ,beta_schedule='scaled_linear' ,clip_sample=snake_case ,set_alpha_to_one=snake_case ,) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE =AutoencoderKL( block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] ,up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] ,latent_channels=4 ,) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE =CLIPTextConfig( bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,) SCREAMING_SNAKE_CASE =CLIPTextModel(snake_case ) SCREAMING_SNAKE_CASE =CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) SCREAMING_SNAKE_CASE ={ 'unet': unet, 'controlnet': controlnet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'safety_checker': None, 'feature_extractor': None, } return components def _lowerCAmelCase ( self : Tuple ,snake_case : int ,snake_case : Any=0 ): if str(snake_case ).startswith('mps' ): SCREAMING_SNAKE_CASE =torch.manual_seed(snake_case ) else: SCREAMING_SNAKE_CASE =torch.Generator(device=snake_case ).manual_seed(snake_case ) SCREAMING_SNAKE_CASE =2 SCREAMING_SNAKE_CASE =randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) ,generator=snake_case ,device=torch.device(snake_case ) ,) SCREAMING_SNAKE_CASE =floats_tensor(control_image.shape ,rng=random.Random(snake_case ) ).to(snake_case ) SCREAMING_SNAKE_CASE =image.cpu().permute(0 ,2 ,3 ,1 )[0] SCREAMING_SNAKE_CASE =Image.fromarray(np.uinta(snake_case ) ).convert('RGB' ).resize((64, 64) ) SCREAMING_SNAKE_CASE ={ 'prompt': 'A painting of a squirrel eating a burger', 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 6.0, 'output_type': 'numpy', 'image': image, 'control_image': control_image, } return inputs def _lowerCAmelCase ( self : List[str] ): return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3 ) @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() ,reason='XFormers attention is only available with CUDA and `xformers` installed' ,) def _lowerCAmelCase ( self : Dict ): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3 ) def _lowerCAmelCase ( self : Optional[Any] ): self._test_inference_batch_single_identical(expected_max_diff=2e-3 ) class a_ ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ): """simple docstring""" __UpperCAmelCase = StableDiffusionControlNetImgaImgPipeline __UpperCAmelCase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width'} __UpperCAmelCase = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS __UpperCAmelCase = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess def _lowerCAmelCase ( self : Union[str, Any] ): torch.manual_seed(0 ) SCREAMING_SNAKE_CASE =UNetaDConditionModel( block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') ,up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') ,cross_attention_dim=32 ,) torch.manual_seed(0 ) def init_weights(snake_case : List[str] ): if isinstance(snake_case ,torch.nn.Convad ): torch.nn.init.normal(m.weight ) m.bias.data.fill_(1.0 ) SCREAMING_SNAKE_CASE =ControlNetModel( block_out_channels=(32, 64) ,layers_per_block=2 ,in_channels=4 ,down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') ,cross_attention_dim=32 ,conditioning_embedding_out_channels=(16, 32) ,) controlneta.controlnet_down_blocks.apply(snake_case ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE =ControlNetModel( block_out_channels=(32, 64) ,layers_per_block=2 ,in_channels=4 ,down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') ,cross_attention_dim=32 ,conditioning_embedding_out_channels=(16, 32) ,) controlneta.controlnet_down_blocks.apply(snake_case ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE =DDIMScheduler( beta_start=0.00_085 ,beta_end=0.012 ,beta_schedule='scaled_linear' ,clip_sample=snake_case ,set_alpha_to_one=snake_case ,) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE =AutoencoderKL( block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] ,up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] ,latent_channels=4 ,) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE =CLIPTextConfig( bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,) SCREAMING_SNAKE_CASE =CLIPTextModel(snake_case ) SCREAMING_SNAKE_CASE =CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) SCREAMING_SNAKE_CASE =MultiControlNetModel([controlneta, controlneta] ) SCREAMING_SNAKE_CASE ={ 'unet': unet, 'controlnet': controlnet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'safety_checker': None, 'feature_extractor': None, } return components def _lowerCAmelCase ( self : List[str] ,snake_case : List[str] ,snake_case : List[Any]=0 ): if str(snake_case ).startswith('mps' ): SCREAMING_SNAKE_CASE =torch.manual_seed(snake_case ) else: SCREAMING_SNAKE_CASE =torch.Generator(device=snake_case ).manual_seed(snake_case ) SCREAMING_SNAKE_CASE =2 SCREAMING_SNAKE_CASE =[ randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) ,generator=snake_case ,device=torch.device(snake_case ) ,), randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) ,generator=snake_case ,device=torch.device(snake_case ) ,), ] SCREAMING_SNAKE_CASE =floats_tensor(control_image[0].shape ,rng=random.Random(snake_case ) ).to(snake_case ) SCREAMING_SNAKE_CASE =image.cpu().permute(0 ,2 ,3 ,1 )[0] SCREAMING_SNAKE_CASE =Image.fromarray(np.uinta(snake_case ) ).convert('RGB' ).resize((64, 64) ) SCREAMING_SNAKE_CASE ={ 'prompt': 'A painting of a squirrel eating a burger', 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 6.0, 'output_type': 'numpy', 'image': image, 'control_image': control_image, } return inputs def _lowerCAmelCase ( self : int ): SCREAMING_SNAKE_CASE =self.get_dummy_components() SCREAMING_SNAKE_CASE =self.pipeline_class(**snake_case ) pipe.to(snake_case ) SCREAMING_SNAKE_CASE =10.0 SCREAMING_SNAKE_CASE =4 SCREAMING_SNAKE_CASE =self.get_dummy_inputs(snake_case ) SCREAMING_SNAKE_CASE =steps SCREAMING_SNAKE_CASE =scale SCREAMING_SNAKE_CASE =pipe(**snake_case )[0] SCREAMING_SNAKE_CASE =self.get_dummy_inputs(snake_case ) SCREAMING_SNAKE_CASE =steps SCREAMING_SNAKE_CASE =scale SCREAMING_SNAKE_CASE =pipe(**snake_case ,control_guidance_start=0.1 ,control_guidance_end=0.2 )[0] SCREAMING_SNAKE_CASE =self.get_dummy_inputs(snake_case ) SCREAMING_SNAKE_CASE =steps SCREAMING_SNAKE_CASE =scale SCREAMING_SNAKE_CASE =pipe(**snake_case ,control_guidance_start=[0.1, 0.3] ,control_guidance_end=[0.2, 0.7] )[0] SCREAMING_SNAKE_CASE =self.get_dummy_inputs(snake_case ) SCREAMING_SNAKE_CASE =steps SCREAMING_SNAKE_CASE =scale SCREAMING_SNAKE_CASE =pipe(**snake_case ,control_guidance_start=0.4 ,control_guidance_end=[0.5, 0.8] )[0] # make sure that all outputs are different assert np.sum(np.abs(output_a - output_a ) ) > 1e-3 assert np.sum(np.abs(output_a - output_a ) ) > 1e-3 assert np.sum(np.abs(output_a - output_a ) ) > 1e-3 def _lowerCAmelCase ( self : Optional[int] ): return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3 ) @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() ,reason='XFormers attention is only available with CUDA and `xformers` installed' ,) def _lowerCAmelCase ( self : Union[str, Any] ): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3 ) def _lowerCAmelCase ( self : Any ): self._test_inference_batch_single_identical(expected_max_diff=2e-3 ) def _lowerCAmelCase ( self : Tuple ): SCREAMING_SNAKE_CASE =self.get_dummy_components() SCREAMING_SNAKE_CASE =self.pipeline_class(**snake_case ) pipe.to(snake_case ) pipe.set_progress_bar_config(disable=snake_case ) with tempfile.TemporaryDirectory() as tmpdir: try: # save_pretrained is not implemented for Multi-ControlNet pipe.save_pretrained(snake_case ) except NotImplementedError: pass @slow @require_torch_gpu class a_ ( unittest.TestCase ): """simple docstring""" def _lowerCAmelCase ( self : Any ): super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowerCAmelCase ( self : List[Any] ): SCREAMING_SNAKE_CASE =ControlNetModel.from_pretrained('lllyasviel/sd-controlnet-canny' ) SCREAMING_SNAKE_CASE =StableDiffusionControlNetImgaImgPipeline.from_pretrained( 'runwayml/stable-diffusion-v1-5' ,safety_checker=snake_case ,controlnet=snake_case ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=snake_case ) SCREAMING_SNAKE_CASE =torch.Generator(device='cpu' ).manual_seed(0 ) SCREAMING_SNAKE_CASE ='evil space-punk bird' SCREAMING_SNAKE_CASE =load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png' ).resize((512, 512) ) SCREAMING_SNAKE_CASE =load_image( 'https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png' ).resize((512, 512) ) SCREAMING_SNAKE_CASE =pipe( snake_case ,snake_case ,control_image=snake_case ,generator=snake_case ,output_type='np' ,num_inference_steps=50 ,strength=0.6 ,) SCREAMING_SNAKE_CASE =output.images[0] assert image.shape == (512, 512, 3) SCREAMING_SNAKE_CASE =load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy' ) assert np.abs(expected_image - image ).max() < 9e-2
334
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_squeezebert import SqueezeBertTokenizer a__ : Optional[Any] = logging.get_logger(__name__) a__ : Union[str, Any] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} a__ : Any = { 'vocab_file': { 'squeezebert/squeezebert-uncased': ( 'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt' ), 'squeezebert/squeezebert-mnli': 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt', 'squeezebert/squeezebert-mnli-headless': ( 'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'squeezebert/squeezebert-uncased': ( 'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json' ), 'squeezebert/squeezebert-mnli': ( 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json' ), 'squeezebert/squeezebert-mnli-headless': ( 'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json' ), }, } a__ : Optional[Any] = { 'squeezebert/squeezebert-uncased': 5_1_2, 'squeezebert/squeezebert-mnli': 5_1_2, 'squeezebert/squeezebert-mnli-headless': 5_1_2, } a__ : Optional[Any] = { 'squeezebert/squeezebert-uncased': {'do_lower_case': True}, 'squeezebert/squeezebert-mnli': {'do_lower_case': True}, 'squeezebert/squeezebert-mnli-headless': {'do_lower_case': True}, } class UpperCAmelCase__ ( UpperCAmelCase_): __SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES __SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP __SCREAMING_SNAKE_CASE = PRETRAINED_INIT_CONFIGURATION __SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __SCREAMING_SNAKE_CASE = SqueezeBertTokenizer def __init__( self , lowercase=None , lowercase=None , lowercase=True , lowercase="[UNK]" , lowercase="[SEP]" , lowercase="[PAD]" , lowercase="[CLS]" , lowercase="[MASK]" , lowercase=True , lowercase=None , **lowercase , ) -> Tuple: super().__init__( lowercase , tokenizer_file=lowercase , do_lower_case=lowercase , unk_token=lowercase , sep_token=lowercase , pad_token=lowercase , cls_token=lowercase , mask_token=lowercase , tokenize_chinese_chars=lowercase , strip_accents=lowercase , **lowercase , ) __UpperCamelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get("""lowercase""" , lowercase ) != do_lower_case or normalizer_state.get("""strip_accents""" , lowercase ) != strip_accents or normalizer_state.get("""handle_chinese_chars""" , lowercase ) != tokenize_chinese_chars ): __UpperCamelCase = getattr(lowercase , normalizer_state.pop("""type""" ) ) __UpperCamelCase = do_lower_case __UpperCamelCase = strip_accents __UpperCamelCase = tokenize_chinese_chars __UpperCamelCase = normalizer_class(**lowercase ) __UpperCamelCase = do_lower_case def __lowerCamelCase ( self , lowercase , lowercase=None ) -> Tuple: __UpperCamelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def __lowerCamelCase ( self , lowercase , lowercase = None ) -> List[int]: __UpperCamelCase = [self.sep_token_id] __UpperCamelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __lowerCamelCase ( self , lowercase , lowercase = None ) -> Tuple[str]: __UpperCamelCase = self._tokenizer.model.save(lowercase , name=lowercase ) return tuple(lowercase )
349
0
import bza import gzip import lzma import os import shutil import struct import tarfile import warnings import zipfile from abc import ABC, abstractmethod from pathlib import Path from typing import Dict, List, Optional, Type, Union from .. import config from .filelock import FileLock from .logging import get_logger __A : Any = get_logger(__name__) class A_ : def __init__( self , _A = None ): '''simple docstring''' UpperCAmelCase = ( os.path.join(_A , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH ) UpperCAmelCase = Extractor def _lowercase ( self , _A ): '''simple docstring''' from .file_utils import hash_url_to_filename # Path where we extract compressed archives # We extract in the cache dir, and get the extracted path name by hashing the original path" UpperCAmelCase = os.path.abspath(_A ) return os.path.join(self.extract_dir , hash_url_to_filename(_A ) ) def _lowercase ( self , _A , _A ): '''simple docstring''' return force_extract or ( not os.path.isfile(_A ) and not (os.path.isdir(_A ) and os.listdir(_A )) ) def _lowercase ( self , _A , _A = False ): '''simple docstring''' UpperCAmelCase = self.extractor.infer_extractor_format(_A ) if not extractor_format: return input_path UpperCAmelCase = self._get_output_path(_A ) if self._do_extract(_A , _A ): self.extractor.extract(_A , _A , _A ) return output_path class A_ (UpperCAmelCase_ ): @classmethod @abstractmethod def _lowercase ( cls , _A , **_A ): '''simple docstring''' ... @staticmethod @abstractmethod def _lowercase ( _A , _A ): '''simple docstring''' ... class A_ (UpperCAmelCase_ , UpperCAmelCase_ ): UpperCAmelCase__ = [] @staticmethod def _lowercase ( _A , _A ): '''simple docstring''' with open(_A , '''rb''' ) as f: return f.read(_A ) @classmethod def _lowercase ( cls , _A , _A = b"" ): '''simple docstring''' if not magic_number: UpperCAmelCase = max(len(_A ) for cls_magic_number in cls.magic_numbers ) try: UpperCAmelCase = cls.read_magic_number(_A , _A ) except OSError: return False return any(magic_number.startswith(_A ) for cls_magic_number in cls.magic_numbers ) class A_ (UpperCAmelCase_ ): @classmethod def _lowercase ( cls , _A , **_A ): '''simple docstring''' return tarfile.is_tarfile(_A ) @staticmethod def _lowercase ( _A , _A ): '''simple docstring''' def resolved(_A ) -> str: return os.path.realpath(os.path.abspath(_A ) ) def badpath(_A , _A ) -> bool: # joinpath will ignore base if path is absolute return not resolved(os.path.join(_A , _A ) ).startswith(_A ) def badlink(_A , _A ) -> bool: # Links are interpreted relative to the directory containing the link UpperCAmelCase = resolved(os.path.join(_A , os.path.dirname(info.name ) ) ) return badpath(info.linkname , base=_A ) UpperCAmelCase = resolved(_A ) for finfo in members: if badpath(finfo.name , _A ): logger.error(F"""Extraction of {finfo.name} is blocked (illegal path)""" ) elif finfo.issym() and badlink(_A , _A ): logger.error(F"""Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}""" ) elif finfo.islnk() and badlink(_A , _A ): logger.error(F"""Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}""" ) else: yield finfo @staticmethod def _lowercase ( _A , _A ): '''simple docstring''' os.makedirs(_A , exist_ok=_A ) UpperCAmelCase = tarfile.open(_A ) tar_file.extractall(_A , members=TarExtractor.safemembers(_A , _A ) ) tar_file.close() class A_ (UpperCAmelCase_ ): UpperCAmelCase__ = [B'''\x1F\x8B'''] @staticmethod def _lowercase ( _A , _A ): '''simple docstring''' with gzip.open(_A , '''rb''' ) as gzip_file: with open(_A , '''wb''' ) as extracted_file: shutil.copyfileobj(_A , _A ) class A_ (UpperCAmelCase_ ): UpperCAmelCase__ = [ B'''PK\x03\x04''', B'''PK\x05\x06''', # empty archive B'''PK\x07\x08''', # spanned archive ] @classmethod def _lowercase ( cls , _A , _A = b"" ): '''simple docstring''' if super().is_extractable(_A , magic_number=_A ): return True try: # Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives. # From: https://github.com/python/cpython/pull/5053 from zipfile import ( _CD_SIGNATURE, _ECD_DISK_NUMBER, _ECD_DISK_START, _ECD_ENTRIES_TOTAL, _ECD_OFFSET, _ECD_SIZE, _EndRecData, sizeCentralDir, stringCentralDir, structCentralDir, ) with open(_A , '''rb''' ) as fp: UpperCAmelCase = _EndRecData(_A ) if endrec: if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0: return True # Empty zipfiles are still zipfiles elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]: fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir: UpperCAmelCase = fp.read(_A ) # CD is where we expect it to be if len(_A ) == sizeCentralDir: UpperCAmelCase = struct.unpack(_A , _A ) # CD is the right size if centdir[_CD_SIGNATURE] == stringCentralDir: return True # First central directory entry has correct magic number return False except Exception: # catch all errors in case future python versions change the zipfile internals return False @staticmethod def _lowercase ( _A , _A ): '''simple docstring''' os.makedirs(_A , exist_ok=_A ) with zipfile.ZipFile(_A , '''r''' ) as zip_file: zip_file.extractall(_A ) zip_file.close() class A_ (UpperCAmelCase_ ): UpperCAmelCase__ = [B'''\xFD\x37\x7A\x58\x5A\x00'''] @staticmethod def _lowercase ( _A , _A ): '''simple docstring''' with lzma.open(_A ) as compressed_file: with open(_A , '''wb''' ) as extracted_file: shutil.copyfileobj(_A , _A ) class A_ (UpperCAmelCase_ ): UpperCAmelCase__ = [B'''Rar!\x1a\x07\x00''', B'''Rar!\x1a\x07\x01\x00'''] # RAR_ID # RAR5_ID @staticmethod def _lowercase ( _A , _A ): '''simple docstring''' if not config.RARFILE_AVAILABLE: raise ImportError('''Please pip install rarfile''' ) import rarfile os.makedirs(_A , exist_ok=_A ) UpperCAmelCase = rarfile.RarFile(_A ) rf.extractall(_A ) rf.close() class A_ (UpperCAmelCase_ ): UpperCAmelCase__ = [B'''\x28\xb5\x2F\xFD'''] @staticmethod def _lowercase ( _A , _A ): '''simple docstring''' if not config.ZSTANDARD_AVAILABLE: raise ImportError('''Please pip install zstandard''' ) import zstandard as zstd UpperCAmelCase = zstd.ZstdDecompressor() with open(_A , '''rb''' ) as ifh, open(_A , '''wb''' ) as ofh: dctx.copy_stream(_A , _A ) class A_ (UpperCAmelCase_ ): UpperCAmelCase__ = [B'''\x42\x5A\x68'''] @staticmethod def _lowercase ( _A , _A ): '''simple docstring''' with bza.open(_A , '''rb''' ) as compressed_file: with open(_A , '''wb''' ) as extracted_file: shutil.copyfileobj(_A , _A ) class A_ (UpperCAmelCase_ ): UpperCAmelCase__ = [B'''\x37\x7A\xBC\xAF\x27\x1C'''] @staticmethod def _lowercase ( _A , _A ): '''simple docstring''' if not config.PY7ZR_AVAILABLE: raise ImportError('''Please pip install py7zr''' ) import pyazr os.makedirs(_A , exist_ok=_A ) with pyazr.SevenZipFile(_A , '''r''' ) as archive: archive.extractall(_A ) class A_ (UpperCAmelCase_ ): UpperCAmelCase__ = [B'''\x04\x22\x4D\x18'''] @staticmethod def _lowercase ( _A , _A ): '''simple docstring''' if not config.LZ4_AVAILABLE: raise ImportError('''Please pip install lz4''' ) import lza.frame with lza.frame.open(_A , '''rb''' ) as compressed_file: with open(_A , '''wb''' ) as extracted_file: shutil.copyfileobj(_A , _A ) class A_ : # Put zip file to the last, b/c it is possible wrongly detected as zip (I guess it means: as tar or gzip) UpperCAmelCase__ = { '''tar''': TarExtractor, '''gzip''': GzipExtractor, '''zip''': ZipExtractor, '''xz''': XzExtractor, '''rar''': RarExtractor, '''zstd''': ZstdExtractor, '''bz2''': BzipaExtractor, '''7z''': SevenZipExtractor, # <Added version="2.4.0"/> '''lz4''': LzaExtractor, # <Added version="2.4.0"/> } @classmethod def _lowercase ( cls ): '''simple docstring''' return max( len(_A ) for extractor in cls.extractors.values() if issubclass(_A , _A ) for extractor_magic_number in extractor.magic_numbers ) @staticmethod def _lowercase ( _A , _A ): '''simple docstring''' try: return MagicNumberBaseExtractor.read_magic_number(_A , magic_number_length=_A ) except OSError: return b"" @classmethod def _lowercase ( cls , _A , _A = False ): '''simple docstring''' warnings.warn( '''Method \'is_extractable\' was deprecated in version 2.4.0 and will be removed in 3.0.0. ''' '''Use \'infer_extractor_format\' instead.''' , category=_A , ) UpperCAmelCase = cls.infer_extractor_format(_A ) if extractor_format: return True if not return_extractor else (True, cls.extractors[extractor_format]) return False if not return_extractor else (False, None) @classmethod def _lowercase ( cls , _A ): # <Added version="2.4.0"/> '''simple docstring''' UpperCAmelCase = cls._get_magic_number_max_length() UpperCAmelCase = cls._read_magic_number(_A , _A ) for extractor_format, extractor in cls.extractors.items(): if extractor.is_extractable(_A , magic_number=_A ): return extractor_format @classmethod def _lowercase ( cls , _A , _A , _A = None , _A = "deprecated" , ): '''simple docstring''' os.makedirs(os.path.dirname(_A ) , exist_ok=_A ) # Prevent parallel extractions UpperCAmelCase = str(Path(_A ).with_suffix('''.lock''' ) ) with FileLock(_A ): shutil.rmtree(_A , ignore_errors=_A ) if extractor_format or extractor != "deprecated": if extractor != "deprecated" or not isinstance(_A , _A ): # passed as positional arg warnings.warn( '''Parameter \'extractor\' was deprecated in version 2.4.0 and will be removed in 3.0.0. ''' '''Use \'extractor_format\' instead.''' , category=_A , ) UpperCAmelCase = extractor if extractor != '''deprecated''' else extractor_format else: UpperCAmelCase = cls.extractors[extractor_format] return extractor.extract(_A , _A ) else: warnings.warn( '''Parameter \'extractor_format\' was made required in version 2.4.0 and not passing it will raise an ''' '''exception in 3.0.0.''' , category=_A , ) for extractor in cls.extractors.values(): if extractor.is_extractable(_A ): return extractor.extract(_A , _A )
273
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_torch_available, ) a__ : str = { 'configuration_trocr': ['TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TrOCRConfig'], 'processing_trocr': ['TrOCRProcessor'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : int = [ 'TROCR_PRETRAINED_MODEL_ARCHIVE_LIST', 'TrOCRForCausalLM', 'TrOCRPreTrainedModel', ] if TYPE_CHECKING: from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig from .processing_trocr import TrOCRProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel else: import sys a__ : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
349
0
import argparse import io import requests import torch from omegaconf import OmegaConf from diffusers import AutoencoderKL from diffusers.pipelines.stable_diffusion.convert_from_ckpt import ( assign_to_checkpoint, conv_attn_to_linear, create_vae_diffusers_config, renew_vae_attention_paths, renew_vae_resnet_paths, ) def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : List[str] ): """simple docstring""" __a = checkpoint __a = {} __a = vae_state_dict["""encoder.conv_in.weight"""] __a = vae_state_dict["""encoder.conv_in.bias"""] __a = vae_state_dict["""encoder.conv_out.weight"""] __a = vae_state_dict["""encoder.conv_out.bias"""] __a = vae_state_dict["""encoder.norm_out.weight"""] __a = vae_state_dict["""encoder.norm_out.bias"""] __a = vae_state_dict["""decoder.conv_in.weight"""] __a = vae_state_dict["""decoder.conv_in.bias"""] __a = vae_state_dict["""decoder.conv_out.weight"""] __a = vae_state_dict["""decoder.conv_out.bias"""] __a = vae_state_dict["""decoder.norm_out.weight"""] __a = vae_state_dict["""decoder.norm_out.bias"""] __a = vae_state_dict["""quant_conv.weight"""] __a = vae_state_dict["""quant_conv.bias"""] __a = vae_state_dict["""post_quant_conv.weight"""] __a = vae_state_dict["""post_quant_conv.bias"""] # Retrieves the keys for the encoder down blocks only __a = len({""".""".join(layer.split(""".""" )[:3] ) for layer in vae_state_dict if """encoder.down""" in layer} ) __a = { layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(__A ) } # Retrieves the keys for the decoder up blocks only __a = len({""".""".join(layer.split(""".""" )[:3] ) for layer in vae_state_dict if """decoder.up""" in layer} ) __a = { layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(__A ) } for i in range(__A ): __a = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key] if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict: __a = vae_state_dict.pop( f"encoder.down.{i}.downsample.conv.weight" ) __a = vae_state_dict.pop( f"encoder.down.{i}.downsample.conv.bias" ) __a = renew_vae_resnet_paths(__A ) __a = {"""old""": f"down.{i}.block", """new""": f"down_blocks.{i}.resnets"} assign_to_checkpoint(__A , __A , __A , additional_replacements=[meta_path] , config=__A ) __a = [key for key in vae_state_dict if """encoder.mid.block""" in key] __a = 2 for i in range(1 , num_mid_res_blocks + 1 ): __a = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key] __a = renew_vae_resnet_paths(__A ) __a = {"""old""": f"mid.block_{i}", """new""": f"mid_block.resnets.{i - 1}"} assign_to_checkpoint(__A , __A , __A , additional_replacements=[meta_path] , config=__A ) __a = [key for key in vae_state_dict if """encoder.mid.attn""" in key] __a = renew_vae_attention_paths(__A ) __a = {"""old""": """mid.attn_1""", """new""": """mid_block.attentions.0"""} assign_to_checkpoint(__A , __A , __A , additional_replacements=[meta_path] , config=__A ) conv_attn_to_linear(__A ) for i in range(__A ): __a = num_up_blocks - 1 - i __a = [ key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key ] if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict: __a = vae_state_dict[ f"decoder.up.{block_id}.upsample.conv.weight" ] __a = vae_state_dict[ f"decoder.up.{block_id}.upsample.conv.bias" ] __a = renew_vae_resnet_paths(__A ) __a = {"""old""": f"up.{block_id}.block", """new""": f"up_blocks.{i}.resnets"} assign_to_checkpoint(__A , __A , __A , additional_replacements=[meta_path] , config=__A ) __a = [key for key in vae_state_dict if """decoder.mid.block""" in key] __a = 2 for i in range(1 , num_mid_res_blocks + 1 ): __a = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key] __a = renew_vae_resnet_paths(__A ) __a = {"""old""": f"mid.block_{i}", """new""": f"mid_block.resnets.{i - 1}"} assign_to_checkpoint(__A , __A , __A , additional_replacements=[meta_path] , config=__A ) __a = [key for key in vae_state_dict if """decoder.mid.attn""" in key] __a = renew_vae_attention_paths(__A ) __a = {"""old""": """mid.attn_1""", """new""": """mid_block.attentions.0"""} assign_to_checkpoint(__A , __A , __A , additional_replacements=[meta_path] , config=__A ) conv_attn_to_linear(__A ) return new_checkpoint def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Tuple , ): """simple docstring""" __a = requests.get( """ https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml""" ) __a = io.BytesIO(r.content ) __a = OmegaConf.load(__A ) __a = 512 __a = """cuda""" if torch.cuda.is_available() else """cpu""" if checkpoint_path.endswith("""safetensors""" ): from safetensors import safe_open __a = {} with safe_open(__A , framework="""pt""" , device="""cpu""" ) as f: for key in f.keys(): __a = f.get_tensor(__A ) else: __a = torch.load(__A , map_location=__A )["""state_dict"""] # Convert the VAE model. __a = create_vae_diffusers_config(__A , image_size=__A ) __a = custom_convert_ldm_vae_checkpoint(__A , __A ) __a = AutoencoderKL(**__A ) vae.load_state_dict(__A ) vae.save_pretrained(__A ) if __name__ == "__main__": lowerCamelCase__ = argparse.ArgumentParser() parser.add_argument("""--vae_pt_path""", default=None, type=str, required=True, help="""Path to the VAE.pt to convert.""") parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the VAE.pt to convert.""") lowerCamelCase__ = parser.parse_args() vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
302
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig a__ : Union[str, Any] = { 'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/config.json', 'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/config.json', 'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/config.json', 'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json', 'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/config.json', 'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/config.json', 'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/config.json', 'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json', } class UpperCAmelCase__ ( UpperCAmelCase_): __SCREAMING_SNAKE_CASE = '''albert''' def __init__( self , lowercase=3_0_0_0_0 , lowercase=1_2_8 , lowercase=4_0_9_6 , lowercase=1_2 , lowercase=1 , lowercase=6_4 , lowercase=1_6_3_8_4 , lowercase=1 , lowercase="gelu_new" , lowercase=0 , lowercase=0 , lowercase=5_1_2 , lowercase=2 , lowercase=0.02 , lowercase=1E-12 , lowercase=0.1 , lowercase="absolute" , lowercase=0 , lowercase=2 , lowercase=3 , **lowercase , ) -> Any: super().__init__(pad_token_id=lowercase , bos_token_id=lowercase , eos_token_id=lowercase , **lowercase ) __UpperCamelCase = vocab_size __UpperCamelCase = embedding_size __UpperCamelCase = hidden_size __UpperCamelCase = num_hidden_layers __UpperCamelCase = num_hidden_groups __UpperCamelCase = num_attention_heads __UpperCamelCase = inner_group_num __UpperCamelCase = hidden_act __UpperCamelCase = intermediate_size __UpperCamelCase = hidden_dropout_prob __UpperCamelCase = attention_probs_dropout_prob __UpperCamelCase = max_position_embeddings __UpperCamelCase = type_vocab_size __UpperCamelCase = initializer_range __UpperCamelCase = layer_norm_eps __UpperCamelCase = classifier_dropout_prob __UpperCamelCase = position_embedding_type class UpperCAmelCase__ ( UpperCAmelCase_): @property def __lowerCamelCase ( self ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": __UpperCamelCase = {0: """batch""", 1: """choice""", 2: """sequence"""} else: __UpperCamelCase = {0: """batch""", 1: """sequence"""} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis), ] )
349
0
"""simple docstring""" import re def __lowerCamelCase ( __UpperCamelCase ) -> List[str]: """simple docstring""" return [char.split() for char in re.split(r"[^ a-z A-Z 0-9 \s]" , str_ )] def __lowerCamelCase ( __UpperCamelCase ) -> List[str]: """simple docstring""" lowerCAmelCase_ : List[str] = split_input(str_ ) return "".join( ["".join([char.capitalize() for char in sub_str] ) for sub_str in string_split] ) def __lowerCamelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Tuple: """simple docstring""" try: lowerCAmelCase_ : Union[str, Any] = split_input(__A ) if upper: lowerCAmelCase_ : str = "".join( [ separator.join([char.upper() for char in sub_str] ) for sub_str in string_split ] ) else: lowerCAmelCase_ : int = "".join( [ separator.join([char.lower() for char in sub_str] ) for sub_str in string_split ] ) return res_str except IndexError: return "not valid string" def __lowerCamelCase ( __UpperCamelCase ) -> Any: """simple docstring""" return to_simple_case(__A ) def __lowerCamelCase ( __UpperCamelCase ) -> Any: """simple docstring""" try: lowerCAmelCase_ : int = to_simple_case(__A ) return res_str[0].lower() + res_str[1:] except IndexError: return "not valid string" def __lowerCamelCase ( __UpperCamelCase , __UpperCamelCase ) -> Union[str, Any]: """simple docstring""" return to_complex_case(__A , __A , "_" ) def __lowerCamelCase ( __UpperCamelCase , __UpperCamelCase ) -> Tuple: """simple docstring""" return to_complex_case(__A , __A , "-" ) if __name__ == "__main__": __import__("""doctest""").testmod()
241
'''simple docstring''' import numpy as np from sklearn.datasets import fetch_california_housing from sklearn.metrics import mean_absolute_error, mean_squared_error from sklearn.model_selection import train_test_split from xgboost import XGBRegressor def _lowercase ( __A ): '''simple docstring''' return (data["data"], data["target"]) def _lowercase ( __A ,__A ,__A ): '''simple docstring''' __UpperCamelCase = XGBRegressor(verbosity=0 ,random_state=42 ) xgb.fit(__A ,__A ) # Predict target for test data __UpperCamelCase = xgb.predict(__A ) __UpperCamelCase = predictions.reshape(len(__A ) ,1 ) return predictions def _lowercase ( ): '''simple docstring''' __UpperCamelCase = fetch_california_housing() __UpperCamelCase , __UpperCamelCase = data_handling(__A ) __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = train_test_split( __A ,__A ,test_size=0.25 ,random_state=1 ) __UpperCamelCase = xgboost(__A ,__A ,__A ) # Error printing print(f"Mean Absolute Error : {mean_absolute_error(__A ,__A )}" ) print(f"Mean Square Error : {mean_squared_error(__A ,__A )}" ) if __name__ == "__main__": import doctest doctest.testmod(verbose=True) main()
349
0
import flax.linen as nn import jax.numpy as jnp from .attention_flax import FlaxTransformeraDModel from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD class _lowerCamelCase ( nn.Module ): """simple docstring""" UpperCAmelCase_ : List[str] =42 UpperCAmelCase_ : int =42 UpperCAmelCase_ : Tuple =0.0 UpperCAmelCase_ : Dict =1 UpperCAmelCase_ : int =1 UpperCAmelCase_ : int =True UpperCAmelCase_ : Union[str, Any] =False UpperCAmelCase_ : str =False UpperCAmelCase_ : List[str] =False UpperCAmelCase_ : List[Any] =jnp.floataa def UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' __snake_case : Tuple = [] __snake_case : Optional[int] = [] for i in range(self.num_layers ): __snake_case : int = self.in_channels if i == 0 else self.out_channels __snake_case : Union[str, Any] = FlaxResnetBlockaD( in_channels=UpperCAmelCase , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(UpperCAmelCase ) __snake_case : Dict = FlaxTransformeraDModel( in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(UpperCAmelCase ) __snake_case : Union[str, Any] = resnets __snake_case : Any = attentions if self.add_downsample: __snake_case : Any = FlaxDownsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=True ) -> Dict: '''simple docstring''' __snake_case : Any = () for resnet, attn in zip(self.resnets , self.attentions ): __snake_case : Any = resnet(UpperCAmelCase , UpperCAmelCase , deterministic=UpperCAmelCase ) __snake_case : List[Any] = attn(UpperCAmelCase , UpperCAmelCase , deterministic=UpperCAmelCase ) output_states += (hidden_states,) if self.add_downsample: __snake_case : Optional[Any] = self.downsamplers_a(UpperCAmelCase ) output_states += (hidden_states,) return hidden_states, output_states class _lowerCamelCase ( nn.Module ): """simple docstring""" UpperCAmelCase_ : Optional[Any] =42 UpperCAmelCase_ : List[str] =42 UpperCAmelCase_ : Optional[Any] =0.0 UpperCAmelCase_ : List[Any] =1 UpperCAmelCase_ : Any =True UpperCAmelCase_ : int =jnp.floataa def UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' __snake_case : Dict = [] for i in range(self.num_layers ): __snake_case : List[str] = self.in_channels if i == 0 else self.out_channels __snake_case : Any = FlaxResnetBlockaD( in_channels=UpperCAmelCase , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(UpperCAmelCase ) __snake_case : List[Any] = resnets if self.add_downsample: __snake_case : Dict = FlaxDownsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=True ) -> Any: '''simple docstring''' __snake_case : int = () for resnet in self.resnets: __snake_case : Union[str, Any] = resnet(UpperCAmelCase , UpperCAmelCase , deterministic=UpperCAmelCase ) output_states += (hidden_states,) if self.add_downsample: __snake_case : Dict = self.downsamplers_a(UpperCAmelCase ) output_states += (hidden_states,) return hidden_states, output_states class _lowerCamelCase ( nn.Module ): """simple docstring""" UpperCAmelCase_ : Optional[Any] =42 UpperCAmelCase_ : Any =42 UpperCAmelCase_ : Optional[int] =42 UpperCAmelCase_ : str =0.0 UpperCAmelCase_ : Tuple =1 UpperCAmelCase_ : Optional[Any] =1 UpperCAmelCase_ : Optional[int] =True UpperCAmelCase_ : List[Any] =False UpperCAmelCase_ : int =False UpperCAmelCase_ : str =False UpperCAmelCase_ : List[Any] =jnp.floataa def UpperCAmelCase ( self ) -> Dict: '''simple docstring''' __snake_case : str = [] __snake_case : int = [] for i in range(self.num_layers ): __snake_case : List[Any] = self.in_channels if (i == self.num_layers - 1) else self.out_channels __snake_case : Dict = self.prev_output_channel if i == 0 else self.out_channels __snake_case : Union[str, Any] = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(UpperCAmelCase ) __snake_case : List[Any] = FlaxTransformeraDModel( in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(UpperCAmelCase ) __snake_case : str = resnets __snake_case : Any = attentions if self.add_upsample: __snake_case : Tuple = FlaxUpsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=True ) -> Any: '''simple docstring''' for resnet, attn in zip(self.resnets , self.attentions ): # pop res hidden states __snake_case : int = res_hidden_states_tuple[-1] __snake_case : str = res_hidden_states_tuple[:-1] __snake_case : Optional[Any] = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 ) __snake_case : Any = resnet(UpperCAmelCase , UpperCAmelCase , deterministic=UpperCAmelCase ) __snake_case : List[Any] = attn(UpperCAmelCase , UpperCAmelCase , deterministic=UpperCAmelCase ) if self.add_upsample: __snake_case : List[Any] = self.upsamplers_a(UpperCAmelCase ) return hidden_states class _lowerCamelCase ( nn.Module ): """simple docstring""" UpperCAmelCase_ : Union[str, Any] =42 UpperCAmelCase_ : int =42 UpperCAmelCase_ : Dict =42 UpperCAmelCase_ : int =0.0 UpperCAmelCase_ : Dict =1 UpperCAmelCase_ : Any =True UpperCAmelCase_ : Optional[int] =jnp.floataa def UpperCAmelCase ( self ) -> Dict: '''simple docstring''' __snake_case : Optional[Any] = [] for i in range(self.num_layers ): __snake_case : str = self.in_channels if (i == self.num_layers - 1) else self.out_channels __snake_case : Union[str, Any] = self.prev_output_channel if i == 0 else self.out_channels __snake_case : List[str] = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(UpperCAmelCase ) __snake_case : Optional[int] = resnets if self.add_upsample: __snake_case : List[Any] = FlaxUpsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=True ) -> Tuple: '''simple docstring''' for resnet in self.resnets: # pop res hidden states __snake_case : Union[str, Any] = res_hidden_states_tuple[-1] __snake_case : Tuple = res_hidden_states_tuple[:-1] __snake_case : List[str] = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 ) __snake_case : Optional[int] = resnet(UpperCAmelCase , UpperCAmelCase , deterministic=UpperCAmelCase ) if self.add_upsample: __snake_case : Optional[int] = self.upsamplers_a(UpperCAmelCase ) return hidden_states class _lowerCamelCase ( nn.Module ): """simple docstring""" UpperCAmelCase_ : Optional[int] =42 UpperCAmelCase_ : int =0.0 UpperCAmelCase_ : int =1 UpperCAmelCase_ : Any =1 UpperCAmelCase_ : List[Any] =False UpperCAmelCase_ : Dict =False UpperCAmelCase_ : Optional[Any] =jnp.floataa def UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' __snake_case : Dict = [ FlaxResnetBlockaD( in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , ) ] __snake_case : List[Any] = [] for _ in range(self.num_layers ): __snake_case : str = FlaxTransformeraDModel( in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(UpperCAmelCase ) __snake_case : str = FlaxResnetBlockaD( in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(UpperCAmelCase ) __snake_case : Optional[Any] = resnets __snake_case : List[Any] = attentions def __call__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=True ) -> Dict: '''simple docstring''' __snake_case : Optional[int] = self.resnets[0](UpperCAmelCase , UpperCAmelCase ) for attn, resnet in zip(self.attentions , self.resnets[1:] ): __snake_case : List[Any] = attn(UpperCAmelCase , UpperCAmelCase , deterministic=UpperCAmelCase ) __snake_case : Any = resnet(UpperCAmelCase , UpperCAmelCase , deterministic=UpperCAmelCase ) return hidden_states
326
'''simple docstring''' from __future__ import annotations import unittest from transformers import AutoTokenizer, PegasusConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel @require_tf class UpperCAmelCase__ : __SCREAMING_SNAKE_CASE = PegasusConfig __SCREAMING_SNAKE_CASE = {} __SCREAMING_SNAKE_CASE = '''gelu''' def __init__( self , lowercase , lowercase=1_3 , lowercase=7 , lowercase=True , lowercase=False , lowercase=9_9 , lowercase=3_2 , lowercase=2 , lowercase=4 , lowercase=3_7 , lowercase=0.1 , lowercase=0.1 , lowercase=4_0 , lowercase=2 , lowercase=1 , lowercase=0 , ) -> Any: __UpperCamelCase = parent __UpperCamelCase = batch_size __UpperCamelCase = seq_length __UpperCamelCase = is_training __UpperCamelCase = use_labels __UpperCamelCase = vocab_size __UpperCamelCase = hidden_size __UpperCamelCase = num_hidden_layers __UpperCamelCase = num_attention_heads __UpperCamelCase = intermediate_size __UpperCamelCase = hidden_dropout_prob __UpperCamelCase = attention_probs_dropout_prob __UpperCamelCase = max_position_embeddings __UpperCamelCase = eos_token_id __UpperCamelCase = pad_token_id __UpperCamelCase = bos_token_id def __lowerCamelCase ( self ) -> Dict: __UpperCamelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) __UpperCamelCase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) __UpperCamelCase = tf.concat([input_ids, eos_tensor] , axis=1 ) __UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCamelCase = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) __UpperCamelCase = prepare_pegasus_inputs_dict(lowercase , lowercase , lowercase ) return config, inputs_dict def __lowerCamelCase ( self , lowercase , lowercase ) -> Union[str, Any]: __UpperCamelCase = TFPegasusModel(config=lowercase ).get_decoder() __UpperCamelCase = inputs_dict["""input_ids"""] __UpperCamelCase = input_ids[:1, :] __UpperCamelCase = inputs_dict["""attention_mask"""][:1, :] __UpperCamelCase = inputs_dict["""head_mask"""] __UpperCamelCase = 1 # first forward pass __UpperCamelCase = model(lowercase , attention_mask=lowercase , head_mask=lowercase , use_cache=lowercase ) __UpperCamelCase , __UpperCamelCase = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids __UpperCamelCase = ids_tensor((self.batch_size, 3) , config.vocab_size ) __UpperCamelCase = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and __UpperCamelCase = tf.concat([input_ids, next_tokens] , axis=-1 ) __UpperCamelCase = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) __UpperCamelCase = model(lowercase , attention_mask=lowercase )[0] __UpperCamelCase = model(lowercase , attention_mask=lowercase , past_key_values=lowercase )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice __UpperCamelCase = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) __UpperCamelCase = output_from_no_past[:, -3:, random_slice_idx] __UpperCamelCase = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(lowercase , lowercase , rtol=1E-3 ) def _lowercase ( __A ,__A ,__A ,__A=None ,__A=None ,__A=None ,__A=None ,__A=None ,): '''simple docstring''' if attention_mask is None: __UpperCamelCase = tf.cast(tf.math.not_equal(__A ,config.pad_token_id ) ,tf.inta ) if decoder_attention_mask is None: __UpperCamelCase = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape ,dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] ,config.pad_token_id ) ,tf.inta ), ] ,axis=-1 ,) if head_mask is None: __UpperCamelCase = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: __UpperCamelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: __UpperCamelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class UpperCAmelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase): __SCREAMING_SNAKE_CASE = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else () __SCREAMING_SNAKE_CASE = (TFPegasusForConditionalGeneration,) if is_tf_available() else () __SCREAMING_SNAKE_CASE = ( { '''conversational''': TFPegasusForConditionalGeneration, '''feature-extraction''': TFPegasusModel, '''summarization''': TFPegasusForConditionalGeneration, '''text2text-generation''': TFPegasusForConditionalGeneration, '''translation''': TFPegasusForConditionalGeneration, } if is_tf_available() else {} ) __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = False def __lowerCamelCase ( self ) -> str: __UpperCamelCase = TFPegasusModelTester(self ) __UpperCamelCase = ConfigTester(self , config_class=lowercase ) def __lowerCamelCase ( self ) -> str: self.config_tester.run_common_tests() def __lowerCamelCase ( self ) -> Tuple: __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*lowercase ) @require_sentencepiece @require_tokenizers @require_tf class UpperCAmelCase__ ( unittest.TestCase): __SCREAMING_SNAKE_CASE = [ ''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''', ''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''', ] __SCREAMING_SNAKE_CASE = [ '''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to''' ''' reduce the risk of wildfires.''', '''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''', ] # differs slightly from pytorch, likely due to numerical differences in linear layers __SCREAMING_SNAKE_CASE = '''google/pegasus-xsum''' @cached_property def __lowerCamelCase ( self ) -> int: return AutoTokenizer.from_pretrained(self.model_name ) @cached_property def __lowerCamelCase ( self ) -> str: __UpperCamelCase = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model def __lowerCamelCase ( self , **lowercase ) -> Optional[int]: __UpperCamelCase = self.translate_src_text(**lowercase ) assert self.expected_text == generated_words def __lowerCamelCase ( self , **lowercase ) -> Optional[Any]: __UpperCamelCase = self.tokenizer(self.src_text , **lowercase , padding=lowercase , return_tensors="""tf""" ) __UpperCamelCase = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=lowercase , ) __UpperCamelCase = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=lowercase ) return generated_words @slow def __lowerCamelCase ( self ) -> Dict: self._assert_generated_batch_equal_expected()
349
0
"""simple docstring""" import functools import logging import os import sys import threading from logging import ( CRITICAL, # NOQA DEBUG, # NOQA ERROR, # NOQA FATAL, # NOQA INFO, # NOQA NOTSET, # NOQA WARN, # NOQA WARNING, # NOQA ) from typing import Optional import huggingface_hub.utils as hf_hub_utils from tqdm import auto as tqdm_lib a = threading.Lock() a = None a = { 'debug': logging.DEBUG, 'info': logging.INFO, 'warning': logging.WARNING, 'error': logging.ERROR, 'critical': logging.CRITICAL, } a = logging.WARNING a = True def lowercase () -> Union[str, Any]: '''simple docstring''' lowerCAmelCase = os.getenv("""TRANSFORMERS_VERBOSITY""" , __A ) if env_level_str: if env_level_str in log_levels: return log_levels[env_level_str] else: logging.getLogger().warning( f'''Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, ''' f'''has to be one of: { ', '.join(log_levels.keys() ) }''' ) return _default_log_level def lowercase () -> Dict: '''simple docstring''' return __name__.split(""".""" )[0] def lowercase () -> Tuple: '''simple docstring''' return logging.getLogger(_get_library_name() ) def lowercase () -> Dict: '''simple docstring''' global _default_handler with _lock: if _default_handler: # This library has already configured the library root logger. return lowerCAmelCase = logging.StreamHandler() # Set sys.stderr as stream. lowerCAmelCase = sys.stderr.flush # Apply our default configuration to the library root logger. lowerCAmelCase = _get_library_root_logger() library_root_logger.addHandler(_default_handler ) library_root_logger.setLevel(_get_default_logging_level() ) lowerCAmelCase = False def lowercase () -> Dict: '''simple docstring''' global _default_handler with _lock: if not _default_handler: return lowerCAmelCase = _get_library_root_logger() library_root_logger.removeHandler(_default_handler ) library_root_logger.setLevel(logging.NOTSET ) lowerCAmelCase = None def lowercase () -> List[Any]: '''simple docstring''' return log_levels def lowercase (snake_case__ : str = None ) -> Tuple: '''simple docstring''' if name is None: lowerCAmelCase = _get_library_name() _configure_library_root_logger() return logging.getLogger(__A ) def lowercase () -> int: '''simple docstring''' _configure_library_root_logger() return _get_library_root_logger().getEffectiveLevel() def lowercase (snake_case__ : int ) -> str: '''simple docstring''' _configure_library_root_logger() _get_library_root_logger().setLevel(__A ) def lowercase () -> Optional[Any]: '''simple docstring''' return set_verbosity(__A ) def lowercase () -> Optional[int]: '''simple docstring''' return set_verbosity(__A ) def lowercase () -> List[str]: '''simple docstring''' return set_verbosity(__A ) def lowercase () -> List[str]: '''simple docstring''' return set_verbosity(__A ) def lowercase () -> List[Any]: '''simple docstring''' _configure_library_root_logger() assert _default_handler is not None _get_library_root_logger().removeHandler(_default_handler ) def lowercase () -> List[str]: '''simple docstring''' _configure_library_root_logger() assert _default_handler is not None _get_library_root_logger().addHandler(_default_handler ) def lowercase (snake_case__ : Union[str, Any] ) -> int: '''simple docstring''' _configure_library_root_logger() assert handler is not None _get_library_root_logger().addHandler(__A ) def lowercase (snake_case__ : Optional[int] ) -> Any: '''simple docstring''' _configure_library_root_logger() assert handler is not None and handler not in _get_library_root_logger().handlers _get_library_root_logger().removeHandler(__A ) def lowercase () -> Dict: '''simple docstring''' _configure_library_root_logger() lowerCAmelCase = False def lowercase () -> Any: '''simple docstring''' _configure_library_root_logger() lowerCAmelCase = True def lowercase () -> List[str]: '''simple docstring''' lowerCAmelCase = _get_library_root_logger().handlers for handler in handlers: lowerCAmelCase = logging.Formatter("""[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s""" ) handler.setFormatter(__A ) def lowercase () -> int: '''simple docstring''' lowerCAmelCase = _get_library_root_logger().handlers for handler in handlers: handler.setFormatter(__A ) def lowercase (self : Dict , *snake_case__ : Dict , **snake_case__ : Tuple ) -> Tuple: '''simple docstring''' lowerCAmelCase = os.getenv("""TRANSFORMERS_NO_ADVISORY_WARNINGS""" , __A ) if no_advisory_warnings: return self.warning(*__A , **__A ) a = warning_advice @functools.lru_cache(__A ) def lowercase (self : List[str] , *snake_case__ : Union[str, Any] , **snake_case__ : Tuple ) -> Optional[int]: '''simple docstring''' self.warning(*__A , **__A ) a = warning_once class SCREAMING_SNAKE_CASE__ : def __init__( self : int , *lowerCAmelCase : Optional[Any] , **lowerCAmelCase : Any ): # pylint: disable=unused-argument lowerCAmelCase = args[0] if args else None def __iter__( self : Dict ): return iter(self._iterator ) def __getattr__( self : str , lowerCAmelCase : Any ): def empty_fn(*lowerCAmelCase : str , **lowerCAmelCase : Optional[Any] ): # pylint: disable=unused-argument return return empty_fn def __enter__( self : int ): return self def __exit__( self : Dict , lowerCAmelCase : Optional[int] , lowerCAmelCase : List[str] , lowerCAmelCase : Any ): return class SCREAMING_SNAKE_CASE__ : def __call__( self : List[str] , *lowerCAmelCase : Union[str, Any] , **lowerCAmelCase : Union[str, Any] ): if _tqdm_active: return tqdm_lib.tqdm(*lowerCAmelCase , **lowerCAmelCase ) else: return EmptyTqdm(*lowerCAmelCase , **lowerCAmelCase ) def __lowercase ( self : str , *lowerCAmelCase : int , **lowerCAmelCase : Dict ): lowerCAmelCase = None if _tqdm_active: return tqdm_lib.tqdm.set_lock(*lowerCAmelCase , **lowerCAmelCase ) def __lowercase ( self : Union[str, Any] ): if _tqdm_active: return tqdm_lib.tqdm.get_lock() a = _tqdm_cls() def lowercase () -> List[str]: '''simple docstring''' global _tqdm_active return bool(_tqdm_active ) def lowercase () -> Tuple: '''simple docstring''' global _tqdm_active lowerCAmelCase = True hf_hub_utils.enable_progress_bars() def lowercase () -> Optional[Any]: '''simple docstring''' global _tqdm_active lowerCAmelCase = False hf_hub_utils.disable_progress_bars()
155
'''simple docstring''' import string def _lowercase ( __A ): '''simple docstring''' for key in range(len(string.ascii_uppercase ) ): __UpperCamelCase = """""" for symbol in message: if symbol in string.ascii_uppercase: __UpperCamelCase = string.ascii_uppercase.find(__A ) __UpperCamelCase = num - key if num < 0: __UpperCamelCase = num + len(string.ascii_uppercase ) __UpperCamelCase = translated + string.ascii_uppercase[num] else: __UpperCamelCase = translated + symbol print(f"Decryption using Key #{key}: {translated}" ) def _lowercase ( ): '''simple docstring''' __UpperCamelCase = input("""Encrypted message: """ ) __UpperCamelCase = message.upper() decrypt(__A ) if __name__ == "__main__": import doctest doctest.testmod() main()
349
0
"""simple docstring""" from __future__ import annotations import copy import inspect import unittest import numpy as np from transformers import is_tf_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_tf, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST, TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, LayoutLMvaConfig, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, TFLayoutLMvaModel, ) if is_vision_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class UpperCamelCase_ : def __init__( self : List[str] , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[Any]=2 , lowerCAmelCase_ : List[Any]=3 , lowerCAmelCase_ : Optional[int]=4 , lowerCAmelCase_ : Dict=2 , lowerCAmelCase_ : Any=7 , lowerCAmelCase_ : Any=True , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : str=True , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : List[str]=99 , lowerCAmelCase_ : List[str]=36 , lowerCAmelCase_ : Any=2 , lowerCAmelCase_ : Optional[int]=4 , lowerCAmelCase_ : List[Any]=37 , lowerCAmelCase_ : Tuple="gelu" , lowerCAmelCase_ : int=0.1 , lowerCAmelCase_ : int=0.1 , lowerCAmelCase_ : Dict=512 , lowerCAmelCase_ : List[Any]=16 , lowerCAmelCase_ : Union[str, Any]=2 , lowerCAmelCase_ : Union[str, Any]=0.0_2 , lowerCAmelCase_ : str=6 , lowerCAmelCase_ : Dict=6 , lowerCAmelCase_ : Union[str, Any]=3 , lowerCAmelCase_ : Union[str, Any]=4 , lowerCAmelCase_ : Any=None , lowerCAmelCase_ : Tuple=1_000 , ) -> Tuple: UpperCAmelCase_ : str = parent UpperCAmelCase_ : Optional[Any] = batch_size UpperCAmelCase_ : List[str] = num_channels UpperCAmelCase_ : int = image_size UpperCAmelCase_ : Optional[Any] = patch_size UpperCAmelCase_ : Union[str, Any] = is_training UpperCAmelCase_ : Union[str, Any] = use_input_mask UpperCAmelCase_ : Union[str, Any] = use_token_type_ids UpperCAmelCase_ : List[Any] = use_labels UpperCAmelCase_ : List[Any] = vocab_size UpperCAmelCase_ : str = hidden_size UpperCAmelCase_ : Tuple = num_hidden_layers UpperCAmelCase_ : Union[str, Any] = num_attention_heads UpperCAmelCase_ : List[str] = intermediate_size UpperCAmelCase_ : Dict = hidden_act UpperCAmelCase_ : str = hidden_dropout_prob UpperCAmelCase_ : Tuple = attention_probs_dropout_prob UpperCAmelCase_ : str = max_position_embeddings UpperCAmelCase_ : Any = type_vocab_size UpperCAmelCase_ : str = type_sequence_label_size UpperCAmelCase_ : Union[str, Any] = initializer_range UpperCAmelCase_ : Dict = coordinate_size UpperCAmelCase_ : Tuple = shape_size UpperCAmelCase_ : Optional[int] = num_labels UpperCAmelCase_ : Optional[int] = num_choices UpperCAmelCase_ : Optional[Any] = scope UpperCAmelCase_ : str = range_bbox # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token) UpperCAmelCase_ : Optional[int] = text_seq_length UpperCAmelCase_ : List[str] = (image_size // patch_size) ** 2 + 1 UpperCAmelCase_ : Union[str, Any] = self.text_seq_length + self.image_seq_length def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> str: UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size ) UpperCAmelCase_ : Union[str, Any] = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox ) UpperCAmelCase_ : Optional[Any] = bbox.numpy() # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: UpperCAmelCase_ : Dict = bbox[i, j, 3] UpperCAmelCase_ : Optional[int] = bbox[i, j, 1] UpperCAmelCase_ : Dict = tmp_coordinate if bbox[i, j, 2] < bbox[i, j, 0]: UpperCAmelCase_ : List[Any] = bbox[i, j, 2] UpperCAmelCase_ : Dict = bbox[i, j, 0] UpperCAmelCase_ : Tuple = tmp_coordinate UpperCAmelCase_ : str = tf.constant(lowerCAmelCase_ ) UpperCAmelCase_ : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase_ : Union[str, Any] = None if self.use_input_mask: UpperCAmelCase_ : Optional[Any] = random_attention_mask([self.batch_size, self.text_seq_length] ) UpperCAmelCase_ : Any = None if self.use_token_type_ids: UpperCAmelCase_ : Any = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size ) UpperCAmelCase_ : List[Any] = None UpperCAmelCase_ : Tuple = None if self.use_labels: UpperCAmelCase_ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase_ : str = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels ) UpperCAmelCase_ : Union[str, Any] = LayoutLMvaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , ) return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : List[str] ) -> Optional[Any]: UpperCAmelCase_ : List[str] = TFLayoutLMvaModel(config=lowerCAmelCase_ ) # text + image UpperCAmelCase_ : Any = model(lowerCAmelCase_ , pixel_values=lowerCAmelCase_ , training=lowerCAmelCase_ ) UpperCAmelCase_ : Tuple = model( lowerCAmelCase_ , bbox=lowerCAmelCase_ , pixel_values=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , training=lowerCAmelCase_ , ) UpperCAmelCase_ : Dict = model(lowerCAmelCase_ , bbox=lowerCAmelCase_ , pixel_values=lowerCAmelCase_ , training=lowerCAmelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) # text only UpperCAmelCase_ : str = model(lowerCAmelCase_ , training=lowerCAmelCase_ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) ) # image only UpperCAmelCase_ : List[str] = model({"pixel_values": pixel_values} , training=lowerCAmelCase_ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[Any] ) -> str: UpperCAmelCase_ : str = self.num_labels UpperCAmelCase_ : List[str] = TFLayoutLMvaForSequenceClassification(config=lowerCAmelCase_ ) UpperCAmelCase_ : List[Any] = model( lowerCAmelCase_ , bbox=lowerCAmelCase_ , pixel_values=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ , training=lowerCAmelCase_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : List[str] ) -> str: UpperCAmelCase_ : List[str] = self.num_labels UpperCAmelCase_ : str = TFLayoutLMvaForTokenClassification(config=lowerCAmelCase_ ) UpperCAmelCase_ : List[Any] = model( lowerCAmelCase_ , bbox=lowerCAmelCase_ , pixel_values=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ , training=lowerCAmelCase_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) ) def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : int , lowerCAmelCase_ : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : Any ) -> Any: UpperCAmelCase_ : int = 2 UpperCAmelCase_ : Tuple = TFLayoutLMvaForQuestionAnswering(config=lowerCAmelCase_ ) UpperCAmelCase_ : Optional[Any] = model( lowerCAmelCase_ , bbox=lowerCAmelCase_ , pixel_values=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ , training=lowerCAmelCase_ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> int: UpperCAmelCase_ : Union[str, Any] = self.prepare_config_and_inputs() ((UpperCAmelCase_) , (UpperCAmelCase_) , (UpperCAmelCase_) , (UpperCAmelCase_) , (UpperCAmelCase_) , (UpperCAmelCase_) , (UpperCAmelCase_) , (UpperCAmelCase_)) : str = config_and_inputs UpperCAmelCase_ : str = { "input_ids": input_ids, "bbox": bbox, "pixel_values": pixel_values, "token_type_ids": token_type_ids, "attention_mask": input_mask, } return config, inputs_dict @require_tf class UpperCamelCase_ (UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ): __magic_name__ = ( ( TFLayoutLMvaModel, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, ) if is_tf_available() else () ) __magic_name__ = ( {'''document-question-answering''': TFLayoutLMvaForQuestionAnswering, '''feature-extraction''': TFLayoutLMvaModel} if is_tf_available() else {} ) __magic_name__ = False __magic_name__ = False __magic_name__ = False def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : str ) -> Optional[Any]: return True def _SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase_ : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[Any]=False ) -> dict: UpperCAmelCase_ : str = copy.deepcopy(lowerCAmelCase_ ) if model_class in get_values(lowerCAmelCase_ ): UpperCAmelCase_ : Optional[Any] = { k: tf.tile(tf.expand_dims(lowerCAmelCase_ , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) ) if isinstance(lowerCAmelCase_ , tf.Tensor ) and v.ndim > 0 else v for k, v in inputs_dict.items() } if return_labels: if model_class in get_values(lowerCAmelCase_ ): UpperCAmelCase_ : Optional[int] = tf.ones(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(lowerCAmelCase_ ): UpperCAmelCase_ : Optional[Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) UpperCAmelCase_ : Tuple = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(lowerCAmelCase_ ): UpperCAmelCase_ : Optional[Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(lowerCAmelCase_ ): UpperCAmelCase_ : Any = tf.zeros( (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa ) return inputs_dict def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any: UpperCAmelCase_ : Union[str, Any] = TFLayoutLMvaModelTester(self ) UpperCAmelCase_ : Dict = ConfigTester(self , config_class=lowerCAmelCase_ , hidden_size=37 ) def _SCREAMING_SNAKE_CASE ( self : str ) -> int: self.config_tester.run_common_tests() def _SCREAMING_SNAKE_CASE ( self : int ) -> List[str]: UpperCAmelCase_ , UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase_ : Optional[Any] = model_class(lowerCAmelCase_ ) if getattr(lowerCAmelCase_ , "hf_compute_loss" , lowerCAmelCase_ ): # The number of elements in the loss should be the same as the number of elements in the label UpperCAmelCase_ : int = self._prepare_for_class(inputs_dict.copy() , lowerCAmelCase_ , return_labels=lowerCAmelCase_ ) UpperCAmelCase_ : List[str] = prepared_for_class[ sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=lowerCAmelCase_ )[0] ] UpperCAmelCase_ : Optional[int] = added_label.shape.as_list()[:1] # Test that model correctly compute the loss with kwargs UpperCAmelCase_ : Optional[int] = self._prepare_for_class(inputs_dict.copy() , lowerCAmelCase_ , return_labels=lowerCAmelCase_ ) UpperCAmelCase_ : Union[str, Any] = prepared_for_class.pop("input_ids" ) UpperCAmelCase_ : Optional[Any] = model(lowerCAmelCase_ , **lowerCAmelCase_ )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) # Test that model correctly compute the loss when we mask some positions UpperCAmelCase_ : Tuple = self._prepare_for_class(inputs_dict.copy() , lowerCAmelCase_ , return_labels=lowerCAmelCase_ ) UpperCAmelCase_ : Optional[int] = prepared_for_class.pop("input_ids" ) if "labels" in prepared_for_class: UpperCAmelCase_ : Dict = prepared_for_class["labels"].numpy() if len(labels.shape ) > 1 and labels.shape[1] != 1: UpperCAmelCase_ : Optional[Any] = -100 UpperCAmelCase_ : Any = tf.convert_to_tensor(lowerCAmelCase_ ) UpperCAmelCase_ : Dict = model(lowerCAmelCase_ , **lowerCAmelCase_ )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) ) # Test that model correctly compute the loss with a dict UpperCAmelCase_ : Optional[Any] = self._prepare_for_class(inputs_dict.copy() , lowerCAmelCase_ , return_labels=lowerCAmelCase_ ) UpperCAmelCase_ : Union[str, Any] = model(lowerCAmelCase_ )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) # Test that model correctly compute the loss with a tuple UpperCAmelCase_ : List[Any] = self._prepare_for_class(inputs_dict.copy() , lowerCAmelCase_ , return_labels=lowerCAmelCase_ ) # Get keys that were added with the _prepare_for_class function UpperCAmelCase_ : List[str] = prepared_for_class.keys() - inputs_dict.keys() UpperCAmelCase_ : int = inspect.signature(model.call ).parameters UpperCAmelCase_ : Union[str, Any] = list(signature.keys() ) # Create a dictionary holding the location of the tensors in the tuple UpperCAmelCase_ : int = {0: "input_ids"} for label_key in label_keys: UpperCAmelCase_ : Dict = signature_names.index(lowerCAmelCase_ ) UpperCAmelCase_ : Tuple = label_key UpperCAmelCase_ : Tuple = sorted(tuple_index_mapping.items() ) # Initialize a list with their default values, update the values and convert to a tuple UpperCAmelCase_ : List[Any] = [] for name in signature_names: if name != "kwargs": list_input.append(signature[name].default ) for index, value in sorted_tuple_index_mapping: UpperCAmelCase_ : List[str] = prepared_for_class[value] UpperCAmelCase_ : Tuple = tuple(lowerCAmelCase_ ) # Send to model UpperCAmelCase_ : Optional[Any] = model(tuple_input[:-1] )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any: ( ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ) : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str: ( ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ) : Dict = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: UpperCAmelCase_ : List[str] = type self.model_tester.create_and_check_model(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]: ( ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ) : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Dict ) -> int: ( ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ) : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> str: ( ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ) : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) @slow def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Any: for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase_ : Any = TFLayoutLMvaModel.from_pretrained(lowerCAmelCase_ ) self.assertIsNotNone(lowerCAmelCase_ ) def snake_case ( ): UpperCAmelCase_ : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_tf class UpperCamelCase_ (unittest.TestCase ): @cached_property def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]: return LayoutLMvaImageProcessor(apply_ocr=lowerCAmelCase_ ) if is_vision_available() else None @slow def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> str: UpperCAmelCase_ : Dict = TFLayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base" ) UpperCAmelCase_ : Tuple = self.default_image_processor UpperCAmelCase_ : int = prepare_img() UpperCAmelCase_ : Optional[Any] = image_processor(images=lowerCAmelCase_ , return_tensors="tf" ).pixel_values UpperCAmelCase_ : Dict = tf.constant([[1, 2]] ) UpperCAmelCase_ : str = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 ) # forward pass UpperCAmelCase_ : Optional[Any] = model(input_ids=lowerCAmelCase_ , bbox=lowerCAmelCase_ , pixel_values=lowerCAmelCase_ , training=lowerCAmelCase_ ) # verify the logits UpperCAmelCase_ : List[str] = (1, 199, 768) self.assertEqual(outputs.last_hidden_state.shape , lowerCAmelCase_ ) UpperCAmelCase_ : List[Any] = tf.constant( [[-0.0_5_2_9, 0.3_6_1_8, 0.1_6_3_2], [-0.1_5_8_7, -0.1_6_6_7, -0.0_4_0_0], [-0.1_5_5_7, -0.1_6_7_1, -0.0_5_0_5]] ) self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCAmelCase_ , atol=1e-4 ) )
268
'''simple docstring''' from collections import OrderedDict from typing import Any, List, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import logging a__ : Optional[Any] = logging.get_logger(__name__) a__ : Dict = { 'EleutherAI/gpt-j-6B': 'https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json', # See all GPT-J models at https://huggingface.co/models?filter=gpt_j } class UpperCAmelCase__ ( UpperCAmelCase_): __SCREAMING_SNAKE_CASE = '''gptj''' __SCREAMING_SNAKE_CASE = { '''max_position_embeddings''': '''n_positions''', '''hidden_size''': '''n_embd''', '''num_attention_heads''': '''n_head''', '''num_hidden_layers''': '''n_layer''', } def __init__( self , lowercase=5_0_4_0_0 , lowercase=2_0_4_8 , lowercase=4_0_9_6 , lowercase=2_8 , lowercase=1_6 , lowercase=6_4 , lowercase=None , lowercase="gelu_new" , lowercase=0.0 , lowercase=0.0 , lowercase=0.0 , lowercase=1E-5 , lowercase=0.02 , lowercase=True , lowercase=5_0_2_5_6 , lowercase=5_0_2_5_6 , lowercase=False , **lowercase , ) -> Tuple: __UpperCamelCase = vocab_size __UpperCamelCase = n_positions __UpperCamelCase = n_embd __UpperCamelCase = n_layer __UpperCamelCase = n_head __UpperCamelCase = n_inner __UpperCamelCase = rotary_dim __UpperCamelCase = activation_function __UpperCamelCase = resid_pdrop __UpperCamelCase = embd_pdrop __UpperCamelCase = attn_pdrop __UpperCamelCase = layer_norm_epsilon __UpperCamelCase = initializer_range __UpperCamelCase = use_cache __UpperCamelCase = bos_token_id __UpperCamelCase = eos_token_id super().__init__( bos_token_id=lowercase , eos_token_id=lowercase , tie_word_embeddings=lowercase , **lowercase ) class UpperCAmelCase__ ( UpperCAmelCase_): def __init__( self , lowercase , lowercase = "default" , lowercase = None , lowercase = False , ) -> List[str]: super().__init__(lowercase , task=lowercase , patching_specs=lowercase , use_past=lowercase ) if not getattr(self._config , """pad_token_id""" , lowercase ): # TODO: how to do that better? __UpperCamelCase = 0 @property def __lowerCamelCase ( self ) -> Mapping[str, Mapping[int, str]]: __UpperCamelCase = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} ) if self.use_past: self.fill_with_past_key_values_(lowercase , direction="""inputs""" ) __UpperCamelCase = {0: """batch""", 1: """past_sequence + sequence"""} else: __UpperCamelCase = {0: """batch""", 1: """sequence"""} return common_inputs @property def __lowerCamelCase ( self ) -> int: return self._config.n_layer @property def __lowerCamelCase ( self ) -> int: return self._config.n_head def __lowerCamelCase ( self , lowercase , lowercase = -1 , lowercase = -1 , lowercase = False , lowercase = None , ) -> Mapping[str, Any]: __UpperCamelCase = super(lowercase , self ).generate_dummy_inputs( lowercase , batch_size=lowercase , seq_length=lowercase , is_pair=lowercase , framework=lowercase ) # We need to order the input in the way they appears in the forward() __UpperCamelCase = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" ) else: import torch __UpperCamelCase , __UpperCamelCase = common_inputs["""input_ids"""].shape # Not using the same length for past_key_values __UpperCamelCase = seqlen + 2 __UpperCamelCase = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) __UpperCamelCase = [ (torch.zeros(lowercase ), torch.zeros(lowercase )) for _ in range(self.num_layers ) ] __UpperCamelCase = common_inputs["""attention_mask"""] if self.use_past: __UpperCamelCase = ordered_inputs["""attention_mask"""].dtype __UpperCamelCase = torch.cat( [ordered_inputs["""attention_mask"""], torch.ones(lowercase , lowercase , dtype=lowercase )] , dim=1 ) return ordered_inputs @property def __lowerCamelCase ( self ) -> int: return 1_3
349
0
import string # frequency taken from https://en.wikipedia.org/wiki/Letter_frequency SCREAMING_SNAKE_CASE_:Union[str, Any] = { 'E': 12.70, 'T': 9.06, 'A': 8.17, 'O': 7.51, 'I': 6.97, 'N': 6.75, 'S': 6.33, 'H': 6.09, 'R': 5.99, 'D': 4.25, 'L': 4.03, 'C': 2.78, 'U': 2.76, 'M': 2.41, 'W': 2.36, 'F': 2.23, 'G': 2.02, 'Y': 1.97, 'P': 1.93, 'B': 1.29, 'V': 0.98, 'K': 0.77, 'J': 0.15, 'X': 0.15, 'Q': 0.10, 'Z': 0.07, } SCREAMING_SNAKE_CASE_:int = 'ETAOINSHRDLCUMWFGYPBVKJXQZ' SCREAMING_SNAKE_CASE_:str = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' def __UpperCamelCase ( _lowerCAmelCase ) -> List[Any]: """simple docstring""" A : Optional[int] = {letter: 0 for letter in string.ascii_uppercase} for letter in message.upper(): if letter in LETTERS: letter_count[letter] += 1 return letter_count def __UpperCamelCase ( _lowerCAmelCase ) -> Tuple: """simple docstring""" return x[0] def __UpperCamelCase ( _lowerCAmelCase ) -> List[Any]: """simple docstring""" A : int = get_letter_count(__A ) A : Tuple = { freq: [] for letter, freq in letter_to_freq.items() } for letter in LETTERS: freq_to_letter[letter_to_freq[letter]].append(__A ) A : str = {} for freq in freq_to_letter: freq_to_letter[freq].sort(key=ETAOIN.find , reverse=__A ) A : Any = """""".join(freq_to_letter[freq] ) A : Tuple = list(freq_to_letter_str.items() ) freq_pairs.sort(key=__A , reverse=__A ) A : Any = [freq_pair[1] for freq_pair in freq_pairs] return "".join(__A ) def __UpperCamelCase ( _lowerCAmelCase ) -> int: """simple docstring""" A : Union[str, Any] = get_frequency_order(__A ) A : List[str] = 0 for common_letter in ETAOIN[:6]: if common_letter in freq_order[:6]: match_score += 1 for uncommon_letter in ETAOIN[-6:]: if uncommon_letter in freq_order[-6:]: match_score += 1 return match_score if __name__ == "__main__": import doctest doctest.testmod()
116
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, is_vision_available, ) a__ : int = { 'configuration_layoutlmv3': [ 'LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LayoutLMv3Config', 'LayoutLMv3OnnxConfig', ], 'processing_layoutlmv3': ['LayoutLMv3Processor'], 'tokenization_layoutlmv3': ['LayoutLMv3Tokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : Dict = ['LayoutLMv3TokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : Any = [ 'LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST', 'LayoutLMv3ForQuestionAnswering', 'LayoutLMv3ForSequenceClassification', 'LayoutLMv3ForTokenClassification', 'LayoutLMv3Model', 'LayoutLMv3PreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : str = [ 'TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFLayoutLMv3ForQuestionAnswering', 'TFLayoutLMv3ForSequenceClassification', 'TFLayoutLMv3ForTokenClassification', 'TFLayoutLMv3Model', 'TFLayoutLMv3PreTrainedModel', ] try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : List[Any] = ['LayoutLMv3FeatureExtractor'] a__ : str = ['LayoutLMv3ImageProcessor'] if TYPE_CHECKING: from .configuration_layoutlmva import ( LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig, LayoutLMvaOnnxConfig, ) from .processing_layoutlmva import LayoutLMvaProcessor from .tokenization_layoutlmva import LayoutLMvaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_layoutlmva import ( LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST, LayoutLMvaForQuestionAnswering, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaModel, LayoutLMvaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_layoutlmva import ( TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, TFLayoutLMvaModel, TFLayoutLMvaPreTrainedModel, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor from .image_processing_layoutlmva import LayoutLMvaImageProcessor else: import sys a__ : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
349
0
from unittest import TestCase from datasets import Dataset from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters def UpperCamelCase__ ( ) -> Optional[Any]: snake_case__ : Dict = { 'repo_name': ['test_repo1', 'test_repo2', 'test_repo3'], 'path': ['test_1.py', 'test_2.py', 'unit_test.py'], 'content': ['a ' * 20, 'a ' * 30, 'b ' * 7], } snake_case__ : Optional[Any] = Dataset.from_dict(__A ) return dataset class __snake_case ( UpperCAmelCase_ ): def __a ( self ) -> Dict: '''simple docstring''' snake_case__ : int = get_dataset() snake_case__ : int = make_duplicate_clusters(__UpperCamelCase , 0.8_5 ) self.assertEqual(len(duplicate_clusters[0] ) , 2 ) def __a ( self ) -> Any: '''simple docstring''' snake_case__ : Optional[Any] = get_dataset() snake_case__ , snake_case__ : int = deduplicate_dataset(__UpperCamelCase ) self.assertEqual(len(__UpperCamelCase ) , 2 ) print(__UpperCamelCase ) self.assertEqual(duplicate_clusters[0][0]['copies'] , 2 ) self.assertEqual(duplicate_clusters[0][0]['is_extreme'] , __UpperCamelCase )
143
'''simple docstring''' def _lowercase ( __A ,__A ): '''simple docstring''' __UpperCamelCase = len(__A ) __UpperCamelCase = [[False] * (required_sum + 1) for _ in range(arr_len + 1 )] # for each arr value, a sum of zero(0) can be formed by not taking any element # hence True/1 for i in range(arr_len + 1 ): __UpperCamelCase = True # sum is not zero and set is empty then false for i in range(1 ,required_sum + 1 ): __UpperCamelCase = False for i in range(1 ,arr_len + 1 ): for j in range(1 ,required_sum + 1 ): if arr[i - 1] > j: __UpperCamelCase = subset[i - 1][j] if arr[i - 1] <= j: __UpperCamelCase = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]] return subset[arr_len][required_sum] if __name__ == "__main__": import doctest doctest.testmod()
349
0
import numpy as np def __lowerCamelCase ( UpperCAmelCase_ : Dict , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[Any] ): """simple docstring""" a :Dict = int(np.ceil((x_end - xa) / h ) ) a :str = np.zeros((n + 1,) ) a :List[Any] = ya a :int = xa for k in range(__A ): a :Tuple = f(__A , y[k] ) a :Dict = f(x + 0.5 * h , y[k] + 0.5 * h * ka ) a :Any = f(x + 0.5 * h , y[k] + 0.5 * h * ka ) a :Any = f(x + h , y[k] + h * ka ) a :List[Any] = y[k] + (1 / 6) * h * (ka + 2 * ka + 2 * ka + ka) x += h return y if __name__ == "__main__": import doctest doctest.testmod()
94
'''simple docstring''' import bza import gzip import lzma import os import shutil import struct import tarfile import warnings import zipfile from abc import ABC, abstractmethod from pathlib import Path from typing import Dict, List, Optional, Type, Union from .. import config from .filelock import FileLock from .logging import get_logger a__ : Any = get_logger(__name__) class UpperCAmelCase__ : def __init__( self , lowercase = None ) -> List[str]: __UpperCamelCase = ( os.path.join(lowercase , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH ) __UpperCamelCase = Extractor def __lowerCamelCase ( self , lowercase ) -> str: from .file_utils import hash_url_to_filename # Path where we extract compressed archives # We extract in the cache dir, and get the extracted path name by hashing the original path" __UpperCamelCase = os.path.abspath(lowercase ) return os.path.join(self.extract_dir , hash_url_to_filename(lowercase ) ) def __lowerCamelCase ( self , lowercase , lowercase ) -> bool: return force_extract or ( not os.path.isfile(lowercase ) and not (os.path.isdir(lowercase ) and os.listdir(lowercase )) ) def __lowerCamelCase ( self , lowercase , lowercase = False ) -> str: __UpperCamelCase = self.extractor.infer_extractor_format(lowercase ) if not extractor_format: return input_path __UpperCamelCase = self._get_output_path(lowercase ) if self._do_extract(lowercase , lowercase ): self.extractor.extract(lowercase , lowercase , lowercase ) return output_path class UpperCAmelCase__ ( UpperCAmelCase_): @classmethod @abstractmethod def __lowerCamelCase ( cls , lowercase , **lowercase ) -> bool: ... @staticmethod @abstractmethod def __lowerCamelCase ( lowercase , lowercase ) -> None: ... class UpperCAmelCase__ ( UpperCAmelCase_ , UpperCAmelCase_): __SCREAMING_SNAKE_CASE = [] @staticmethod def __lowerCamelCase ( lowercase , lowercase ) -> int: with open(lowercase , """rb""" ) as f: return f.read(lowercase ) @classmethod def __lowerCamelCase ( cls , lowercase , lowercase = b"" ) -> bool: if not magic_number: __UpperCamelCase = max(len(lowercase ) for cls_magic_number in cls.magic_numbers ) try: __UpperCamelCase = cls.read_magic_number(lowercase , lowercase ) except OSError: return False return any(magic_number.startswith(lowercase ) for cls_magic_number in cls.magic_numbers ) class UpperCAmelCase__ ( UpperCAmelCase_): @classmethod def __lowerCamelCase ( cls , lowercase , **lowercase ) -> bool: return tarfile.is_tarfile(lowercase ) @staticmethod def __lowerCamelCase ( lowercase , lowercase ) -> str: def resolved(lowercase ) -> str: return os.path.realpath(os.path.abspath(lowercase ) ) def badpath(lowercase , lowercase ) -> bool: # joinpath will ignore base if path is absolute return not resolved(os.path.join(lowercase , lowercase ) ).startswith(lowercase ) def badlink(lowercase , lowercase ) -> bool: # Links are interpreted relative to the directory containing the link __UpperCamelCase = resolved(os.path.join(lowercase , os.path.dirname(info.name ) ) ) return badpath(info.linkname , base=lowercase ) __UpperCamelCase = resolved(lowercase ) for finfo in members: if badpath(finfo.name , lowercase ): logger.error(f"Extraction of {finfo.name} is blocked (illegal path)" ) elif finfo.issym() and badlink(lowercase , lowercase ): logger.error(f"Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}" ) elif finfo.islnk() and badlink(lowercase , lowercase ): logger.error(f"Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}" ) else: yield finfo @staticmethod def __lowerCamelCase ( lowercase , lowercase ) -> None: os.makedirs(lowercase , exist_ok=lowercase ) __UpperCamelCase = tarfile.open(lowercase ) tar_file.extractall(lowercase , members=TarExtractor.safemembers(lowercase , lowercase ) ) tar_file.close() class UpperCAmelCase__ ( UpperCAmelCase_): __SCREAMING_SNAKE_CASE = [B'''\x1F\x8B'''] @staticmethod def __lowerCamelCase ( lowercase , lowercase ) -> None: with gzip.open(lowercase , """rb""" ) as gzip_file: with open(lowercase , """wb""" ) as extracted_file: shutil.copyfileobj(lowercase , lowercase ) class UpperCAmelCase__ ( UpperCAmelCase_): __SCREAMING_SNAKE_CASE = [ B'''PK\x03\x04''', B'''PK\x05\x06''', # empty archive B'''PK\x07\x08''', # spanned archive ] @classmethod def __lowerCamelCase ( cls , lowercase , lowercase = b"" ) -> bool: if super().is_extractable(lowercase , magic_number=lowercase ): return True try: # Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives. # From: https://github.com/python/cpython/pull/5053 from zipfile import ( _CD_SIGNATURE, _ECD_DISK_NUMBER, _ECD_DISK_START, _ECD_ENTRIES_TOTAL, _ECD_OFFSET, _ECD_SIZE, _EndRecData, sizeCentralDir, stringCentralDir, structCentralDir, ) with open(lowercase , """rb""" ) as fp: __UpperCamelCase = _EndRecData(lowercase ) if endrec: if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0: return True # Empty zipfiles are still zipfiles elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]: fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir: __UpperCamelCase = fp.read(lowercase ) # CD is where we expect it to be if len(lowercase ) == sizeCentralDir: __UpperCamelCase = struct.unpack(lowercase , lowercase ) # CD is the right size if centdir[_CD_SIGNATURE] == stringCentralDir: return True # First central directory entry has correct magic number return False except Exception: # catch all errors in case future python versions change the zipfile internals return False @staticmethod def __lowerCamelCase ( lowercase , lowercase ) -> None: os.makedirs(lowercase , exist_ok=lowercase ) with zipfile.ZipFile(lowercase , """r""" ) as zip_file: zip_file.extractall(lowercase ) zip_file.close() class UpperCAmelCase__ ( UpperCAmelCase_): __SCREAMING_SNAKE_CASE = [B'''\xFD\x37\x7A\x58\x5A\x00'''] @staticmethod def __lowerCamelCase ( lowercase , lowercase ) -> None: with lzma.open(lowercase ) as compressed_file: with open(lowercase , """wb""" ) as extracted_file: shutil.copyfileobj(lowercase , lowercase ) class UpperCAmelCase__ ( UpperCAmelCase_): __SCREAMING_SNAKE_CASE = [B'''Rar!\x1a\x07\x00''', B'''Rar!\x1a\x07\x01\x00'''] # RAR_ID # RAR5_ID @staticmethod def __lowerCamelCase ( lowercase , lowercase ) -> None: if not config.RARFILE_AVAILABLE: raise ImportError("""Please pip install rarfile""" ) import rarfile os.makedirs(lowercase , exist_ok=lowercase ) __UpperCamelCase = rarfile.RarFile(lowercase ) rf.extractall(lowercase ) rf.close() class UpperCAmelCase__ ( UpperCAmelCase_): __SCREAMING_SNAKE_CASE = [B'''\x28\xb5\x2F\xFD'''] @staticmethod def __lowerCamelCase ( lowercase , lowercase ) -> None: if not config.ZSTANDARD_AVAILABLE: raise ImportError("""Please pip install zstandard""" ) import zstandard as zstd __UpperCamelCase = zstd.ZstdDecompressor() with open(lowercase , """rb""" ) as ifh, open(lowercase , """wb""" ) as ofh: dctx.copy_stream(lowercase , lowercase ) class UpperCAmelCase__ ( UpperCAmelCase_): __SCREAMING_SNAKE_CASE = [B'''\x42\x5A\x68'''] @staticmethod def __lowerCamelCase ( lowercase , lowercase ) -> None: with bza.open(lowercase , """rb""" ) as compressed_file: with open(lowercase , """wb""" ) as extracted_file: shutil.copyfileobj(lowercase , lowercase ) class UpperCAmelCase__ ( UpperCAmelCase_): __SCREAMING_SNAKE_CASE = [B'''\x37\x7A\xBC\xAF\x27\x1C'''] @staticmethod def __lowerCamelCase ( lowercase , lowercase ) -> None: if not config.PY7ZR_AVAILABLE: raise ImportError("""Please pip install py7zr""" ) import pyazr os.makedirs(lowercase , exist_ok=lowercase ) with pyazr.SevenZipFile(lowercase , """r""" ) as archive: archive.extractall(lowercase ) class UpperCAmelCase__ ( UpperCAmelCase_): __SCREAMING_SNAKE_CASE = [B'''\x04\x22\x4D\x18'''] @staticmethod def __lowerCamelCase ( lowercase , lowercase ) -> None: if not config.LZ4_AVAILABLE: raise ImportError("""Please pip install lz4""" ) import lza.frame with lza.frame.open(lowercase , """rb""" ) as compressed_file: with open(lowercase , """wb""" ) as extracted_file: shutil.copyfileobj(lowercase , lowercase ) class UpperCAmelCase__ : # Put zip file to the last, b/c it is possible wrongly detected as zip (I guess it means: as tar or gzip) __SCREAMING_SNAKE_CASE = { "tar": TarExtractor, "gzip": GzipExtractor, "zip": ZipExtractor, "xz": XzExtractor, "rar": RarExtractor, "zstd": ZstdExtractor, "bz2": BzipaExtractor, "7z": SevenZipExtractor, # <Added version="2.4.0"/> "lz4": LzaExtractor, # <Added version="2.4.0"/> } @classmethod def __lowerCamelCase ( cls ) -> Union[str, Any]: return max( len(lowercase ) for extractor in cls.extractors.values() if issubclass(lowercase , lowercase ) for extractor_magic_number in extractor.magic_numbers ) @staticmethod def __lowerCamelCase ( lowercase , lowercase ) -> str: try: return MagicNumberBaseExtractor.read_magic_number(lowercase , magic_number_length=lowercase ) except OSError: return b"" @classmethod def __lowerCamelCase ( cls , lowercase , lowercase = False ) -> bool: warnings.warn( """Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. """ """Use 'infer_extractor_format' instead.""" , category=lowercase , ) __UpperCamelCase = cls.infer_extractor_format(lowercase ) if extractor_format: return True if not return_extractor else (True, cls.extractors[extractor_format]) return False if not return_extractor else (False, None) @classmethod def __lowerCamelCase ( cls , lowercase ) -> str: # <Added version="2.4.0"/> __UpperCamelCase = cls._get_magic_number_max_length() __UpperCamelCase = cls._read_magic_number(lowercase , lowercase ) for extractor_format, extractor in cls.extractors.items(): if extractor.is_extractable(lowercase , magic_number=lowercase ): return extractor_format @classmethod def __lowerCamelCase ( cls , lowercase , lowercase , lowercase = None , lowercase = "deprecated" , ) -> None: os.makedirs(os.path.dirname(lowercase ) , exist_ok=lowercase ) # Prevent parallel extractions __UpperCamelCase = str(Path(lowercase ).with_suffix(""".lock""" ) ) with FileLock(lowercase ): shutil.rmtree(lowercase , ignore_errors=lowercase ) if extractor_format or extractor != "deprecated": if extractor != "deprecated" or not isinstance(lowercase , lowercase ): # passed as positional arg warnings.warn( """Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. """ """Use 'extractor_format' instead.""" , category=lowercase , ) __UpperCamelCase = extractor if extractor != """deprecated""" else extractor_format else: __UpperCamelCase = cls.extractors[extractor_format] return extractor.extract(lowercase , lowercase ) else: warnings.warn( """Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an """ """exception in 3.0.0.""" , category=lowercase , ) for extractor in cls.extractors.values(): if extractor.is_extractable(lowercase ): return extractor.extract(lowercase , lowercase )
349
0
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging lowercase : Any = logging.get_logger(__name__) lowercase : int = { 'microsoft/trocr-base-handwritten': ( 'https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json' ), # See all TrOCR models at https://huggingface.co/models?filter=trocr } class A ( UpperCAmelCase_ ): __magic_name__ = '''trocr''' __magic_name__ = ['''past_key_values'''] __magic_name__ = { '''num_attention_heads''': '''decoder_attention_heads''', '''hidden_size''': '''d_model''', '''num_hidden_layers''': '''decoder_layers''', } def __init__( self , SCREAMING_SNAKE_CASE=50265 , SCREAMING_SNAKE_CASE=1024 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=16 , SCREAMING_SNAKE_CASE=4096 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=512 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=1 , SCREAMING_SNAKE_CASE=0 , SCREAMING_SNAKE_CASE=2 , **SCREAMING_SNAKE_CASE , ) -> str: """simple docstring""" A : List[Any] = vocab_size A : List[Any] = d_model A : List[Any] = decoder_layers A : Tuple = decoder_attention_heads A : Any = decoder_ffn_dim A : Tuple = activation_function A : Union[str, Any] = max_position_embeddings A : Tuple = dropout A : List[Any] = attention_dropout A : Union[str, Any] = activation_dropout A : Optional[Any] = init_std A : List[str] = decoder_layerdrop A : Optional[Any] = use_cache A : List[Any] = scale_embedding A : int = use_learned_position_embeddings A : List[str] = layernorm_embedding super().__init__( pad_token_id=SCREAMING_SNAKE_CASE , bos_token_id=SCREAMING_SNAKE_CASE , eos_token_id=SCREAMING_SNAKE_CASE , decoder_start_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , )
3
'''simple docstring''' import unittest from transformers import PegasusConfig, PegasusTokenizer, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html a__ : List[str] = 'platform' import jax import jax.numpy as jnp import numpy as np from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel @require_flax class UpperCAmelCase__ : __SCREAMING_SNAKE_CASE = PegasusConfig __SCREAMING_SNAKE_CASE = {} __SCREAMING_SNAKE_CASE = '''gelu''' def __init__( self , lowercase , lowercase=1_3 , lowercase=7 , lowercase=True , lowercase=False , lowercase=9_9 , lowercase=3_2 , lowercase=5 , lowercase=4 , lowercase=3_7 , lowercase=0.1 , lowercase=0.1 , lowercase=2_0 , lowercase=2 , lowercase=1 , lowercase=0 , ) -> Optional[Any]: __UpperCamelCase = parent __UpperCamelCase = batch_size __UpperCamelCase = seq_length __UpperCamelCase = is_training __UpperCamelCase = use_labels __UpperCamelCase = vocab_size __UpperCamelCase = hidden_size __UpperCamelCase = num_hidden_layers __UpperCamelCase = num_attention_heads __UpperCamelCase = intermediate_size __UpperCamelCase = hidden_dropout_prob __UpperCamelCase = attention_probs_dropout_prob __UpperCamelCase = max_position_embeddings __UpperCamelCase = eos_token_id __UpperCamelCase = pad_token_id __UpperCamelCase = bos_token_id def __lowerCamelCase ( self ) -> str: __UpperCamelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size ) __UpperCamelCase = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 ) __UpperCamelCase = np.concatenate([input_ids, eos_tensor] , axis=1 ) __UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCamelCase = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) __UpperCamelCase = prepare_pegasus_inputs_dict(lowercase , lowercase , lowercase ) return config, inputs_dict def __lowerCamelCase ( self , lowercase , lowercase , lowercase ) -> Dict: __UpperCamelCase = 2_0 __UpperCamelCase = model_class_name(lowercase ) __UpperCamelCase = model.encode(inputs_dict["""input_ids"""] ) __UpperCamelCase , __UpperCamelCase = ( inputs_dict["""decoder_input_ids"""], inputs_dict["""decoder_attention_mask"""], ) __UpperCamelCase = model.init_cache(decoder_input_ids.shape[0] , lowercase , lowercase ) __UpperCamelCase = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""" ) __UpperCamelCase = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) __UpperCamelCase = model.decode( decoder_input_ids[:, :-1] , lowercase , decoder_attention_mask=lowercase , past_key_values=lowercase , decoder_position_ids=lowercase , ) __UpperCamelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" ) __UpperCamelCase = model.decode( decoder_input_ids[:, -1:] , lowercase , decoder_attention_mask=lowercase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=lowercase , ) __UpperCamelCase = model.decode(lowercase , lowercase ) __UpperCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=f"Max diff is {diff}" ) def __lowerCamelCase ( self , lowercase , lowercase , lowercase ) -> Any: __UpperCamelCase = 2_0 __UpperCamelCase = model_class_name(lowercase ) __UpperCamelCase = model.encode(inputs_dict["""input_ids"""] ) __UpperCamelCase , __UpperCamelCase = ( inputs_dict["""decoder_input_ids"""], inputs_dict["""decoder_attention_mask"""], ) __UpperCamelCase = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ), ] , axis=-1 , ) __UpperCamelCase = model.init_cache(decoder_input_ids.shape[0] , lowercase , lowercase ) __UpperCamelCase = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) __UpperCamelCase = model.decode( decoder_input_ids[:, :-1] , lowercase , decoder_attention_mask=lowercase , past_key_values=lowercase , decoder_position_ids=lowercase , ) __UpperCamelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" ) __UpperCamelCase = model.decode( decoder_input_ids[:, -1:] , lowercase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=lowercase , decoder_position_ids=lowercase , ) __UpperCamelCase = model.decode(lowercase , lowercase , decoder_attention_mask=lowercase ) __UpperCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=f"Max diff is {diff}" ) def _lowercase ( __A ,__A ,__A ,__A=None ,__A=None ,): '''simple docstring''' if attention_mask is None: __UpperCamelCase = np.not_equal(__A ,config.pad_token_id ).astype(np.inta ) if decoder_attention_mask is None: __UpperCamelCase = np.concatenate( [ np.ones(decoder_input_ids[:, :1].shape ,dtype=np.inta ), np.not_equal(decoder_input_ids[:, 1:] ,config.pad_token_id ).astype(np.inta ), ] ,axis=-1 ,) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, } @require_flax class UpperCAmelCase__ ( UpperCAmelCase_ , unittest.TestCase): __SCREAMING_SNAKE_CASE = ( ( FlaxPegasusForConditionalGeneration, FlaxPegasusModel, ) if is_flax_available() else () ) __SCREAMING_SNAKE_CASE = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else () __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = False def __lowerCamelCase ( self ) -> Optional[Any]: __UpperCamelCase = FlaxPegasusModelTester(self ) __UpperCamelCase = ConfigTester(self , config_class=lowercase ) def __lowerCamelCase ( self ) -> List[Any]: self.config_tester.run_common_tests() def __lowerCamelCase ( self ) -> List[Any]: __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(lowercase , lowercase , lowercase ) def __lowerCamelCase ( self ) -> List[Any]: __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(lowercase , lowercase , lowercase ) def __lowerCamelCase ( self ) -> List[str]: __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): __UpperCamelCase = self._prepare_for_class(lowercase , lowercase ) __UpperCamelCase = model_class(lowercase ) @jax.jit def encode_jitted(lowercase , lowercase=None , **lowercase ): return model.encode(input_ids=lowercase , attention_mask=lowercase ) with self.subTest("""JIT Enabled""" ): __UpperCamelCase = encode_jitted(**lowercase ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): __UpperCamelCase = encode_jitted(**lowercase ).to_tuple() self.assertEqual(len(lowercase ) , len(lowercase ) ) for jitted_output, output in zip(lowercase , lowercase ): self.assertEqual(jitted_output.shape , output.shape ) def __lowerCamelCase ( self ) -> List[Any]: __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): __UpperCamelCase = model_class(lowercase ) __UpperCamelCase = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""] ) __UpperCamelCase = { """decoder_input_ids""": inputs_dict["""decoder_input_ids"""], """decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""], """encoder_outputs""": encoder_outputs, } @jax.jit def decode_jitted(lowercase , lowercase , lowercase ): return model.decode( decoder_input_ids=lowercase , decoder_attention_mask=lowercase , encoder_outputs=lowercase , ) with self.subTest("""JIT Enabled""" ): __UpperCamelCase = decode_jitted(**lowercase ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): __UpperCamelCase = decode_jitted(**lowercase ).to_tuple() self.assertEqual(len(lowercase ) , len(lowercase ) ) for jitted_output, output in zip(lowercase , lowercase ): self.assertEqual(jitted_output.shape , output.shape ) @slow def __lowerCamelCase ( self ) -> Dict: for model_class_name in self.all_model_classes: __UpperCamelCase = model_class_name.from_pretrained("""google/pegasus-large""" , from_pt=lowercase ) __UpperCamelCase = np.ones((1, 1) ) __UpperCamelCase = model(lowercase ) self.assertIsNotNone(lowercase ) @slow def __lowerCamelCase ( self ) -> str: __UpperCamelCase = FlaxPegasusForConditionalGeneration.from_pretrained("""google/pegasus-xsum""" ) __UpperCamelCase = PegasusTokenizer.from_pretrained("""google/pegasus-xsum""" ) __UpperCamelCase = [ """ PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""", """ The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """, ] __UpperCamelCase = [ """California's largest electricity provider has turned off power to hundreds of thousands of customers.""", """Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""", ] __UpperCamelCase = tokenizer(lowercase , return_tensors="""np""" , truncation=lowercase , max_length=5_1_2 , padding=lowercase ) __UpperCamelCase = model.generate(**lowercase , num_beams=2 ).sequences __UpperCamelCase = tokenizer.batch_decode(lowercase , skip_special_tokens=lowercase ) assert tgt_text == decoded
349
0
import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, StableDiffusionSAGPipeline, UNetaDConditionModel, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class a_ ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ): """simple docstring""" __UpperCAmelCase = StableDiffusionSAGPipeline __UpperCAmelCase = TEXT_TO_IMAGE_PARAMS __UpperCAmelCase = TEXT_TO_IMAGE_BATCH_PARAMS __UpperCAmelCase = TEXT_TO_IMAGE_IMAGE_PARAMS __UpperCAmelCase = TEXT_TO_IMAGE_IMAGE_PARAMS __UpperCAmelCase = False def _lowerCAmelCase ( self : List[Any] ): torch.manual_seed(0 ) SCREAMING_SNAKE_CASE =UNetaDConditionModel( block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') ,up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') ,cross_attention_dim=32 ,) SCREAMING_SNAKE_CASE =DDIMScheduler( beta_start=0.00_085 ,beta_end=0.012 ,beta_schedule='scaled_linear' ,clip_sample=snake_case ,set_alpha_to_one=snake_case ,) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE =AutoencoderKL( block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] ,up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] ,latent_channels=4 ,) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE =CLIPTextConfig( bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,) SCREAMING_SNAKE_CASE =CLIPTextModel(snake_case ) SCREAMING_SNAKE_CASE =CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) SCREAMING_SNAKE_CASE ={ 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'safety_checker': None, 'feature_extractor': None, } return components def _lowerCAmelCase ( self : Tuple ,snake_case : List[str] ,snake_case : Dict=0 ): if str(snake_case ).startswith('mps' ): SCREAMING_SNAKE_CASE =torch.manual_seed(snake_case ) else: SCREAMING_SNAKE_CASE =torch.Generator(device=snake_case ).manual_seed(snake_case ) SCREAMING_SNAKE_CASE ={ 'prompt': '.', 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 1.0, 'sag_scale': 1.0, 'output_type': 'numpy', } return inputs def _lowerCAmelCase ( self : Union[str, Any] ): super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) @slow @require_torch_gpu class a_ ( unittest.TestCase ): """simple docstring""" def _lowerCAmelCase ( self : Tuple ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowerCAmelCase ( self : Any ): SCREAMING_SNAKE_CASE =StableDiffusionSAGPipeline.from_pretrained('CompVis/stable-diffusion-v1-4' ) SCREAMING_SNAKE_CASE =sag_pipe.to(snake_case ) sag_pipe.set_progress_bar_config(disable=snake_case ) SCREAMING_SNAKE_CASE ='.' SCREAMING_SNAKE_CASE =torch.manual_seed(0 ) SCREAMING_SNAKE_CASE =sag_pipe( [prompt] ,generator=snake_case ,guidance_scale=7.5 ,sag_scale=1.0 ,num_inference_steps=20 ,output_type='np' ) SCREAMING_SNAKE_CASE =output.images SCREAMING_SNAKE_CASE =image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) SCREAMING_SNAKE_CASE =np.array([0.1_568, 0.1_738, 0.1_695, 0.1_693, 0.1_507, 0.1_705, 0.1_547, 0.1_751, 0.1_949] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2 def _lowerCAmelCase ( self : int ): SCREAMING_SNAKE_CASE =StableDiffusionSAGPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' ) SCREAMING_SNAKE_CASE =sag_pipe.to(snake_case ) sag_pipe.set_progress_bar_config(disable=snake_case ) SCREAMING_SNAKE_CASE ='.' SCREAMING_SNAKE_CASE =torch.manual_seed(0 ) SCREAMING_SNAKE_CASE =sag_pipe( [prompt] ,generator=snake_case ,guidance_scale=7.5 ,sag_scale=1.0 ,num_inference_steps=20 ,output_type='np' ) SCREAMING_SNAKE_CASE =output.images SCREAMING_SNAKE_CASE =image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) SCREAMING_SNAKE_CASE =np.array([0.3_459, 0.2_876, 0.2_537, 0.3_002, 0.2_671, 0.2_160, 0.3_026, 0.2_262, 0.2_371] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2 def _lowerCAmelCase ( self : int ): SCREAMING_SNAKE_CASE =StableDiffusionSAGPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' ) SCREAMING_SNAKE_CASE =sag_pipe.to(snake_case ) sag_pipe.set_progress_bar_config(disable=snake_case ) SCREAMING_SNAKE_CASE ='.' SCREAMING_SNAKE_CASE =torch.manual_seed(0 ) SCREAMING_SNAKE_CASE =sag_pipe( [prompt] ,width=768 ,height=512 ,generator=snake_case ,guidance_scale=7.5 ,sag_scale=1.0 ,num_inference_steps=20 ,output_type='np' ,) SCREAMING_SNAKE_CASE =output.images assert image.shape == (1, 512, 768, 3)
334
'''simple docstring''' import pytest a__ : List[str] = '__dummy_dataset1__' a__ : Optional[int] = '\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"\nURLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n "tokens": datasets.Sequence(datasets.Value("string")),\n "ner_tags": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n "O",\n "B-PER",\n "I-PER",\n "B-ORG",\n "I-ORG",\n "B-LOC",\n "I-LOC",\n ]\n )\n ),\n "langs": datasets.Sequence(datasets.Value("string")),\n "spans": datasets.Sequence(datasets.Value("string")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, "r", encoding="utf-8") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n' @pytest.fixture def _lowercase ( ): '''simple docstring''' return DATASET_LOADING_SCRIPT_NAME @pytest.fixture def _lowercase ( ): '''simple docstring''' return DATASET_LOADING_SCRIPT_CODE @pytest.fixture def _lowercase ( __A ,__A ,__A ): '''simple docstring''' __UpperCamelCase = dataset_loading_script_name __UpperCamelCase = tmp_path / """datasets""" / script_name script_dir.mkdir(parents=__A ) __UpperCamelCase = script_dir / f"{script_name}.py" with open(__A ,"""w""" ) as f: f.write(__A ) return str(__A )
349
0
from dataclasses import dataclass from typing import Dict, Optional, Union import torch import torch.nn.functional as F from torch import nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .attention import BasicTransformerBlock from .attention_processor import AttentionProcessor, AttnProcessor from .embeddings import TimestepEmbedding, Timesteps from .modeling_utils import ModelMixin @dataclass class A_ (UpperCAmelCase_ ): UpperCAmelCase__ = 4_2 class A_ (UpperCAmelCase_ , UpperCAmelCase_ ): @register_to_config def __init__( self , _A = 3_2 , _A = 6_4 , _A = 2_0 , _A = 7_6_8 , _A=7_7 , _A=4 , _A = 0.0 , _A = "silu" , _A = None , _A = None , _A = "linear" , _A = "prd" , _A = None , _A = None , _A = None , ): '''simple docstring''' super().__init__() UpperCAmelCase = num_attention_heads UpperCAmelCase = attention_head_dim UpperCAmelCase = num_attention_heads * attention_head_dim UpperCAmelCase = additional_embeddings UpperCAmelCase = time_embed_dim or inner_dim UpperCAmelCase = embedding_proj_dim or embedding_dim UpperCAmelCase = clip_embed_dim or embedding_dim UpperCAmelCase = Timesteps(_A , _A , 0 ) UpperCAmelCase = TimestepEmbedding(_A , _A , out_dim=_A , act_fn=_A ) UpperCAmelCase = nn.Linear(_A , _A ) if embedding_proj_norm_type is None: UpperCAmelCase = None elif embedding_proj_norm_type == "layer": UpperCAmelCase = nn.LayerNorm(_A ) else: raise ValueError(F"""unsupported embedding_proj_norm_type: {embedding_proj_norm_type}""" ) UpperCAmelCase = nn.Linear(_A , _A ) if encoder_hid_proj_type is None: UpperCAmelCase = None elif encoder_hid_proj_type == "linear": UpperCAmelCase = nn.Linear(_A , _A ) else: raise ValueError(F"""unsupported encoder_hid_proj_type: {encoder_hid_proj_type}""" ) UpperCAmelCase = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , _A ) ) if added_emb_type == "prd": UpperCAmelCase = nn.Parameter(torch.zeros(1 , 1 , _A ) ) elif added_emb_type is None: UpperCAmelCase = None else: raise ValueError( F"""`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`.""" ) UpperCAmelCase = nn.ModuleList( [ BasicTransformerBlock( _A , _A , _A , dropout=_A , activation_fn='''gelu''' , attention_bias=_A , ) for d in range(_A ) ] ) if norm_in_type == "layer": UpperCAmelCase = nn.LayerNorm(_A ) elif norm_in_type is None: UpperCAmelCase = None else: raise ValueError(F"""Unsupported norm_in_type: {norm_in_type}.""" ) UpperCAmelCase = nn.LayerNorm(_A ) UpperCAmelCase = nn.Linear(_A , _A ) UpperCAmelCase = torch.full( [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -1_0_0_0_0.0 ) causal_attention_mask.triu_(1 ) UpperCAmelCase = causal_attention_mask[None, ...] self.register_buffer('''causal_attention_mask''' , _A , persistent=_A ) UpperCAmelCase = nn.Parameter(torch.zeros(1 , _A ) ) UpperCAmelCase = nn.Parameter(torch.zeros(1 , _A ) ) @property # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = {} def fn_recursive_add_processors(_A , _A , _A ): if hasattr(_A , '''set_processor''' ): UpperCAmelCase = module.processor for sub_name, child in module.named_children(): fn_recursive_add_processors(F"""{name}.{sub_name}""" , _A , _A ) return processors for name, module in self.named_children(): fn_recursive_add_processors(_A , _A , _A ) return processors def _lowercase ( self , _A ): '''simple docstring''' UpperCAmelCase = len(self.attn_processors.keys() ) if isinstance(_A , _A ) and len(_A ) != count: raise ValueError( F"""A dict of processors was passed, but the number of processors {len(_A )} does not match the""" F""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""" ) def fn_recursive_attn_processor(_A , _A , _A ): if hasattr(_A , '''set_processor''' ): if not isinstance(_A , _A ): module.set_processor(_A ) else: module.set_processor(processor.pop(F"""{name}.processor""" ) ) for sub_name, child in module.named_children(): fn_recursive_attn_processor(F"""{name}.{sub_name}""" , _A , _A ) for name, module in self.named_children(): fn_recursive_attn_processor(_A , _A , _A ) def _lowercase ( self ): '''simple docstring''' self.set_attn_processor(AttnProcessor() ) def _lowercase ( self , _A , _A , _A , _A = None , _A = None , _A = True , ): '''simple docstring''' UpperCAmelCase = hidden_states.shape[0] UpperCAmelCase = timestep if not torch.is_tensor(_A ): UpperCAmelCase = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device ) elif torch.is_tensor(_A ) and len(timesteps.shape ) == 0: UpperCAmelCase = timesteps[None].to(hidden_states.device ) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML UpperCAmelCase = timesteps * torch.ones(_A , dtype=timesteps.dtype , device=timesteps.device ) UpperCAmelCase = self.time_proj(_A ) # timesteps does not contain any weights and will always return f32 tensors # but time_embedding might be fp16, so we need to cast here. UpperCAmelCase = timesteps_projected.to(dtype=self.dtype ) UpperCAmelCase = self.time_embedding(_A ) if self.embedding_proj_norm is not None: UpperCAmelCase = self.embedding_proj_norm(_A ) UpperCAmelCase = self.embedding_proj(_A ) if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None: UpperCAmelCase = self.encoder_hidden_states_proj(_A ) elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None: raise ValueError('''`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set''' ) UpperCAmelCase = self.proj_in(_A ) UpperCAmelCase = self.positional_embedding.to(hidden_states.dtype ) UpperCAmelCase = [] UpperCAmelCase = 0 if encoder_hidden_states is not None: additional_embeds.append(_A ) additional_embeddings_len += encoder_hidden_states.shape[1] if len(proj_embeddings.shape ) == 2: UpperCAmelCase = proj_embeddings[:, None, :] if len(hidden_states.shape ) == 2: UpperCAmelCase = hidden_states[:, None, :] UpperCAmelCase = additional_embeds + [ proj_embeddings, time_embeddings[:, None, :], hidden_states, ] if self.prd_embedding is not None: UpperCAmelCase = self.prd_embedding.to(hidden_states.dtype ).expand(_A , -1 , -1 ) additional_embeds.append(_A ) UpperCAmelCase = torch.cat( _A , dim=1 , ) # Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens UpperCAmelCase = additional_embeddings_len + proj_embeddings.shape[1] + 1 if positional_embeddings.shape[1] < hidden_states.shape[1]: UpperCAmelCase = F.pad( _A , ( 0, 0, additional_embeddings_len, self.prd_embedding.shape[1] if self.prd_embedding is not None else 0, ) , value=0.0 , ) UpperCAmelCase = hidden_states + positional_embeddings if attention_mask is not None: UpperCAmelCase = (1 - attention_mask.to(hidden_states.dtype )) * -1_0_0_0_0.0 UpperCAmelCase = F.pad(_A , (0, self.additional_embeddings) , value=0.0 ) UpperCAmelCase = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype ) UpperCAmelCase = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 ) if self.norm_in is not None: UpperCAmelCase = self.norm_in(_A ) for block in self.transformer_blocks: UpperCAmelCase = block(_A , attention_mask=_A ) UpperCAmelCase = self.norm_out(_A ) if self.prd_embedding is not None: UpperCAmelCase = hidden_states[:, -1] else: UpperCAmelCase = hidden_states[:, additional_embeddings_len:] UpperCAmelCase = self.proj_to_clip_embeddings(_A ) if not return_dict: return (predicted_image_embedding,) return PriorTransformerOutput(predicted_image_embedding=_A ) def _lowercase ( self , _A ): '''simple docstring''' UpperCAmelCase = (prior_latents * self.clip_std) + self.clip_mean return prior_latents
273
'''simple docstring''' import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConfig, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaForCTC, WavaVecaForPreTraining, WavaVecaProcessor, logging, ) from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification logging.set_verbosity_info() a__ : Any = logging.get_logger(__name__) a__ : Optional[int] = { 'post_extract_proj': 'feature_projection.projection', 'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv', 'self_attn.k_proj': 'encoder.layers.*.attention.k_proj', 'self_attn.v_proj': 'encoder.layers.*.attention.v_proj', 'self_attn.q_proj': 'encoder.layers.*.attention.q_proj', 'self_attn.out_proj': 'encoder.layers.*.attention.out_proj', 'self_attn_layer_norm': 'encoder.layers.*.layer_norm', 'fc1': 'encoder.layers.*.feed_forward.intermediate_dense', 'fc2': 'encoder.layers.*.feed_forward.output_dense', 'final_layer_norm': 'encoder.layers.*.final_layer_norm', 'encoder.layer_norm': 'encoder.layer_norm', 'adapter_layer': 'encoder.layers.*.adapter_layer', 'w2v_model.layer_norm': 'feature_projection.layer_norm', 'quantizer.weight_proj': 'quantizer.weight_proj', 'quantizer.vars': 'quantizer.codevectors', 'project_q': 'project_q', 'final_proj': 'project_hid', 'w2v_encoder.proj': 'lm_head', 'mask_emb': 'masked_spec_embed', 'pooling_layer.linear': 'projector', 'pooling_layer.projection': 'classifier', } a__ : List[str] = [ 'lm_head', 'quantizer.weight_proj', 'quantizer.codevectors', 'project_q', 'project_hid', 'projector', 'classifier', ] def _lowercase ( __A ): '''simple docstring''' __UpperCamelCase = {} with open(__A ,"""r""" ) as file: for line_number, line in enumerate(__A ): __UpperCamelCase = line.strip() if line: __UpperCamelCase = line.split() __UpperCamelCase = line_number __UpperCamelCase = words[0] __UpperCamelCase = value return result def _lowercase ( __A ,__A ,__A ,__A ,__A ): '''simple docstring''' for attribute in key.split(""".""" ): __UpperCamelCase = getattr(__A ,__A ) __UpperCamelCase = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(__A ): __UpperCamelCase = PARAM_MAPPING[full_name.split(""".""" )[-1]] __UpperCamelCase = """param""" if weight_type is not None and weight_type != "param": __UpperCamelCase = getattr(__A ,__A ).shape elif weight_type is not None and weight_type == "param": __UpperCamelCase = hf_pointer for attribute in hf_param_name.split(""".""" ): __UpperCamelCase = getattr(__A ,__A ) __UpperCamelCase = shape_pointer.shape # let's reduce dimension __UpperCamelCase = value[0] else: __UpperCamelCase = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be" f" {value.shape} for {full_name}" ) if weight_type == "weight": __UpperCamelCase = value elif weight_type == "weight_g": __UpperCamelCase = value elif weight_type == "weight_v": __UpperCamelCase = value elif weight_type == "bias": __UpperCamelCase = value elif weight_type == "param": for attribute in hf_param_name.split(""".""" ): __UpperCamelCase = getattr(__A ,__A ) __UpperCamelCase = value else: __UpperCamelCase = value logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." ) def _lowercase ( __A ,__A ,__A ,__A ,__A ): '''simple docstring''' __UpperCamelCase = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(__A ): __UpperCamelCase = PARAM_MAPPING[full_name.split(""".""" )[-1]] __UpperCamelCase = """param""" if weight_type is not None and weight_type != "param": __UpperCamelCase = """.""".join([key, weight_type] ) elif weight_type is not None and weight_type == "param": __UpperCamelCase = """.""".join([key, hf_param_name] ) else: __UpperCamelCase = key __UpperCamelCase = value if """lm_head""" in full_key else value[0] a__ : Dict = { 'W_a': 'linear_1.weight', 'W_b': 'linear_2.weight', 'b_a': 'linear_1.bias', 'b_b': 'linear_2.bias', 'ln_W': 'norm.weight', 'ln_b': 'norm.bias', } def _lowercase ( __A ,__A ,__A=None ,__A=None ): '''simple docstring''' __UpperCamelCase = False for key, mapped_key in MAPPING.items(): __UpperCamelCase = """wav2vec2.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]: __UpperCamelCase = True if "*" in mapped_key: __UpperCamelCase = name.split(__A )[0].split(""".""" )[-2] __UpperCamelCase = mapped_key.replace("""*""" ,__A ) if "weight_g" in name: __UpperCamelCase = """weight_g""" elif "weight_v" in name: __UpperCamelCase = """weight_v""" elif "bias" in name: __UpperCamelCase = """bias""" elif "weight" in name: # TODO: don't match quantizer.weight_proj __UpperCamelCase = """weight""" else: __UpperCamelCase = None if hf_dict is not None: rename_dict(__A ,__A ,__A ,__A ,__A ) else: set_recursively(__A ,__A ,__A ,__A ,__A ) return is_used return is_used def _lowercase ( __A ,__A ,__A ): '''simple docstring''' __UpperCamelCase = [] __UpperCamelCase = fairseq_model.state_dict() __UpperCamelCase = hf_model.wavaveca.feature_extractor for name, value in fairseq_dict.items(): __UpperCamelCase = False if "conv_layers" in name: load_conv_layer( __A ,__A ,__A ,__A ,hf_model.config.feat_extract_norm == """group""" ,) __UpperCamelCase = True else: __UpperCamelCase = load_wavaveca_layer(__A ,__A ,__A ) if not is_used: unused_weights.append(__A ) logger.warning(f"Unused weights: {unused_weights}" ) def _lowercase ( __A ,__A ,__A ,__A ,__A ): '''simple docstring''' __UpperCamelCase = full_name.split("""conv_layers.""" )[-1] __UpperCamelCase = name.split(""".""" ) __UpperCamelCase = int(items[0] ) __UpperCamelCase = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f"{full_name} has size {value.shape}, but" f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." ) __UpperCamelCase = value logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f"{full_name} has size {value.shape}, but" f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." ) __UpperCamelCase = value logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f"{full_name} has size {value.shape}, but" f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found." ) __UpperCamelCase = value logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f"{full_name} has size {value.shape}, but" f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found." ) __UpperCamelCase = value logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." ) else: unused_weights.append(__A ) @torch.no_grad() def _lowercase ( __A ,__A ,__A=None ,__A=None ,__A=True ,__A=False ): '''simple docstring''' if config_path is not None: __UpperCamelCase = WavaVecaConfig.from_pretrained(__A ) else: __UpperCamelCase = WavaVecaConfig() if is_seq_class: __UpperCamelCase = read_txt_into_dict(__A ) __UpperCamelCase = idalabel __UpperCamelCase = WavaVecaForSequenceClassification(__A ) __UpperCamelCase = WavaVecaFeatureExtractor( feature_size=1 ,sampling_rate=16_000 ,padding_value=0 ,do_normalize=__A ,return_attention_mask=__A ,) feature_extractor.save_pretrained(__A ) elif is_finetuned: if dict_path: __UpperCamelCase = Dictionary.load(__A ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq __UpperCamelCase = target_dict.pad_index __UpperCamelCase = target_dict.bos_index __UpperCamelCase = target_dict.eos_index __UpperCamelCase = len(target_dict.symbols ) __UpperCamelCase = os.path.join(__A ,"""vocab.json""" ) if not os.path.isdir(__A ): logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(__A ) ) return os.makedirs(__A ,exist_ok=__A ) __UpperCamelCase = target_dict.indices # fairseq has the <pad> and <s> switched __UpperCamelCase = 0 __UpperCamelCase = 1 with open(__A ,"""w""" ,encoding="""utf-8""" ) as vocab_handle: json.dump(__A ,__A ) __UpperCamelCase = WavaVecaCTCTokenizer( __A ,unk_token=target_dict.unk_word ,pad_token=target_dict.pad_word ,bos_token=target_dict.bos_word ,eos_token=target_dict.eos_word ,word_delimiter_token="""|""" ,do_lower_case=__A ,) __UpperCamelCase = True if config.feat_extract_norm == """layer""" else False __UpperCamelCase = WavaVecaFeatureExtractor( feature_size=1 ,sampling_rate=16_000 ,padding_value=0 ,do_normalize=__A ,return_attention_mask=__A ,) __UpperCamelCase = WavaVecaProcessor(feature_extractor=__A ,tokenizer=__A ) processor.save_pretrained(__A ) __UpperCamelCase = WavaVecaForCTC(__A ) else: __UpperCamelCase = WavaVecaForPreTraining(__A ) if is_finetuned or is_seq_class: __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] ,arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} ) else: __UpperCamelCase = argparse.Namespace(task="""audio_pretraining""" ) __UpperCamelCase = fairseq.tasks.setup_task(__A ) __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ,task=__A ) __UpperCamelCase = model[0].eval() recursively_load_weights(__A ,__A ,not is_finetuned ) hf_wavavec.save_pretrained(__A ) if __name__ == "__main__": a__ : int = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') parser.add_argument( '--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not' ) parser.add_argument( '--is_seq_class', action='store_true', help='Whether the model to convert is a fine-tuned sequence classification model or not', ) a__ : Optional[int] = parser.parse_args() a__ : str = not args.not_finetuned and not args.is_seq_class convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, is_finetuned, args.is_seq_class, )
349
0
from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = { 'uw-madison/mra-base-512-4': 'https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json', } class SCREAMING_SNAKE_CASE ( UpperCAmelCase_ ): __lowerCamelCase : Dict ='mra' def __init__( self : Optional[Any] , __lowercase : Optional[int]=50265 , __lowercase : List[Any]=768 , __lowercase : Any=12 , __lowercase : Tuple=12 , __lowercase : int=3072 , __lowercase : Tuple="gelu" , __lowercase : List[Any]=0.1 , __lowercase : Optional[int]=0.1 , __lowercase : Dict=512 , __lowercase : Any=1 , __lowercase : int=0.02 , __lowercase : List[Any]=1E-5 , __lowercase : List[str]="absolute" , __lowercase : Optional[Any]=4 , __lowercase : str="full" , __lowercase : Any=0 , __lowercase : int=0 , __lowercase : List[Any]=1 , __lowercase : str=0 , __lowercase : Dict=2 , **__lowercase : Any , ): '''simple docstring''' super().__init__(pad_token_id=__lowercase , bos_token_id=__lowercase , eos_token_id=__lowercase , **__lowercase ) __a = vocab_size __a = max_position_embeddings __a = hidden_size __a = num_hidden_layers __a = num_attention_heads __a = intermediate_size __a = hidden_act __a = hidden_dropout_prob __a = attention_probs_dropout_prob __a = initializer_range __a = type_vocab_size __a = layer_norm_eps __a = position_embedding_type __a = block_per_row __a = approx_mode __a = initial_prior_first_n_blocks __a = initial_prior_diagonal_n_blocks
302
'''simple docstring''' from __future__ import annotations import unittest from transformers import DistilBertConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.distilbert.modeling_tf_distilbert import ( TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDistilBertForMaskedLM, TFDistilBertForMultipleChoice, TFDistilBertForQuestionAnswering, TFDistilBertForSequenceClassification, TFDistilBertForTokenClassification, TFDistilBertModel, ) class UpperCAmelCase__ : def __init__( self , lowercase , ) -> Union[str, Any]: __UpperCamelCase = parent __UpperCamelCase = 1_3 __UpperCamelCase = 7 __UpperCamelCase = True __UpperCamelCase = True __UpperCamelCase = False __UpperCamelCase = True __UpperCamelCase = 9_9 __UpperCamelCase = 3_2 __UpperCamelCase = 2 __UpperCamelCase = 4 __UpperCamelCase = 3_7 __UpperCamelCase = """gelu""" __UpperCamelCase = 0.1 __UpperCamelCase = 0.1 __UpperCamelCase = 5_1_2 __UpperCamelCase = 1_6 __UpperCamelCase = 2 __UpperCamelCase = 0.02 __UpperCamelCase = 3 __UpperCamelCase = 4 __UpperCamelCase = None def __lowerCamelCase ( self ) -> List[str]: __UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCamelCase = None if self.use_input_mask: __UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] ) __UpperCamelCase = None __UpperCamelCase = None __UpperCamelCase = None if self.use_labels: __UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices ) __UpperCamelCase = DistilBertConfig( vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , ) return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def __lowerCamelCase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Dict: __UpperCamelCase = TFDistilBertModel(config=lowercase ) __UpperCamelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask} __UpperCamelCase = model(lowercase ) __UpperCamelCase = [input_ids, input_mask] __UpperCamelCase = model(lowercase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __lowerCamelCase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Optional[Any]: __UpperCamelCase = TFDistilBertForMaskedLM(config=lowercase ) __UpperCamelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask} __UpperCamelCase = model(lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __lowerCamelCase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Tuple: __UpperCamelCase = TFDistilBertForQuestionAnswering(config=lowercase ) __UpperCamelCase = { """input_ids""": input_ids, """attention_mask""": input_mask, } __UpperCamelCase = model(lowercase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __lowerCamelCase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Tuple: __UpperCamelCase = self.num_labels __UpperCamelCase = TFDistilBertForSequenceClassification(lowercase ) __UpperCamelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask} __UpperCamelCase = model(lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __lowerCamelCase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> int: __UpperCamelCase = self.num_choices __UpperCamelCase = TFDistilBertForMultipleChoice(lowercase ) __UpperCamelCase = tf.tile(tf.expand_dims(lowercase , 1 ) , (1, self.num_choices, 1) ) __UpperCamelCase = tf.tile(tf.expand_dims(lowercase , 1 ) , (1, self.num_choices, 1) ) __UpperCamelCase = { """input_ids""": multiple_choice_inputs_ids, """attention_mask""": multiple_choice_input_mask, } __UpperCamelCase = model(lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __lowerCamelCase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Optional[int]: __UpperCamelCase = self.num_labels __UpperCamelCase = TFDistilBertForTokenClassification(lowercase ) __UpperCamelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask} __UpperCamelCase = model(lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __lowerCamelCase ( self ) -> Dict: __UpperCamelCase = self.prepare_config_and_inputs() ((__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase)) = config_and_inputs __UpperCamelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_tf class UpperCAmelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase): __SCREAMING_SNAKE_CASE = ( ( TFDistilBertModel, TFDistilBertForMaskedLM, TFDistilBertForQuestionAnswering, TFDistilBertForSequenceClassification, TFDistilBertForTokenClassification, TFDistilBertForMultipleChoice, ) if is_tf_available() else None ) __SCREAMING_SNAKE_CASE = ( { '''feature-extraction''': TFDistilBertModel, '''fill-mask''': TFDistilBertForMaskedLM, '''question-answering''': TFDistilBertForQuestionAnswering, '''text-classification''': TFDistilBertForSequenceClassification, '''token-classification''': TFDistilBertForTokenClassification, '''zero-shot''': TFDistilBertForSequenceClassification, } if is_tf_available() else {} ) __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = False def __lowerCamelCase ( self ) -> Optional[Any]: __UpperCamelCase = TFDistilBertModelTester(self ) __UpperCamelCase = ConfigTester(self , config_class=lowercase , dim=3_7 ) def __lowerCamelCase ( self ) -> Any: self.config_tester.run_common_tests() def __lowerCamelCase ( self ) -> Dict: __UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_model(*lowercase ) def __lowerCamelCase ( self ) -> Union[str, Any]: __UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_masked_lm(*lowercase ) def __lowerCamelCase ( self ) -> int: __UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_question_answering(*lowercase ) def __lowerCamelCase ( self ) -> Any: __UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_sequence_classification(*lowercase ) def __lowerCamelCase ( self ) -> Optional[Any]: __UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_multiple_choice(*lowercase ) def __lowerCamelCase ( self ) -> Union[str, Any]: __UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_token_classification(*lowercase ) @slow def __lowerCamelCase ( self ) -> Tuple: for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ): __UpperCamelCase = TFDistilBertModel.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) @require_tf class UpperCAmelCase__ ( unittest.TestCase): @slow def __lowerCamelCase ( self ) -> Optional[int]: __UpperCamelCase = TFDistilBertModel.from_pretrained("""distilbert-base-uncased""" ) __UpperCamelCase = tf.constant([[0, 1, 2, 3, 4, 5]] ) __UpperCamelCase = model(lowercase )[0] __UpperCamelCase = [1, 6, 7_6_8] self.assertEqual(output.shape , lowercase ) __UpperCamelCase = tf.constant( [ [ [0.19_261_885, -0.13_732_955, 0.4_119_799], [0.22_150_156, -0.07_422_661, 0.39_037_204], [0.22_756_018, -0.0_896_414, 0.3_701_467], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , lowercase , atol=1E-4 )
349
0
"""simple docstring""" from __future__ import annotations def __lowerCamelCase ( __UpperCamelCase ) -> List[Any]: """simple docstring""" if not nums: return 0 lowerCAmelCase_ : Tuple = nums[0] lowerCAmelCase_ : Tuple = 0 for num in nums[1:]: lowerCAmelCase_ , lowerCAmelCase_ : int = ( max_excluding + num, max(__A , __A ), ) return max(__A , __A ) if __name__ == "__main__": import doctest doctest.testmod()
241
'''simple docstring''' from __future__ import annotations import math import numpy as np from numpy.linalg import norm def _lowercase ( __A ,__A ): '''simple docstring''' return math.sqrt(sum(pow(a - b ,2 ) for a, b in zip(__A ,__A ) ) ) def _lowercase ( __A ,__A ): '''simple docstring''' if dataset.ndim != value_array.ndim: __UpperCamelCase = ( """Wrong input data's dimensions... """ f"dataset : {dataset.ndim}, value_array : {value_array.ndim}" ) raise ValueError(__A ) try: if dataset.shape[1] != value_array.shape[1]: __UpperCamelCase = ( """Wrong input data's shape... """ f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}" ) raise ValueError(__A ) except IndexError: if dataset.ndim != value_array.ndim: raise TypeError("""Wrong shape""" ) if dataset.dtype != value_array.dtype: __UpperCamelCase = ( """Input data have different datatype... """ f"dataset : {dataset.dtype}, value_array : {value_array.dtype}" ) raise TypeError(__A ) __UpperCamelCase = [] for value in value_array: __UpperCamelCase = euclidean(__A ,dataset[0] ) __UpperCamelCase = dataset[0].tolist() for dataset_value in dataset[1:]: __UpperCamelCase = euclidean(__A ,__A ) if dist > temp_dist: __UpperCamelCase = temp_dist __UpperCamelCase = dataset_value.tolist() answer.append([vector, dist] ) return answer def _lowercase ( __A ,__A ): '''simple docstring''' return np.dot(__A ,__A ) / (norm(__A ) * norm(__A )) if __name__ == "__main__": import doctest doctest.testmod()
349
0
import baseaa import io import json import os from copy import deepcopy from ..optimizer import AcceleratedOptimizer from ..scheduler import AcceleratedScheduler class _lowerCamelCase : """simple docstring""" def __init__( self , UpperCAmelCase ) -> str: '''simple docstring''' if isinstance(UpperCAmelCase , UpperCAmelCase ): # Don't modify user's data should they want to reuse it (e.g. in tests), because once we # modified it, it will not be accepted here again, since `auto` values would have been overridden __snake_case : List[str] = deepcopy(UpperCAmelCase ) elif os.path.exists(UpperCAmelCase ): with io.open(UpperCAmelCase , "r" , encoding="utf-8" ) as f: __snake_case : Optional[int] = json.load(UpperCAmelCase ) else: try: __snake_case : Dict = baseaa.urlsafe_baadecode(UpperCAmelCase ).decode("utf-8" ) __snake_case : Optional[Any] = json.loads(UpperCAmelCase ) except (UnicodeDecodeError, AttributeError, ValueError): raise ValueError( F"""Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}""" ) __snake_case : Optional[Any] = config self.set_stage_and_offload() def UpperCAmelCase ( self ) -> Dict: '''simple docstring''' __snake_case : List[Any] = self.get_value("zero_optimization.stage" , -1 ) # offload __snake_case : List[str] = False if self.is_zeroa() or self.is_zeroa(): __snake_case : Any = set(["cpu", "nvme"] ) __snake_case : int = set( [ self.get_value("zero_optimization.offload_optimizer.device" ), self.get_value("zero_optimization.offload_param.device" ), ] ) if len(offload_devices & offload_devices_valid ) > 0: __snake_case : str = True def UpperCAmelCase ( self , UpperCAmelCase ) -> int: '''simple docstring''' __snake_case : Any = self.config # find the config node of interest if it exists __snake_case : Optional[Any] = ds_key_long.split("." ) __snake_case : str = nodes.pop() for node in nodes: __snake_case : Union[str, Any] = config.get(UpperCAmelCase ) if config is None: return None, ds_key return config, ds_key def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase=None ) -> int: '''simple docstring''' __snake_case , __snake_case : Dict = self.find_config_node(UpperCAmelCase ) if config is None: return default return config.get(UpperCAmelCase , UpperCAmelCase ) def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase=False ) -> Dict: '''simple docstring''' __snake_case : Optional[Any] = self.config # find the config node of interest if it exists __snake_case : Dict = ds_key_long.split("." ) for node in nodes: __snake_case : Union[str, Any] = config __snake_case : Dict = config.get(UpperCAmelCase ) if config is None: if must_exist: raise ValueError(F"""Can't find {ds_key_long} entry in the config: {self.config}""" ) else: return # if found remove it if parent_config is not None: parent_config.pop(UpperCAmelCase ) def UpperCAmelCase ( self , UpperCAmelCase ) -> str: '''simple docstring''' __snake_case : List[Any] = self.get_value(UpperCAmelCase ) return False if value is None else bool(UpperCAmelCase ) def UpperCAmelCase ( self , UpperCAmelCase ) -> Any: '''simple docstring''' __snake_case : Optional[Any] = self.get_value(UpperCAmelCase ) return False if value is None else not bool(UpperCAmelCase ) def UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' return self._stage == 2 def UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' return self._stage == 3 def UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' return self._offload class _lowerCamelCase : """simple docstring""" def __init__( self , UpperCAmelCase ) -> Tuple: '''simple docstring''' __snake_case : List[str] = engine def UpperCAmelCase ( self , UpperCAmelCase , **UpperCAmelCase ) -> Union[str, Any]: '''simple docstring''' self.engine.backward(UpperCAmelCase , **UpperCAmelCase ) # Deepspeed's `engine.step` performs the following operations: # - gradient accumulation check # - gradient clipping # - optimizer step # - zero grad # - checking overflow # - lr_scheduler step (only if engine.lr_scheduler is not None) self.engine.step() # and this plugin overrides the above calls with no-ops when Accelerate runs under # Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple # training loop that works transparently under many training regimes. class _lowerCamelCase ( UpperCAmelCase_ ): """simple docstring""" def __init__( self , UpperCAmelCase ) -> Union[str, Any]: '''simple docstring''' super().__init__(UpperCAmelCase , device_placement=UpperCAmelCase , scaler=UpperCAmelCase ) __snake_case : Optional[Any] = hasattr(self.optimizer , "overflow" ) def UpperCAmelCase ( self , UpperCAmelCase=None ) -> str: '''simple docstring''' pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed def UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed @property def UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' if self.__has_overflow__: return self.optimizer.overflow return False class _lowerCamelCase ( UpperCAmelCase_ ): """simple docstring""" def __init__( self , UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]: '''simple docstring''' super().__init__(UpperCAmelCase , UpperCAmelCase ) def UpperCAmelCase ( self ) -> int: '''simple docstring''' pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed class _lowerCamelCase : """simple docstring""" def __init__( self , UpperCAmelCase , UpperCAmelCase=0.001 , UpperCAmelCase=0 , **UpperCAmelCase ) -> List[Any]: '''simple docstring''' __snake_case : List[Any] = params __snake_case : Any = lr __snake_case : List[str] = weight_decay __snake_case : Dict = kwargs class _lowerCamelCase : """simple docstring""" def __init__( self , UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=0 , **UpperCAmelCase ) -> Optional[int]: '''simple docstring''' __snake_case : List[str] = optimizer __snake_case : Optional[Any] = total_num_steps __snake_case : Optional[int] = warmup_num_steps __snake_case : Any = kwargs
326
'''simple docstring''' from datetime import datetime import requests def _lowercase ( __A ): '''simple docstring''' __UpperCamelCase = """https://downloadgram.net/wp-json/wppress/video-downloader/video?url=""" __UpperCamelCase = requests.get(base_url + url ).json()[0]["""urls"""][0]["""src"""] return requests.get(__A ).content if __name__ == "__main__": a__ : int = input('Enter Video/IGTV url: ').strip() a__ : int = f'''{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4''' with open(file_name, 'wb') as fp: fp.write(download_video(url)) print(f'''Done. Video saved to disk as {file_name}.''')
349
0
"""simple docstring""" import baseaa def lowercase (snake_case__ : List[str] ) -> Union[str, Any]: '''simple docstring''' return baseaa.baaencode(string.encode("""utf-8""" ) ) def lowercase (snake_case__ : List[str] ) -> Optional[Any]: '''simple docstring''' return baseaa.baadecode(__A ).decode("""utf-8""" ) if __name__ == "__main__": a = 'Hello World!' a = baseaa_encode(test) print(encoded) a = baseaa_decode(encoded) print(decoded)
155
'''simple docstring''' import asyncio import os import re import sys import tempfile import unittest from contextlib import contextmanager from copy import deepcopy from distutils.util import strtobool from enum import Enum from importlib.util import find_spec from pathlib import Path from unittest.mock import patch import pyarrow as pa import pytest import requests from packaging import version from datasets import config if config.PY_VERSION < version.parse('3.8'): import importlib_metadata else: import importlib.metadata as importlib_metadata def _lowercase ( __A ,__A=False ): '''simple docstring''' try: __UpperCamelCase = os.environ[key] except KeyError: # KEY isn't set, default to `default`. __UpperCamelCase = default else: # KEY is set, convert it to True or False. try: __UpperCamelCase = strtobool(__A ) except ValueError: # More values are supported, but let's keep the message simple. raise ValueError(f"If set, {key} must be yes or no." ) return _value a__ : Optional[Any] = parse_flag_from_env('RUN_SLOW', default=False) a__ : Union[str, Any] = parse_flag_from_env('RUN_REMOTE', default=False) a__ : Any = parse_flag_from_env('RUN_LOCAL', default=True) a__ : List[Any] = parse_flag_from_env('RUN_PACKAGED', default=True) # Compression a__ : Optional[int] = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='test requires lz4') a__ : Optional[int] = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='test requires py7zr') a__ : Optional[Any] = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='test requires zstandard') # Audio a__ : List[Any] = pytest.mark.skipif( # On Windows and OS X, soundfile installs sndfile find_spec('soundfile') is None or version.parse(importlib_metadata.version('soundfile')) < version.parse('0.12.0'), reason='test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ', ) # Beam a__ : str = pytest.mark.skipif( not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('0.3.2'), reason='test requires apache-beam and a compatible dill version', ) # Dill-cloudpickle compatibility a__ : str = pytest.mark.skipif( config.DILL_VERSION <= version.parse('0.3.2'), reason='test requires dill>0.3.2 for cloudpickle compatibility', ) # Windows a__ : Tuple = pytest.mark.skipif( sys.platform == 'win32', reason='test should not be run on Windows', ) def _lowercase ( __A ): '''simple docstring''' try: import faiss # noqa except ImportError: __UpperCamelCase = unittest.skip("""test requires faiss""" )(__A ) return test_case def _lowercase ( __A ): '''simple docstring''' try: import regex # noqa except ImportError: __UpperCamelCase = unittest.skip("""test requires regex""" )(__A ) return test_case def _lowercase ( __A ): '''simple docstring''' try: import elasticsearch # noqa except ImportError: __UpperCamelCase = unittest.skip("""test requires elasticsearch""" )(__A ) return test_case def _lowercase ( __A ): '''simple docstring''' try: import sqlalchemy # noqa except ImportError: __UpperCamelCase = unittest.skip("""test requires sqlalchemy""" )(__A ) return test_case def _lowercase ( __A ): '''simple docstring''' if not config.TORCH_AVAILABLE: __UpperCamelCase = unittest.skip("""test requires PyTorch""" )(__A ) return test_case def _lowercase ( __A ): '''simple docstring''' if not config.TF_AVAILABLE: __UpperCamelCase = unittest.skip("""test requires TensorFlow""" )(__A ) return test_case def _lowercase ( __A ): '''simple docstring''' if not config.JAX_AVAILABLE: __UpperCamelCase = unittest.skip("""test requires JAX""" )(__A ) return test_case def _lowercase ( __A ): '''simple docstring''' if not config.PIL_AVAILABLE: __UpperCamelCase = unittest.skip("""test requires Pillow""" )(__A ) return test_case def _lowercase ( __A ): '''simple docstring''' try: import transformers # noqa F401 except ImportError: return unittest.skip("""test requires transformers""" )(__A ) else: return test_case def _lowercase ( __A ): '''simple docstring''' try: import tiktoken # noqa F401 except ImportError: return unittest.skip("""test requires tiktoken""" )(__A ) else: return test_case def _lowercase ( __A ): '''simple docstring''' try: import spacy # noqa F401 except ImportError: return unittest.skip("""test requires spacy""" )(__A ) else: return test_case def _lowercase ( __A ): '''simple docstring''' def _require_spacy_model(__A ): try: import spacy # noqa F401 spacy.load(__A ) except ImportError: return unittest.skip("""test requires spacy""" )(__A ) except OSError: return unittest.skip("""test requires spacy model '{}'""".format(__A ) )(__A ) else: return test_case return _require_spacy_model def _lowercase ( __A ): '''simple docstring''' try: import pyspark # noqa F401 except ImportError: return unittest.skip("""test requires pyspark""" )(__A ) else: return test_case def _lowercase ( __A ): '''simple docstring''' try: import joblibspark # noqa F401 except ImportError: return unittest.skip("""test requires joblibspark""" )(__A ) else: return test_case def _lowercase ( __A ): '''simple docstring''' if not _run_slow_tests or _run_slow_tests == 0: __UpperCamelCase = unittest.skip("""test is slow""" )(__A ) return test_case def _lowercase ( __A ): '''simple docstring''' if not _run_local_tests or _run_local_tests == 0: __UpperCamelCase = unittest.skip("""test is local""" )(__A ) return test_case def _lowercase ( __A ): '''simple docstring''' if not _run_packaged_tests or _run_packaged_tests == 0: __UpperCamelCase = unittest.skip("""test is packaged""" )(__A ) return test_case def _lowercase ( __A ): '''simple docstring''' if not _run_remote_tests or _run_remote_tests == 0: __UpperCamelCase = unittest.skip("""test requires remote""" )(__A ) return test_case def _lowercase ( *__A ): '''simple docstring''' def decorate(cls ): for name, fn in cls.__dict__.items(): if callable(__A ) and name.startswith("""test""" ): for decorator in decorators: __UpperCamelCase = decorator(__A ) setattr(cls ,__A ,__A ) return cls return decorate class UpperCAmelCase__ ( UpperCAmelCase_): pass class UpperCAmelCase__ ( UpperCAmelCase_): __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = 1 __SCREAMING_SNAKE_CASE = 2 @contextmanager def _lowercase ( __A=OfflineSimulationMode.CONNECTION_FAILS ,__A=1E-16 ): '''simple docstring''' __UpperCamelCase = requests.Session().request def timeout_request(__A ,__A ,__A ,**__A ): # Change the url to an invalid url so that the connection hangs __UpperCamelCase = """https://10.255.255.1""" if kwargs.get("""timeout""" ) is None: raise RequestWouldHangIndefinitelyError( f"Tried a call to {url} in offline mode with no timeout set. Please set a timeout." ) __UpperCamelCase = timeout try: return online_request(__A ,__A ,**__A ) except Exception as e: # The following changes in the error are just here to make the offline timeout error prettier __UpperCamelCase = url __UpperCamelCase = e.args[0] __UpperCamelCase = (max_retry_error.args[0].replace("""10.255.255.1""" ,f"OfflineMock[{url}]" ),) __UpperCamelCase = (max_retry_error,) raise def raise_connection_error(__A ,__A ,**__A ): raise requests.ConnectionError("""Offline mode is enabled.""" ,request=__A ) if mode is OfflineSimulationMode.CONNECTION_FAILS: with patch("""requests.Session.send""" ,__A ): yield elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT: # inspired from https://stackoverflow.com/a/904609 with patch("""requests.Session.request""" ,__A ): yield elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1: with patch("""datasets.config.HF_DATASETS_OFFLINE""" ,__A ): yield else: raise ValueError("""Please use a value from the OfflineSimulationMode enum.""" ) @contextmanager def _lowercase ( *__A ,**__A ): '''simple docstring''' __UpperCamelCase = str(Path().resolve() ) with tempfile.TemporaryDirectory(*__A ,**__A ) as tmp_dir: try: os.chdir(__A ) yield finally: os.chdir(__A ) @contextmanager def _lowercase ( ): '''simple docstring''' import gc gc.collect() __UpperCamelCase = pa.total_allocated_bytes() yield assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase." @contextmanager def _lowercase ( ): '''simple docstring''' import gc gc.collect() __UpperCamelCase = pa.total_allocated_bytes() yield assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase." def _lowercase ( __A ,__A ): '''simple docstring''' return deepcopy(__A ).integers(0 ,100 ,10 ).tolist() == deepcopy(__A ).integers(0 ,100 ,10 ).tolist() def _lowercase ( __A ): '''simple docstring''' import decorator from requests.exceptions import HTTPError def _wrapper(__A ,*__A ,**__A ): try: return func(*__A ,**__A ) except HTTPError as err: if str(__A ).startswith("""500""" ) or str(__A ).startswith("""502""" ): pytest.xfail(str(__A ) ) raise err return decorator.decorator(_wrapper ,__A ) class UpperCAmelCase__ : def __init__( self , lowercase , lowercase , lowercase ) -> str: __UpperCamelCase = returncode __UpperCamelCase = stdout __UpperCamelCase = stderr async def _lowercase ( __A ,__A ): '''simple docstring''' while True: __UpperCamelCase = await stream.readline() if line: callback(__A ) else: break async def _lowercase ( __A ,__A=None ,__A=None ,__A=None ,__A=False ,__A=False ): '''simple docstring''' if echo: print("""\nRunning: """ ,""" """.join(__A ) ) __UpperCamelCase = await asyncio.create_subprocess_exec( cmd[0] ,*cmd[1:] ,stdin=__A ,stdout=asyncio.subprocess.PIPE ,stderr=asyncio.subprocess.PIPE ,env=__A ,) # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait # # If it starts hanging, will need to switch to the following code. The problem is that no data # will be seen until it's done and if it hangs for example there will be no debug info. # out, err = await p.communicate() # return _RunOutput(p.returncode, out, err) __UpperCamelCase = [] __UpperCamelCase = [] def tee(__A ,__A ,__A ,__A="" ): __UpperCamelCase = line.decode("""utf-8""" ).rstrip() sink.append(__A ) if not quiet: print(__A ,__A ,file=__A ) # XXX: the timeout doesn't seem to make any difference here await asyncio.wait( [ _read_stream(p.stdout ,lambda __A : tee(__A ,__A ,sys.stdout ,label="""stdout:""" ) ), _read_stream(p.stderr ,lambda __A : tee(__A ,__A ,sys.stderr ,label="""stderr:""" ) ), ] ,timeout=__A ,) return _RunOutput(await p.wait() ,__A ,__A ) def _lowercase ( __A ,__A=None ,__A=None ,__A=180 ,__A=False ,__A=True ): '''simple docstring''' __UpperCamelCase = asyncio.get_event_loop() __UpperCamelCase = loop.run_until_complete( _stream_subprocess(__A ,env=__A ,stdin=__A ,timeout=__A ,quiet=__A ,echo=__A ) ) __UpperCamelCase = """ """.join(__A ) if result.returncode > 0: __UpperCamelCase = """\n""".join(result.stderr ) raise RuntimeError( f"'{cmd_str}' failed with returncode {result.returncode}\n\n" f"The combined stderr from workers follows:\n{stderr}" ) # check that the subprocess actually did run and produced some output, should the test rely on # the remote side to do the testing if not result.stdout and not result.stderr: raise RuntimeError(f"'{cmd_str}' produced no output." ) return result def _lowercase ( ): '''simple docstring''' __UpperCamelCase = os.environ.get("""PYTEST_XDIST_WORKER""" ,"""gw0""" ) __UpperCamelCase = re.sub(R"""^gw""" ,"""""" ,__A ,0 ,re.M ) return int(__A ) def _lowercase ( ): '''simple docstring''' __UpperCamelCase = 29_500 __UpperCamelCase = pytest_xdist_worker_id() return port + uniq_delta
349
0
"""simple docstring""" from math import factorial def snake_case ( A__ = 20 ): UpperCAmelCase_ : List[Any] = 2 * n # middle entry of odd rows starting at row 3 is the solution for n = 1, # 2, 3,... UpperCAmelCase_ : Any = n // 2 return int(factorial(__A ) / (factorial(__A ) * factorial(n - k )) ) if __name__ == "__main__": import sys if len(sys.argv) == 1: print(solution(20)) else: try: lowerCamelCase_ = int(sys.argv[1]) print(solution(n)) except ValueError: print('''Invalid entry - please enter a number.''')
268
'''simple docstring''' import re def _lowercase ( __A ): '''simple docstring''' return [char.split() for char in re.split(R"""[^ a-z A-Z 0-9 \s]""" ,str_ )] def _lowercase ( __A ): '''simple docstring''' __UpperCamelCase = split_input(str_ ) return "".join( ["""""".join([char.capitalize() for char in sub_str] ) for sub_str in string_split] ) def _lowercase ( __A ,__A ,__A ): '''simple docstring''' try: __UpperCamelCase = split_input(__A ) if upper: __UpperCamelCase = """""".join( [ separator.join([char.upper() for char in sub_str] ) for sub_str in string_split ] ) else: __UpperCamelCase = """""".join( [ separator.join([char.lower() for char in sub_str] ) for sub_str in string_split ] ) return res_str except IndexError: return "not valid string" def _lowercase ( __A ): '''simple docstring''' return to_simple_case(__A ) def _lowercase ( __A ): '''simple docstring''' try: __UpperCamelCase = to_simple_case(__A ) return res_str[0].lower() + res_str[1:] except IndexError: return "not valid string" def _lowercase ( __A ,__A ): '''simple docstring''' return to_complex_case(__A ,__A ,"""_""" ) def _lowercase ( __A ,__A ): '''simple docstring''' return to_complex_case(__A ,__A ,"""-""" ) if __name__ == "__main__": __import__('doctest').testmod()
349
0
SCREAMING_SNAKE_CASE_:List[str] = tuple[float, float, float] SCREAMING_SNAKE_CASE_:Any = tuple[float, float, float] def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> int: """simple docstring""" A : Optional[int] = end_pointa[0] - end_pointa[0] A : List[str] = end_pointa[1] - end_pointa[1] A : Optional[int] = end_pointa[2] - end_pointa[2] return (x, y, z) def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> str: """simple docstring""" A : Optional[Any] = ab[1] * ac[2] - ab[2] * ac[1] # *i A : Any = (ab[0] * ac[2] - ab[2] * ac[0]) * -1 # *j A : List[Any] = ab[0] * ac[1] - ab[1] * ac[0] # *k return (x, y, z) def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> Optional[int]: """simple docstring""" return tuple(round(__A , __A ) for x in vector ) == (0, 0, 0) def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = 10 ) -> int: """simple docstring""" A : Any = create_vector(__A , __A ) A : List[str] = create_vector(__A , __A ) return is_zero_vector(get_ad_vectors_cross(__A , __A ) , __A )
116
'''simple docstring''' import gc import random import tempfile import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline from diffusers.utils import floats_tensor, nightly, torch_device from diffusers.utils.testing_utils import require_torch_gpu class UpperCAmelCase__ ( unittest.TestCase): def __lowerCamelCase ( self ) -> List[str]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def __lowerCamelCase ( self ) -> int: __UpperCamelCase = 1 __UpperCamelCase = 3 __UpperCamelCase = (3_2, 3_2) __UpperCamelCase = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(lowercase ) return image @property def __lowerCamelCase ( self ) -> Dict: torch.manual_seed(0 ) __UpperCamelCase = UNetaDConditionModel( block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=3_2 , ) return model @property def __lowerCamelCase ( self ) -> List[str]: torch.manual_seed(0 ) __UpperCamelCase = AutoencoderKL( block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , ) return model @property def __lowerCamelCase ( self ) -> Optional[int]: torch.manual_seed(0 ) __UpperCamelCase = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ) return CLIPTextModel(lowercase ) @property def __lowerCamelCase ( self ) -> Tuple: def extract(*lowercase , **lowercase ): class UpperCAmelCase__ : def __init__( self ) -> Tuple: __UpperCamelCase = torch.ones([0] ) def __lowerCamelCase ( self , lowercase ) -> List[str]: self.pixel_values.to(lowercase ) return self return Out() return extract def __lowerCamelCase ( self ) -> Any: __UpperCamelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator __UpperCamelCase = self.dummy_cond_unet __UpperCamelCase = DDIMScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=lowercase , set_alpha_to_one=lowercase , ) __UpperCamelCase = self.dummy_vae __UpperCamelCase = self.dummy_text_encoder __UpperCamelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) # make sure here that pndm scheduler skips prk __UpperCamelCase = StableDiffusionPipeline( unet=lowercase , scheduler=lowercase , vae=lowercase , text_encoder=lowercase , tokenizer=lowercase , safety_checker=lowercase , feature_extractor=self.dummy_extractor , ) __UpperCamelCase = sd_pipe.to(lowercase ) sd_pipe.set_progress_bar_config(disable=lowercase ) __UpperCamelCase = """A painting of a squirrel eating a burger""" __UpperCamelCase = torch.Generator(device=lowercase ).manual_seed(0 ) __UpperCamelCase = sd_pipe([prompt] , generator=lowercase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" ) __UpperCamelCase = output.images __UpperCamelCase = torch.Generator(device=lowercase ).manual_seed(0 ) __UpperCamelCase = sd_pipe( [prompt] , generator=lowercase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , return_dict=lowercase , )[0] __UpperCamelCase = image[0, -3:, -3:, -1] __UpperCamelCase = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 6_4, 6_4, 3) __UpperCamelCase = np.array([0.5_756, 0.6_118, 0.5_005, 0.5_041, 0.5_471, 0.4_726, 0.4_976, 0.4_865, 0.4_864] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 def __lowerCamelCase ( self ) -> Tuple: __UpperCamelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator __UpperCamelCase = self.dummy_cond_unet __UpperCamelCase = PNDMScheduler(skip_prk_steps=lowercase ) __UpperCamelCase = self.dummy_vae __UpperCamelCase = self.dummy_text_encoder __UpperCamelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) # make sure here that pndm scheduler skips prk __UpperCamelCase = StableDiffusionPipeline( unet=lowercase , scheduler=lowercase , vae=lowercase , text_encoder=lowercase , tokenizer=lowercase , safety_checker=lowercase , feature_extractor=self.dummy_extractor , ) __UpperCamelCase = sd_pipe.to(lowercase ) sd_pipe.set_progress_bar_config(disable=lowercase ) __UpperCamelCase = """A painting of a squirrel eating a burger""" __UpperCamelCase = torch.Generator(device=lowercase ).manual_seed(0 ) __UpperCamelCase = sd_pipe([prompt] , generator=lowercase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" ) __UpperCamelCase = output.images __UpperCamelCase = torch.Generator(device=lowercase ).manual_seed(0 ) __UpperCamelCase = sd_pipe( [prompt] , generator=lowercase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , return_dict=lowercase , )[0] __UpperCamelCase = image[0, -3:, -3:, -1] __UpperCamelCase = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 6_4, 6_4, 3) __UpperCamelCase = np.array([0.5_125, 0.5_716, 0.4_828, 0.5_060, 0.5_650, 0.4_768, 0.5_185, 0.4_895, 0.4_993] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 def __lowerCamelCase ( self ) -> Union[str, Any]: __UpperCamelCase = StableDiffusionPipeline.from_pretrained( """hf-internal-testing/tiny-stable-diffusion-lms-pipe""" , safety_checker=lowercase ) assert isinstance(lowercase , lowercase ) assert isinstance(pipe.scheduler , lowercase ) assert pipe.safety_checker is None __UpperCamelCase = pipe("""example prompt""" , num_inference_steps=2 ).images[0] assert image is not None # check that there's no error when saving a pipeline with one of the models being None with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(lowercase ) __UpperCamelCase = StableDiffusionPipeline.from_pretrained(lowercase ) # sanity check that the pipeline still works assert pipe.safety_checker is None __UpperCamelCase = pipe("""example prompt""" , num_inference_steps=2 ).images[0] assert image is not None @unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" ) def __lowerCamelCase ( self ) -> Optional[int]: __UpperCamelCase = self.dummy_cond_unet __UpperCamelCase = PNDMScheduler(skip_prk_steps=lowercase ) __UpperCamelCase = self.dummy_vae __UpperCamelCase = self.dummy_text_encoder __UpperCamelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) # put models in fp16 __UpperCamelCase = unet.half() __UpperCamelCase = vae.half() __UpperCamelCase = bert.half() # make sure here that pndm scheduler skips prk __UpperCamelCase = StableDiffusionPipeline( unet=lowercase , scheduler=lowercase , vae=lowercase , text_encoder=lowercase , tokenizer=lowercase , safety_checker=lowercase , feature_extractor=self.dummy_extractor , ) __UpperCamelCase = sd_pipe.to(lowercase ) sd_pipe.set_progress_bar_config(disable=lowercase ) __UpperCamelCase = """A painting of a squirrel eating a burger""" __UpperCamelCase = sd_pipe([prompt] , num_inference_steps=2 , output_type="""np""" ).images assert image.shape == (1, 6_4, 6_4, 3) @nightly @require_torch_gpu class UpperCAmelCase__ ( unittest.TestCase): def __lowerCamelCase ( self ) -> Tuple: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowerCamelCase ( self ) -> Dict: __UpperCamelCase = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" , safety_checker=lowercase ) __UpperCamelCase = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config ) __UpperCamelCase = sd_pipe.to(lowercase ) sd_pipe.set_progress_bar_config(disable=lowercase ) __UpperCamelCase = ( """portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle""" """ coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with""" """ anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and""" """ children from bahnhof zoo, detailed """ ) __UpperCamelCase = 4_0_0_3_6_6_0_3_4_6 __UpperCamelCase = 7 # without safety guidance (sld_guidance_scale = 0) __UpperCamelCase = torch.manual_seed(lowercase ) __UpperCamelCase = sd_pipe( [prompt] , generator=lowercase , guidance_scale=lowercase , num_inference_steps=5_0 , output_type="""np""" , width=5_1_2 , height=5_1_2 , sld_guidance_scale=0 , ) __UpperCamelCase = output.images __UpperCamelCase = image[0, -3:, -3:, -1] __UpperCamelCase = [0.2_278, 0.2_231, 0.2_249, 0.2_333, 0.2_303, 0.1_885, 0.2_273, 0.2_144, 0.2_176] assert image.shape == (1, 5_1_2, 5_1_2, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 # without safety guidance (strong configuration) __UpperCamelCase = torch.manual_seed(lowercase ) __UpperCamelCase = sd_pipe( [prompt] , generator=lowercase , guidance_scale=lowercase , num_inference_steps=5_0 , output_type="""np""" , width=5_1_2 , height=5_1_2 , sld_guidance_scale=2_0_0_0 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , ) __UpperCamelCase = output.images __UpperCamelCase = image[0, -3:, -3:, -1] __UpperCamelCase = [0.2_383, 0.2_276, 0.236, 0.2_192, 0.2_186, 0.2_053, 0.1_971, 0.1_901, 0.1_719] assert image.shape == (1, 5_1_2, 5_1_2, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def __lowerCamelCase ( self ) -> Optional[Any]: __UpperCamelCase = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" , safety_checker=lowercase ) __UpperCamelCase = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config ) __UpperCamelCase = sd_pipe.to(lowercase ) sd_pipe.set_progress_bar_config(disable=lowercase ) __UpperCamelCase = """padme amidala taking a bath artwork, safe for work, no nudity""" __UpperCamelCase = 2_7_3_4_9_7_1_7_5_5 __UpperCamelCase = 7 __UpperCamelCase = torch.manual_seed(lowercase ) __UpperCamelCase = sd_pipe( [prompt] , generator=lowercase , guidance_scale=lowercase , num_inference_steps=5_0 , output_type="""np""" , width=5_1_2 , height=5_1_2 , sld_guidance_scale=0 , ) __UpperCamelCase = output.images __UpperCamelCase = image[0, -3:, -3:, -1] __UpperCamelCase = [0.3_502, 0.3_622, 0.3_396, 0.3_642, 0.3_478, 0.3_318, 0.35, 0.3_348, 0.3_297] assert image.shape == (1, 5_1_2, 5_1_2, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 __UpperCamelCase = torch.manual_seed(lowercase ) __UpperCamelCase = sd_pipe( [prompt] , generator=lowercase , guidance_scale=lowercase , num_inference_steps=5_0 , output_type="""np""" , width=5_1_2 , height=5_1_2 , sld_guidance_scale=2_0_0_0 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , ) __UpperCamelCase = output.images __UpperCamelCase = image[0, -3:, -3:, -1] __UpperCamelCase = [0.5_531, 0.5_206, 0.4_895, 0.5_156, 0.5_182, 0.4_751, 0.4_802, 0.4_803, 0.4_443] assert image.shape == (1, 5_1_2, 5_1_2, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def __lowerCamelCase ( self ) -> Optional[Any]: __UpperCamelCase = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" ) __UpperCamelCase = sd_pipe.to(lowercase ) sd_pipe.set_progress_bar_config(disable=lowercase ) __UpperCamelCase = ( """the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c.""" """ leyendecker""" ) __UpperCamelCase = 1_0_4_4_3_5_5_2_3_4 __UpperCamelCase = 1_2 __UpperCamelCase = torch.manual_seed(lowercase ) __UpperCamelCase = sd_pipe( [prompt] , generator=lowercase , guidance_scale=lowercase , num_inference_steps=5_0 , output_type="""np""" , width=5_1_2 , height=5_1_2 , sld_guidance_scale=0 , ) __UpperCamelCase = output.images __UpperCamelCase = image[0, -3:, -3:, -1] __UpperCamelCase = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] ) assert image.shape == (1, 5_1_2, 5_1_2, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-7 __UpperCamelCase = torch.manual_seed(lowercase ) __UpperCamelCase = sd_pipe( [prompt] , generator=lowercase , guidance_scale=lowercase , num_inference_steps=5_0 , output_type="""np""" , width=5_1_2 , height=5_1_2 , sld_guidance_scale=2_0_0_0 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , ) __UpperCamelCase = output.images __UpperCamelCase = image[0, -3:, -3:, -1] __UpperCamelCase = np.array([0.5_818, 0.6_285, 0.6_835, 0.6_019, 0.625, 0.6_754, 0.6_096, 0.6_334, 0.6_561] ) assert image.shape == (1, 5_1_2, 5_1_2, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
349
0
import logging import os from .state import PartialState class __snake_case ( logging.LoggerAdapter ): @staticmethod def __a ( __UpperCamelCase ) -> Dict: '''simple docstring''' snake_case__ : List[str] = PartialState() return not main_process_only or (main_process_only and state.is_main_process) def __a ( self , __UpperCamelCase , __UpperCamelCase , *__UpperCamelCase , **__UpperCamelCase ) -> List[str]: '''simple docstring''' if PartialState._shared_state == {}: raise RuntimeError( 'You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.' ) snake_case__ : Dict = kwargs.pop('main_process_only' , __UpperCamelCase ) snake_case__ : str = kwargs.pop('in_order' , __UpperCamelCase ) if self.isEnabledFor(__UpperCamelCase ): if self._should_log(__UpperCamelCase ): snake_case__ , snake_case__ : List[Any] = self.process(__UpperCamelCase , __UpperCamelCase ) self.logger.log(__UpperCamelCase , __UpperCamelCase , *__UpperCamelCase , **__UpperCamelCase ) elif in_order: snake_case__ : int = PartialState() for i in range(state.num_processes ): if i == state.process_index: snake_case__ , snake_case__ : Dict = self.process(__UpperCamelCase , __UpperCamelCase ) self.logger.log(__UpperCamelCase , __UpperCamelCase , *__UpperCamelCase , **__UpperCamelCase ) state.wait_for_everyone() def UpperCamelCase__ ( A__ , A__ = None ) -> Optional[int]: if log_level is None: snake_case__ : Optional[int] = os.environ.get('ACCELERATE_LOG_LEVEL' , __A ) snake_case__ : int = logging.getLogger(__A ) if log_level is not None: logger.setLevel(log_level.upper() ) logger.root.setLevel(log_level.upper() ) return MultiProcessAdapter(__A , {} )
143
'''simple docstring''' from ..utils import DummyObject, requires_backends class UpperCAmelCase__ ( metaclass=UpperCAmelCase_): __SCREAMING_SNAKE_CASE = ['''flax'''] def __init__( self , *lowercase , **lowercase ) -> List[Any]: requires_backends(self , ["""flax"""] ) @classmethod def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Union[str, Any]: requires_backends(cls , ["""flax"""] ) @classmethod def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Tuple: requires_backends(cls , ["""flax"""] ) class UpperCAmelCase__ ( metaclass=UpperCAmelCase_): __SCREAMING_SNAKE_CASE = ['''flax'''] def __init__( self , *lowercase , **lowercase ) -> str: requires_backends(self , ["""flax"""] ) @classmethod def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Any: requires_backends(cls , ["""flax"""] ) @classmethod def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> int: requires_backends(cls , ["""flax"""] ) class UpperCAmelCase__ ( metaclass=UpperCAmelCase_): __SCREAMING_SNAKE_CASE = ['''flax'''] def __init__( self , *lowercase , **lowercase ) -> Dict: requires_backends(self , ["""flax"""] ) @classmethod def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> int: requires_backends(cls , ["""flax"""] ) @classmethod def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> List[Any]: requires_backends(cls , ["""flax"""] ) class UpperCAmelCase__ ( metaclass=UpperCAmelCase_): __SCREAMING_SNAKE_CASE = ['''flax'''] def __init__( self , *lowercase , **lowercase ) -> Optional[Any]: requires_backends(self , ["""flax"""] ) @classmethod def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Optional[int]: requires_backends(cls , ["""flax"""] ) @classmethod def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Optional[int]: requires_backends(cls , ["""flax"""] ) class UpperCAmelCase__ ( metaclass=UpperCAmelCase_): __SCREAMING_SNAKE_CASE = ['''flax'''] def __init__( self , *lowercase , **lowercase ) -> Optional[Any]: requires_backends(self , ["""flax"""] ) @classmethod def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> str: requires_backends(cls , ["""flax"""] ) @classmethod def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> List[str]: requires_backends(cls , ["""flax"""] ) class UpperCAmelCase__ ( metaclass=UpperCAmelCase_): __SCREAMING_SNAKE_CASE = ['''flax'''] def __init__( self , *lowercase , **lowercase ) -> Optional[int]: requires_backends(self , ["""flax"""] ) @classmethod def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> str: requires_backends(cls , ["""flax"""] ) @classmethod def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> int: requires_backends(cls , ["""flax"""] ) class UpperCAmelCase__ ( metaclass=UpperCAmelCase_): __SCREAMING_SNAKE_CASE = ['''flax'''] def __init__( self , *lowercase , **lowercase ) -> List[Any]: requires_backends(self , ["""flax"""] ) @classmethod def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Any: requires_backends(cls , ["""flax"""] ) @classmethod def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Dict: requires_backends(cls , ["""flax"""] ) class UpperCAmelCase__ ( metaclass=UpperCAmelCase_): __SCREAMING_SNAKE_CASE = ['''flax'''] def __init__( self , *lowercase , **lowercase ) -> Union[str, Any]: requires_backends(self , ["""flax"""] ) @classmethod def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Dict: requires_backends(cls , ["""flax"""] ) @classmethod def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Tuple: requires_backends(cls , ["""flax"""] ) class UpperCAmelCase__ ( metaclass=UpperCAmelCase_): __SCREAMING_SNAKE_CASE = ['''flax'''] def __init__( self , *lowercase , **lowercase ) -> int: requires_backends(self , ["""flax"""] ) @classmethod def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Optional[int]: requires_backends(cls , ["""flax"""] ) @classmethod def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> List[str]: requires_backends(cls , ["""flax"""] ) class UpperCAmelCase__ ( metaclass=UpperCAmelCase_): __SCREAMING_SNAKE_CASE = ['''flax'''] def __init__( self , *lowercase , **lowercase ) -> int: requires_backends(self , ["""flax"""] ) @classmethod def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> str: requires_backends(cls , ["""flax"""] ) @classmethod def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> str: requires_backends(cls , ["""flax"""] ) class UpperCAmelCase__ ( metaclass=UpperCAmelCase_): __SCREAMING_SNAKE_CASE = ['''flax'''] def __init__( self , *lowercase , **lowercase ) -> int: requires_backends(self , ["""flax"""] ) @classmethod def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Tuple: requires_backends(cls , ["""flax"""] ) @classmethod def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Tuple: requires_backends(cls , ["""flax"""] ) class UpperCAmelCase__ ( metaclass=UpperCAmelCase_): __SCREAMING_SNAKE_CASE = ['''flax'''] def __init__( self , *lowercase , **lowercase ) -> Optional[int]: requires_backends(self , ["""flax"""] ) @classmethod def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Tuple: requires_backends(cls , ["""flax"""] ) @classmethod def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Union[str, Any]: requires_backends(cls , ["""flax"""] ) class UpperCAmelCase__ ( metaclass=UpperCAmelCase_): __SCREAMING_SNAKE_CASE = ['''flax'''] def __init__( self , *lowercase , **lowercase ) -> Any: requires_backends(self , ["""flax"""] ) @classmethod def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Tuple: requires_backends(cls , ["""flax"""] ) @classmethod def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> List[str]: requires_backends(cls , ["""flax"""] )
349
0
import warnings from pathlib import Path from typing import List, Tuple, Union import fire from torch import nn from transformers import AutoModelForSeqaSeqLM, AutoTokenizer, PreTrainedModel from transformers.utils import logging snake_case : str = logging.get_logger(__name__) def __lowerCamelCase ( UpperCAmelCase_ : str , UpperCAmelCase_ : int , UpperCAmelCase_ : List[Any] ): """simple docstring""" a :int = nn.ModuleList([src_layers[i] for i in layers_to_copy] ) assert len(__A ) == len(__A ), F'''{len(__A )} != {len(__A )}''' dest_layers.load_state_dict(layers_to_copy.state_dict() ) snake_case : Optional[int] = { # maps num layers in teacher -> num_layers in student -> which teacher layers to copy. # 12: bart, 16: pegasus, 6: marian/Helsinki-NLP 12: { 1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher 2: [0, 6], 3: [0, 6, 11], 4: [0, 4, 8, 11], 6: [0, 2, 4, 7, 9, 11], 9: [0, 1, 2, 4, 5, 7, 9, 10, 11], 12: list(range(12)), }, 16: { # maps num layers in student -> which teacher layers to copy 1: [0], 2: [0, 15], 3: [0, 8, 15], 4: [0, 5, 10, 15], 6: [0, 3, 6, 9, 12, 15], 8: [0, 2, 4, 6, 8, 10, 12, 15], 9: [0, 1, 3, 5, 7, 9, 11, 13, 15], 12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15], 16: list(range(16)), }, 6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))}, } snake_case : Union[str, Any] = { # maps num layers in student -> which teacher layers to copy. 6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]}, 12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]}, 16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]}, } def __lowerCamelCase ( UpperCAmelCase_ : str , UpperCAmelCase_ : Tuple ): """simple docstring""" try: a :Union[str, Any] = LAYERS_TO_COPY[n_teacher][n_student] return val except KeyError: if n_student != n_teacher: warnings.warn( F'''no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first''' F''' {n_student}''' ) return list(range(__A ) ) def __lowerCamelCase ( UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[int] ): """simple docstring""" if n_student > n_teacher: raise ValueError(F'''Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}''' ) elif n_teacher == n_student: return list(range(__A ) ) elif n_student == 1: return [n_teacher - 1] else: return LAYERS_TO_SUPERVISE[n_teacher][n_student] def __lowerCamelCase ( UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[int] = "student" , UpperCAmelCase_ : str = None , UpperCAmelCase_ : List[Any] = None , UpperCAmelCase_ : str=False , UpperCAmelCase_ : str=None , UpperCAmelCase_ : Optional[Any]=None , **UpperCAmelCase_ : Dict , ): """simple docstring""" a :Optional[int] = '''encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher.''' assert (e is not None) or (d is not None), _msg if isinstance(__A , __A ): AutoTokenizer.from_pretrained(__A ).save_pretrained(__A ) # purely for convenience a :Optional[Any] = AutoModelForSeqaSeqLM.from_pretrained(__A ).eval() else: assert isinstance(__A , __A ), F'''teacher must be a model or string got type {type(__A )}''' a :List[str] = teacher.config.to_diff_dict() try: a , a :Dict = teacher.config.encoder_layers, teacher.config.decoder_layers if e is None: a :Union[str, Any] = teacher_e if d is None: a :Optional[Any] = teacher_d init_kwargs.update({'''encoder_layers''': e, '''decoder_layers''': d} ) except AttributeError: # T5 if hasattr(teacher.config , '''num_encoder_layers''' ): a , a :List[str] = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers else: a , a :Tuple = teacher.config.num_layers, teacher.config.num_decoder_layers if e is None: a :str = teacher_e if d is None: a :List[str] = teacher_d if hasattr(teacher.config , '''num_encoder_layers''' ): init_kwargs.update({'''num_encoder_layers''': e, '''num_decoder_layers''': d} ) else: init_kwargs.update({'''num_layers''': e, '''num_decoder_layers''': d} ) # Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs init_kwargs.update(__A ) # Copy weights a :Any = teacher.config_class(**__A ) a :str = AutoModelForSeqaSeqLM.from_config(__A ) # Start by copying the full teacher state dict this will copy the first N teacher layers to the student. a :Optional[int] = student.load_state_dict(teacher.state_dict() , strict=__A ) assert info.missing_keys == [], info.missing_keys # every student key should have a teacher keys. if copy_first_teacher_layers: # Our copying is done. We just log and save a , a :Optional[int] = list(range(__A ) ), list(range(__A ) ) logger.info( F'''Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to''' F''' {save_path}''' ) student.save_pretrained(__A ) return student, e_layers_to_copy, d_layers_to_copy # Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer. if e_layers_to_copy is None: a :Dict = pick_layers_to_copy(__A , __A ) if d_layers_to_copy is None: a :Tuple = pick_layers_to_copy(__A , __A ) try: if hasattr( __A , '''prophetnet''' ): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , __A ) copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , __A ) else: copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , __A ) copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , __A ) except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block copy_layers(teacher.encoder.block , student.encoder.block , __A ) copy_layers(teacher.decoder.block , student.decoder.block , __A ) logger.info( F'''Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}''' ) a :Optional[Any] = { '''teacher_type''': teacher.config.model_type, '''copied_encoder_layers''': e_layers_to_copy, '''copied_decoder_layers''': d_layers_to_copy, } student.save_pretrained(__A ) # Save information about copying for easier reproducibility return student, e_layers_to_copy, d_layers_to_copy if __name__ == "__main__": fire.Fire(create_student_by_copying_alternating_layers)
94
'''simple docstring''' import logging import os from .state import PartialState class UpperCAmelCase__ ( logging.LoggerAdapter): @staticmethod def __lowerCamelCase ( lowercase ) -> Dict: __UpperCamelCase = PartialState() return not main_process_only or (main_process_only and state.is_main_process) def __lowerCamelCase ( self , lowercase , lowercase , *lowercase , **lowercase ) -> List[str]: if PartialState._shared_state == {}: raise RuntimeError( """You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.""" ) __UpperCamelCase = kwargs.pop("""main_process_only""" , lowercase ) __UpperCamelCase = kwargs.pop("""in_order""" , lowercase ) if self.isEnabledFor(lowercase ): if self._should_log(lowercase ): __UpperCamelCase , __UpperCamelCase = self.process(lowercase , lowercase ) self.logger.log(lowercase , lowercase , *lowercase , **lowercase ) elif in_order: __UpperCamelCase = PartialState() for i in range(state.num_processes ): if i == state.process_index: __UpperCamelCase , __UpperCamelCase = self.process(lowercase , lowercase ) self.logger.log(lowercase , lowercase , *lowercase , **lowercase ) state.wait_for_everyone() def _lowercase ( __A ,__A = None ): '''simple docstring''' if log_level is None: __UpperCamelCase = os.environ.get("""ACCELERATE_LOG_LEVEL""" ,__A ) __UpperCamelCase = logging.getLogger(__A ) if log_level is not None: logger.setLevel(log_level.upper() ) logger.root.setLevel(log_level.upper() ) return MultiProcessAdapter(__A ,{} )
349
0
'''simple docstring''' from statistics import mean, stdev def lowerCAmelCase_ ( snake_case__ , snake_case__ = 3 ): '''simple docstring''' A : Optional[int] = min(__A ) A : Optional[int] = max(__A ) # normalize data return [round((x - x_min) / (x_max - x_min) , __A ) for x in data] def lowerCAmelCase_ ( snake_case__ , snake_case__ = 3 ): '''simple docstring''' A : List[Any] = mean(__A ) A : List[Any] = stdev(__A ) # standardize data return [round((x - mu) / (sigma) , __A ) for x in data]
3
'''simple docstring''' import logging import random import ray from transformers import RagConfig, RagRetriever, RagTokenizer from transformers.models.rag.retrieval_rag import CustomHFIndex a__ : Optional[Any] = logging.getLogger(__name__) class UpperCAmelCase__ : def __init__( self ) -> Any: __UpperCamelCase = False def __lowerCamelCase ( self , lowercase , lowercase , lowercase , lowercase ) -> str: if not self.initialized: __UpperCamelCase = RagRetriever( lowercase , question_encoder_tokenizer=lowercase , generator_tokenizer=lowercase , index=lowercase , init_retrieval=lowercase , ) __UpperCamelCase = True def __lowerCamelCase ( self ) -> Optional[Any]: self.retriever.index.init_index() def __lowerCamelCase ( self , lowercase , lowercase ) -> Dict: __UpperCamelCase , __UpperCamelCase = self.retriever._main_retrieve(lowercase , lowercase ) return doc_ids, retrieved_doc_embeds class UpperCAmelCase__ ( UpperCAmelCase_): def __init__( self , lowercase , lowercase , lowercase , lowercase , lowercase=None ) -> List[Any]: if index is not None and index.is_initialized() and len(lowercase ) > 0: raise ValueError( """When using Ray for distributed fine-tuning, """ """you'll need to provide the paths instead, """ """as the dataset and the index are loaded """ """separately. More info in examples/rag/use_own_knowledge_dataset.py """ ) super().__init__( lowercase , question_encoder_tokenizer=lowercase , generator_tokenizer=lowercase , index=lowercase , init_retrieval=lowercase , ) __UpperCamelCase = retrieval_workers if len(self.retrieval_workers ) > 0: ray.get( [ worker.create_rag_retriever.remote(lowercase , lowercase , lowercase , lowercase ) for worker in self.retrieval_workers ] ) def __lowerCamelCase ( self ) -> Dict: logger.info("""initializing retrieval""" ) if len(self.retrieval_workers ) > 0: ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers] ) else: # Non-distributed training. Load index into this same process. self.index.init_index() def __lowerCamelCase ( self , lowercase , lowercase ) -> List[str]: if len(self.retrieval_workers ) > 0: # Select a random retrieval actor. __UpperCamelCase = self.retrieval_workers[random.randint(0 , len(self.retrieval_workers ) - 1 )] __UpperCamelCase , __UpperCamelCase = ray.get(random_worker.retrieve.remote(lowercase , lowercase ) ) else: __UpperCamelCase , __UpperCamelCase = self._main_retrieve(lowercase , lowercase ) return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(lowercase ) @classmethod def __lowerCamelCase ( cls , lowercase , lowercase=None , **lowercase ) -> Any: return super(lowercase , cls ).get_tokenizers(lowercase , lowercase , **lowercase ) @classmethod def __lowerCamelCase ( cls , lowercase , lowercase , lowercase=None , **lowercase ) -> int: __UpperCamelCase = kwargs.pop("""config""" , lowercase ) or RagConfig.from_pretrained(lowercase , **lowercase ) __UpperCamelCase = RagTokenizer.from_pretrained(lowercase , config=lowercase ) __UpperCamelCase = rag_tokenizer.question_encoder __UpperCamelCase = rag_tokenizer.generator if indexed_dataset is not None: __UpperCamelCase = """custom""" __UpperCamelCase = CustomHFIndex(config.retrieval_vector_size , lowercase ) else: __UpperCamelCase = cls._build_index(lowercase ) return cls( lowercase , question_encoder_tokenizer=lowercase , generator_tokenizer=lowercase , retrieval_workers=lowercase , index=lowercase , )
349
0
import doctest import sys import warnings from os.path import abspath, dirname, join import _pytest from transformers.testing_utils import HfDoctestModule, HfDocTestParser # allow having multiple repository checkouts and not needing to remember to rerun # 'pip install -e .[dev]' when switching between checkouts and running tests. _lowerCamelCase =abspath(join(dirname(__file__), "src")) sys.path.insert(1, git_repo_path) # silence FutureWarning warnings in tests since often we can't act on them until # they become normal warnings - i.e. the tests still need to test the current functionality warnings.simplefilter(action="ignore", category=FutureWarning) def snake_case__ ( lowerCAmelCase_ ): """simple docstring""" config.addinivalue_line( 'markers', 'is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested' ) config.addinivalue_line( 'markers', 'is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested' ) config.addinivalue_line('markers', 'is_pipeline_test: mark test to run only when pipelines are tested' ) config.addinivalue_line('markers', 'is_staging_test: mark test to run only in the staging environment' ) config.addinivalue_line('markers', 'accelerate_tests: mark test that require accelerate' ) config.addinivalue_line('markers', 'tool_tests: mark the tool tests that are run on their specific schedule' ) def snake_case__ ( lowerCAmelCase_ ): """simple docstring""" from transformers.testing_utils import pytest_addoption_shared pytest_addoption_shared(__A ) def snake_case__ ( lowerCAmelCase_ ): """simple docstring""" from transformers.testing_utils import pytest_terminal_summary_main SCREAMING_SNAKE_CASE =terminalreporter.config.getoption('--make-reports' ) if make_reports: pytest_terminal_summary_main(__A, id=__A ) def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ ): """simple docstring""" if exitstatus == 5: SCREAMING_SNAKE_CASE =0 # Doctest custom flag to ignore output. _lowerCamelCase =doctest.register_optionflag("IGNORE_RESULT") _lowerCamelCase =doctest.OutputChecker class a_ ( UpperCAmelCase_ ): """simple docstring""" def _lowerCAmelCase ( self : Optional[int] ,snake_case : Any ,snake_case : str ,snake_case : int ): if IGNORE_RESULT & optionflags: return True return OutputChecker.check_output(self ,snake_case ,snake_case ,snake_case ) _lowerCamelCase =CustomOutputChecker _lowerCamelCase =HfDoctestModule _lowerCamelCase =HfDocTestParser
334
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_squeezebert import SqueezeBertTokenizer a__ : Optional[Any] = logging.get_logger(__name__) a__ : Union[str, Any] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} a__ : Any = { 'vocab_file': { 'squeezebert/squeezebert-uncased': ( 'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt' ), 'squeezebert/squeezebert-mnli': 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt', 'squeezebert/squeezebert-mnli-headless': ( 'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'squeezebert/squeezebert-uncased': ( 'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json' ), 'squeezebert/squeezebert-mnli': ( 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json' ), 'squeezebert/squeezebert-mnli-headless': ( 'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json' ), }, } a__ : Optional[Any] = { 'squeezebert/squeezebert-uncased': 5_1_2, 'squeezebert/squeezebert-mnli': 5_1_2, 'squeezebert/squeezebert-mnli-headless': 5_1_2, } a__ : Optional[Any] = { 'squeezebert/squeezebert-uncased': {'do_lower_case': True}, 'squeezebert/squeezebert-mnli': {'do_lower_case': True}, 'squeezebert/squeezebert-mnli-headless': {'do_lower_case': True}, } class UpperCAmelCase__ ( UpperCAmelCase_): __SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES __SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP __SCREAMING_SNAKE_CASE = PRETRAINED_INIT_CONFIGURATION __SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __SCREAMING_SNAKE_CASE = SqueezeBertTokenizer def __init__( self , lowercase=None , lowercase=None , lowercase=True , lowercase="[UNK]" , lowercase="[SEP]" , lowercase="[PAD]" , lowercase="[CLS]" , lowercase="[MASK]" , lowercase=True , lowercase=None , **lowercase , ) -> Tuple: super().__init__( lowercase , tokenizer_file=lowercase , do_lower_case=lowercase , unk_token=lowercase , sep_token=lowercase , pad_token=lowercase , cls_token=lowercase , mask_token=lowercase , tokenize_chinese_chars=lowercase , strip_accents=lowercase , **lowercase , ) __UpperCamelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get("""lowercase""" , lowercase ) != do_lower_case or normalizer_state.get("""strip_accents""" , lowercase ) != strip_accents or normalizer_state.get("""handle_chinese_chars""" , lowercase ) != tokenize_chinese_chars ): __UpperCamelCase = getattr(lowercase , normalizer_state.pop("""type""" ) ) __UpperCamelCase = do_lower_case __UpperCamelCase = strip_accents __UpperCamelCase = tokenize_chinese_chars __UpperCamelCase = normalizer_class(**lowercase ) __UpperCamelCase = do_lower_case def __lowerCamelCase ( self , lowercase , lowercase=None ) -> Tuple: __UpperCamelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def __lowerCamelCase ( self , lowercase , lowercase = None ) -> List[int]: __UpperCamelCase = [self.sep_token_id] __UpperCamelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __lowerCamelCase ( self , lowercase , lowercase = None ) -> Tuple[str]: __UpperCamelCase = self._tokenizer.model.save(lowercase , name=lowercase ) return tuple(lowercase )
349
0
import json import os import unittest from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, require_torch from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class A_ (UpperCAmelCase_ , unittest.TestCase ): UpperCAmelCase__ = LEDTokenizer UpperCAmelCase__ = LEDTokenizerFast UpperCAmelCase__ = True def _lowercase ( self ): '''simple docstring''' super().setUp() UpperCAmelCase = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''<unk>''', ] UpperCAmelCase = dict(zip(_A , range(len(_A ) ) ) ) UpperCAmelCase = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] UpperCAmelCase = {'''unk_token''': '''<unk>'''} UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(_A ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(_A ) ) def _lowercase ( self , **_A ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **_A ) def _lowercase ( self , **_A ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **_A ) def _lowercase ( self , _A ): '''simple docstring''' return "lower newer", "lower newer" @cached_property def _lowercase ( self ): '''simple docstring''' return LEDTokenizer.from_pretrained('''allenai/led-base-16384''' ) @cached_property def _lowercase ( self ): '''simple docstring''' return LEDTokenizerFast.from_pretrained('''allenai/led-base-16384''' ) @require_torch def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.'''] UpperCAmelCase = [0, 2_5_0, 2_5_1, 1_7_8_1_8, 1_3, 3_9_1_8_6, 1_9_3_8, 4, 2] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: UpperCAmelCase = tokenizer(_A , max_length=len(_A ) , padding=_A , return_tensors='''pt''' ) self.assertIsInstance(_A , _A ) self.assertEqual((2, 9) , batch.input_ids.shape ) self.assertEqual((2, 9) , batch.attention_mask.shape ) UpperCAmelCase = batch.input_ids.tolist()[0] self.assertListEqual(_A , _A ) @require_torch def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.'''] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: UpperCAmelCase = tokenizer(_A , padding=_A , return_tensors='''pt''' ) self.assertIn('''input_ids''' , _A ) self.assertIn('''attention_mask''' , _A ) self.assertNotIn('''labels''' , _A ) self.assertNotIn('''decoder_attention_mask''' , _A ) @require_torch def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = [ '''Summary of the text.''', '''Another summary.''', ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: UpperCAmelCase = tokenizer(text_target=_A , max_length=3_2 , padding='''max_length''' , return_tensors='''pt''' ) self.assertEqual(3_2 , targets['''input_ids'''].shape[1] ) @require_torch def _lowercase ( self ): '''simple docstring''' for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: UpperCAmelCase = tokenizer( ['''I am a small frog''' * 1_0_2_4, '''I am a small frog'''] , padding=_A , truncation=_A , return_tensors='''pt''' ) self.assertIsInstance(_A , _A ) self.assertEqual(batch.input_ids.shape , (2, 5_1_2_2) ) @require_torch def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = ['''A long paragraph for summarization.'''] UpperCAmelCase = [ '''Summary of the text.''', ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: UpperCAmelCase = tokenizer(_A , return_tensors='''pt''' ) UpperCAmelCase = tokenizer(text_target=_A , return_tensors='''pt''' ) UpperCAmelCase = inputs['''input_ids'''] UpperCAmelCase = targets['''input_ids'''] self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() ) self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() ) @require_torch def _lowercase ( self ): '''simple docstring''' for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: UpperCAmelCase = ['''Summary of the text.''', '''Another summary.'''] UpperCAmelCase = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]] UpperCAmelCase = tokenizer(_A , padding=_A ) UpperCAmelCase = [[0] * len(_A ) for x in encoded_output['''input_ids''']] UpperCAmelCase = tokenizer.pad(_A ) self.assertSequenceEqual(outputs['''global_attention_mask'''] , _A ) def _lowercase ( self ): '''simple docstring''' pass def _lowercase ( self ): '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(_A , **_A ) UpperCAmelCase = self.tokenizer_class.from_pretrained(_A , **_A ) UpperCAmelCase = '''A, <mask> AllenNLP sentence.''' UpperCAmelCase = tokenizer_r.encode_plus(_A , add_special_tokens=_A , return_token_type_ids=_A ) UpperCAmelCase = tokenizer_p.encode_plus(_A , add_special_tokens=_A , return_token_type_ids=_A ) self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) ) self.assertEqual( sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , ) UpperCAmelCase = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] ) UpperCAmelCase = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] ) self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] ) self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] ) self.assertSequenceEqual( _A , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] ) self.assertSequenceEqual( _A , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
273
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_torch_available, ) a__ : str = { 'configuration_trocr': ['TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TrOCRConfig'], 'processing_trocr': ['TrOCRProcessor'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : int = [ 'TROCR_PRETRAINED_MODEL_ARCHIVE_LIST', 'TrOCRForCausalLM', 'TrOCRPreTrainedModel', ] if TYPE_CHECKING: from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig from .processing_trocr import TrOCRProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel else: import sys a__ : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
349
0
from dataclasses import dataclass from typing import Optional import numpy as np import torch import torch.nn as nn from ..utils import BaseOutput, is_torch_version, randn_tensor from .attention_processor import SpatialNorm from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block @dataclass class SCREAMING_SNAKE_CASE ( UpperCAmelCase_ ): __lowerCamelCase : Optional[Any] =42 class SCREAMING_SNAKE_CASE ( nn.Module ): def __init__( self : Optional[Any] , __lowercase : Union[str, Any]=3 , __lowercase : Optional[int]=3 , __lowercase : Optional[Any]=("DownEncoderBlock2D",) , __lowercase : List[str]=(64,) , __lowercase : Any=2 , __lowercase : int=32 , __lowercase : Optional[Any]="silu" , __lowercase : Dict=True , ): '''simple docstring''' super().__init__() __a = layers_per_block __a = torch.nn.Convad( __lowercase , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , ) __a = None __a = nn.ModuleList([] ) # down __a = block_out_channels[0] for i, down_block_type in enumerate(__lowercase ): __a = output_channel __a = block_out_channels[i] __a = i == len(__lowercase ) - 1 __a = get_down_block( __lowercase , num_layers=self.layers_per_block , in_channels=__lowercase , out_channels=__lowercase , add_downsample=not is_final_block , resnet_eps=1E-6 , downsample_padding=0 , resnet_act_fn=__lowercase , resnet_groups=__lowercase , attention_head_dim=__lowercase , temb_channels=__lowercase , ) self.down_blocks.append(__lowercase ) # mid __a = UNetMidBlockaD( in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=__lowercase , output_scale_factor=1 , resnet_time_scale_shift="""default""" , attention_head_dim=block_out_channels[-1] , resnet_groups=__lowercase , temb_channels=__lowercase , ) # out __a = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=__lowercase , eps=1E-6 ) __a = nn.SiLU() __a = 2 * out_channels if double_z else out_channels __a = nn.Convad(block_out_channels[-1] , __lowercase , 3 , padding=1 ) __a = False def UpperCamelCase_ ( self : List[str] , __lowercase : Optional[Any] ): '''simple docstring''' __a = x __a = self.conv_in(__lowercase ) if self.training and self.gradient_checkpointing: def create_custom_forward(__lowercase : Any ): def custom_forward(*__lowercase : Any ): return module(*__lowercase ) return custom_forward # down if is_torch_version(""">=""" , """1.11.0""" ): for down_block in self.down_blocks: __a = torch.utils.checkpoint.checkpoint( create_custom_forward(__lowercase ) , __lowercase , use_reentrant=__lowercase ) # middle __a = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) , __lowercase , use_reentrant=__lowercase ) else: for down_block in self.down_blocks: __a = torch.utils.checkpoint.checkpoint(create_custom_forward(__lowercase ) , __lowercase ) # middle __a = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , __lowercase ) else: # down for down_block in self.down_blocks: __a = down_block(__lowercase ) # middle __a = self.mid_block(__lowercase ) # post-process __a = self.conv_norm_out(__lowercase ) __a = self.conv_act(__lowercase ) __a = self.conv_out(__lowercase ) return sample class SCREAMING_SNAKE_CASE ( nn.Module ): def __init__( self : str , __lowercase : Optional[int]=3 , __lowercase : List[str]=3 , __lowercase : Union[str, Any]=("UpDecoderBlock2D",) , __lowercase : List[Any]=(64,) , __lowercase : Tuple=2 , __lowercase : List[Any]=32 , __lowercase : str="silu" , __lowercase : Union[str, Any]="group" , ): '''simple docstring''' super().__init__() __a = layers_per_block __a = nn.Convad( __lowercase , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , ) __a = None __a = nn.ModuleList([] ) __a = in_channels if norm_type == """spatial""" else None # mid __a = UNetMidBlockaD( in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=__lowercase , output_scale_factor=1 , resnet_time_scale_shift="""default""" if norm_type == """group""" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=__lowercase , temb_channels=__lowercase , ) # up __a = list(reversed(__lowercase ) ) __a = reversed_block_out_channels[0] for i, up_block_type in enumerate(__lowercase ): __a = output_channel __a = reversed_block_out_channels[i] __a = i == len(__lowercase ) - 1 __a = get_up_block( __lowercase , num_layers=self.layers_per_block + 1 , in_channels=__lowercase , out_channels=__lowercase , prev_output_channel=__lowercase , add_upsample=not is_final_block , resnet_eps=1E-6 , resnet_act_fn=__lowercase , resnet_groups=__lowercase , attention_head_dim=__lowercase , temb_channels=__lowercase , resnet_time_scale_shift=__lowercase , ) self.up_blocks.append(__lowercase ) __a = output_channel # out if norm_type == "spatial": __a = SpatialNorm(block_out_channels[0] , __lowercase ) else: __a = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=__lowercase , eps=1E-6 ) __a = nn.SiLU() __a = nn.Convad(block_out_channels[0] , __lowercase , 3 , padding=1 ) __a = False def UpperCamelCase_ ( self : List[Any] , __lowercase : str , __lowercase : Union[str, Any]=None ): '''simple docstring''' __a = z __a = self.conv_in(__lowercase ) __a = next(iter(self.up_blocks.parameters() ) ).dtype if self.training and self.gradient_checkpointing: def create_custom_forward(__lowercase : Union[str, Any] ): def custom_forward(*__lowercase : str ): return module(*__lowercase ) return custom_forward if is_torch_version(""">=""" , """1.11.0""" ): # middle __a = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) , __lowercase , __lowercase , use_reentrant=__lowercase ) __a = sample.to(__lowercase ) # up for up_block in self.up_blocks: __a = torch.utils.checkpoint.checkpoint( create_custom_forward(__lowercase ) , __lowercase , __lowercase , use_reentrant=__lowercase ) else: # middle __a = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) , __lowercase , __lowercase ) __a = sample.to(__lowercase ) # up for up_block in self.up_blocks: __a = torch.utils.checkpoint.checkpoint(create_custom_forward(__lowercase ) , __lowercase , __lowercase ) else: # middle __a = self.mid_block(__lowercase , __lowercase ) __a = sample.to(__lowercase ) # up for up_block in self.up_blocks: __a = up_block(__lowercase , __lowercase ) # post-process if latent_embeds is None: __a = self.conv_norm_out(__lowercase ) else: __a = self.conv_norm_out(__lowercase , __lowercase ) __a = self.conv_act(__lowercase ) __a = self.conv_out(__lowercase ) return sample class SCREAMING_SNAKE_CASE ( nn.Module ): def __init__( self : Any , __lowercase : Tuple , __lowercase : Union[str, Any] , __lowercase : Optional[int] , __lowercase : Optional[int]=None , __lowercase : Tuple="random" , __lowercase : str=False , __lowercase : List[str]=True ): '''simple docstring''' super().__init__() __a = n_e __a = vq_embed_dim __a = beta __a = legacy __a = nn.Embedding(self.n_e , self.vq_embed_dim ) self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e ) __a = remap if self.remap is not None: self.register_buffer("""used""" , torch.tensor(np.load(self.remap ) ) ) __a = self.used.shape[0] __a = unknown_index # "random" or "extra" or integer if self.unknown_index == "extra": __a = self.re_embed __a = self.re_embed + 1 print( F"Remapping {self.n_e} indices to {self.re_embed} indices. " F"Using {self.unknown_index} for unknown indices." ) else: __a = n_e __a = sane_index_shape def UpperCamelCase_ ( self : List[Any] , __lowercase : Dict ): '''simple docstring''' __a = inds.shape assert len(__lowercase ) > 1 __a = inds.reshape(ishape[0] , -1 ) __a = self.used.to(__lowercase ) __a = (inds[:, :, None] == used[None, None, ...]).long() __a = match.argmax(-1 ) __a = match.sum(2 ) < 1 if self.unknown_index == "random": __a = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device ) else: __a = self.unknown_index return new.reshape(__lowercase ) def UpperCamelCase_ ( self : List[Any] , __lowercase : Dict ): '''simple docstring''' __a = inds.shape assert len(__lowercase ) > 1 __a = inds.reshape(ishape[0] , -1 ) __a = self.used.to(__lowercase ) if self.re_embed > self.used.shape[0]: # extra token __a = 0 # simply set to zero __a = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , __lowercase ) return back.reshape(__lowercase ) def UpperCamelCase_ ( self : int , __lowercase : int ): '''simple docstring''' # reshape z -> (batch, height, width, channel) and flatten __a = z.permute(0 , 2 , 3 , 1 ).contiguous() __a = z.view(-1 , self.vq_embed_dim ) # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z __a = torch.argmin(torch.cdist(__lowercase , self.embedding.weight ) , dim=1 ) __a = self.embedding(__lowercase ).view(z.shape ) __a = None __a = None # compute loss for embedding if not self.legacy: __a = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 ) else: __a = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 ) # preserve gradients __a = z + (z_q - z).detach() # reshape back to match original input shape __a = z_q.permute(0 , 3 , 1 , 2 ).contiguous() if self.remap is not None: __a = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis __a = self.remap_to_used(__lowercase ) __a = min_encoding_indices.reshape(-1 , 1 ) # flatten if self.sane_index_shape: __a = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] ) return z_q, loss, (perplexity, min_encodings, min_encoding_indices) def UpperCamelCase_ ( self : str , __lowercase : Optional[Any] , __lowercase : Optional[Any] ): '''simple docstring''' # shape specifying (batch, height, width, channel) if self.remap is not None: __a = indices.reshape(shape[0] , -1 ) # add batch axis __a = self.unmap_to_all(__lowercase ) __a = indices.reshape(-1 ) # flatten again # get quantized latent vectors __a = self.embedding(__lowercase ) if shape is not None: __a = z_q.view(__lowercase ) # reshape back to match original input shape __a = z_q.permute(0 , 3 , 1 , 2 ).contiguous() return z_q class SCREAMING_SNAKE_CASE ( UpperCAmelCase_ ): def __init__( self : Dict , __lowercase : Dict , __lowercase : str=False ): '''simple docstring''' __a = parameters __a , __a = torch.chunk(__lowercase , 2 , dim=1 ) __a = torch.clamp(self.logvar , -30.0 , 20.0 ) __a = deterministic __a = torch.exp(0.5 * self.logvar ) __a = torch.exp(self.logvar ) if self.deterministic: __a = __a = torch.zeros_like( self.mean , device=self.parameters.device , dtype=self.parameters.dtype ) def UpperCamelCase_ ( self : Optional[int] , __lowercase : List[Any] = None ): '''simple docstring''' # make sure sample is on the same device as the parameters and has same dtype __a = randn_tensor( self.mean.shape , generator=__lowercase , device=self.parameters.device , dtype=self.parameters.dtype ) __a = self.mean + self.std * sample return x def UpperCamelCase_ ( self : List[Any] , __lowercase : Dict=None ): '''simple docstring''' if self.deterministic: return torch.Tensor([0.0] ) else: if other is None: return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] ) else: return 0.5 * torch.sum( torch.pow(self.mean - other.mean , 2 ) / other.var + self.var / other.var - 1.0 - self.logvar + other.logvar , dim=[1, 2, 3] , ) def UpperCamelCase_ ( self : Tuple , __lowercase : List[Any] , __lowercase : Optional[int]=[1, 2, 3] ): '''simple docstring''' if self.deterministic: return torch.Tensor([0.0] ) __a = np.log(2.0 * np.pi ) return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=__lowercase ) def UpperCamelCase_ ( self : Any ): '''simple docstring''' return self.mean
302
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig a__ : Union[str, Any] = { 'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/config.json', 'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/config.json', 'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/config.json', 'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json', 'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/config.json', 'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/config.json', 'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/config.json', 'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json', } class UpperCAmelCase__ ( UpperCAmelCase_): __SCREAMING_SNAKE_CASE = '''albert''' def __init__( self , lowercase=3_0_0_0_0 , lowercase=1_2_8 , lowercase=4_0_9_6 , lowercase=1_2 , lowercase=1 , lowercase=6_4 , lowercase=1_6_3_8_4 , lowercase=1 , lowercase="gelu_new" , lowercase=0 , lowercase=0 , lowercase=5_1_2 , lowercase=2 , lowercase=0.02 , lowercase=1E-12 , lowercase=0.1 , lowercase="absolute" , lowercase=0 , lowercase=2 , lowercase=3 , **lowercase , ) -> Any: super().__init__(pad_token_id=lowercase , bos_token_id=lowercase , eos_token_id=lowercase , **lowercase ) __UpperCamelCase = vocab_size __UpperCamelCase = embedding_size __UpperCamelCase = hidden_size __UpperCamelCase = num_hidden_layers __UpperCamelCase = num_hidden_groups __UpperCamelCase = num_attention_heads __UpperCamelCase = inner_group_num __UpperCamelCase = hidden_act __UpperCamelCase = intermediate_size __UpperCamelCase = hidden_dropout_prob __UpperCamelCase = attention_probs_dropout_prob __UpperCamelCase = max_position_embeddings __UpperCamelCase = type_vocab_size __UpperCamelCase = initializer_range __UpperCamelCase = layer_norm_eps __UpperCamelCase = classifier_dropout_prob __UpperCamelCase = position_embedding_type class UpperCAmelCase__ ( UpperCAmelCase_): @property def __lowerCamelCase ( self ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": __UpperCamelCase = {0: """batch""", 1: """choice""", 2: """sequence"""} else: __UpperCamelCase = {0: """batch""", 1: """sequence"""} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis), ] )
349
0
"""simple docstring""" from collections import OrderedDict from typing import Any, List, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import logging lowercase__ = logging.get_logger(__name__) lowercase__ = { 'EleutherAI/gpt-j-6B': 'https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json', # See all GPT-J models at https://huggingface.co/models?filter=gpt_j } class __lowerCamelCase ( UpperCAmelCase_ ): '''simple docstring''' a_ : Optional[Any] = """gptj""" a_ : Any = { """max_position_embeddings""": """n_positions""", """hidden_size""": """n_embd""", """num_attention_heads""": """n_head""", """num_hidden_layers""": """n_layer""", } def __init__( self : Tuple , a_ : int=5_04_00 , a_ : Union[str, Any]=20_48 , a_ : List[Any]=40_96 , a_ : Any=28 , a_ : str=16 , a_ : Any=64 , a_ : str=None , a_ : Any="gelu_new" , a_ : Optional[Any]=0.0 , a_ : Dict=0.0 , a_ : Tuple=0.0 , a_ : List[str]=1e-5 , a_ : Any=0.02 , a_ : Optional[Any]=True , a_ : Dict=5_02_56 , a_ : Tuple=5_02_56 , a_ : Optional[Any]=False , **a_ : int , ): lowerCAmelCase_ : Tuple = vocab_size lowerCAmelCase_ : Union[str, Any] = n_positions lowerCAmelCase_ : List[Any] = n_embd lowerCAmelCase_ : Union[str, Any] = n_layer lowerCAmelCase_ : List[Any] = n_head lowerCAmelCase_ : List[str] = n_inner lowerCAmelCase_ : Optional[int] = rotary_dim lowerCAmelCase_ : str = activation_function lowerCAmelCase_ : Any = resid_pdrop lowerCAmelCase_ : Optional[Any] = embd_pdrop lowerCAmelCase_ : Tuple = attn_pdrop lowerCAmelCase_ : Tuple = layer_norm_epsilon lowerCAmelCase_ : Dict = initializer_range lowerCAmelCase_ : Optional[int] = use_cache lowerCAmelCase_ : str = bos_token_id lowerCAmelCase_ : Optional[Any] = eos_token_id super().__init__( bos_token_id=a_ , eos_token_id=a_ , tie_word_embeddings=a_ , **a_ ) class __lowerCamelCase ( UpperCAmelCase_ ): '''simple docstring''' def __init__( self : Union[str, Any] , a_ : Optional[int] , a_ : int = "default" , a_ : int = None , a_ : List[str] = False , ): super().__init__(a_ , task=a_ , patching_specs=a_ , use_past=a_ ) if not getattr(self._config , "pad_token_id" , a_ ): # TODO: how to do that better? lowerCAmelCase_ : Tuple = 0 @property def lowerCamelCase ( self : str ): lowerCAmelCase_ : str = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}} ) if self.use_past: self.fill_with_past_key_values_(a_ , direction="inputs" ) lowerCAmelCase_ : Optional[Any] = {0: "batch", 1: "past_sequence + sequence"} else: lowerCAmelCase_ : Optional[Any] = {0: "batch", 1: "sequence"} return common_inputs @property def lowerCamelCase ( self : str ): return self._config.n_layer @property def lowerCamelCase ( self : Dict ): return self._config.n_head def lowerCamelCase ( self : Any , a_ : int , a_ : List[str] = -1 , a_ : Optional[int] = -1 , a_ : Optional[int] = False , a_ : Dict = None , ): lowerCAmelCase_ : Union[str, Any] = super(a_ , self ).generate_dummy_inputs( a_ , batch_size=a_ , seq_length=a_ , is_pair=a_ , framework=a_ ) # We need to order the input in the way they appears in the forward() lowerCAmelCase_ : Dict = OrderedDict({"input_ids": common_inputs["input_ids"]} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." ) else: import torch lowerCAmelCase_ , lowerCAmelCase_ : Dict = common_inputs["input_ids"].shape # Not using the same length for past_key_values lowerCAmelCase_ : str = seqlen + 2 lowerCAmelCase_ : Tuple = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) lowerCAmelCase_ : int = [ (torch.zeros(a_ ), torch.zeros(a_ )) for _ in range(self.num_layers ) ] lowerCAmelCase_ : Dict = common_inputs["attention_mask"] if self.use_past: lowerCAmelCase_ : List[str] = ordered_inputs["attention_mask"].dtype lowerCAmelCase_ : Union[str, Any] = torch.cat( [ordered_inputs["attention_mask"], torch.ones(a_ , a_ , dtype=a_ )] , dim=1 ) return ordered_inputs @property def lowerCamelCase ( self : Optional[Any] ): return 13
241
'''simple docstring''' import numpy as np from sklearn.datasets import fetch_california_housing from sklearn.metrics import mean_absolute_error, mean_squared_error from sklearn.model_selection import train_test_split from xgboost import XGBRegressor def _lowercase ( __A ): '''simple docstring''' return (data["data"], data["target"]) def _lowercase ( __A ,__A ,__A ): '''simple docstring''' __UpperCamelCase = XGBRegressor(verbosity=0 ,random_state=42 ) xgb.fit(__A ,__A ) # Predict target for test data __UpperCamelCase = xgb.predict(__A ) __UpperCamelCase = predictions.reshape(len(__A ) ,1 ) return predictions def _lowercase ( ): '''simple docstring''' __UpperCamelCase = fetch_california_housing() __UpperCamelCase , __UpperCamelCase = data_handling(__A ) __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = train_test_split( __A ,__A ,test_size=0.25 ,random_state=1 ) __UpperCamelCase = xgboost(__A ,__A ,__A ) # Error printing print(f"Mean Absolute Error : {mean_absolute_error(__A ,__A )}" ) print(f"Mean Square Error : {mean_squared_error(__A ,__A )}" ) if __name__ == "__main__": import doctest doctest.testmod(verbose=True) main()
349
0
from typing import Dict from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available from transformers.testing_utils import ( TestCasePlus, execute_subprocess_async, get_torch_dist_unique_port, require_torch_multi_gpu, require_torch_neuroncore, ) from transformers.training_args import ParallelMode from transformers.utils import logging _UpperCamelCase = logging.get_logger(__name__) if is_torch_available(): import torch from torch import nn from torch.utils.data import Dataset from transformers import Trainer class _lowerCamelCase ( UpperCAmelCase_ ): """simple docstring""" def __init__( self , UpperCAmelCase = 101 ) -> Optional[Any]: '''simple docstring''' __snake_case : str = length def __len__( self ) -> int: '''simple docstring''' return self.length def __getitem__( self , UpperCAmelCase ) -> int: '''simple docstring''' return i class _lowerCamelCase : """simple docstring""" def __call__( self , UpperCAmelCase ) -> Any: '''simple docstring''' return {"input_ids": torch.tensor(UpperCAmelCase ), "labels": torch.tensor(UpperCAmelCase )} class _lowerCamelCase ( nn.Module ): """simple docstring""" def __init__( self ) -> Any: '''simple docstring''' super().__init__() # Add some (unused) params otherwise DDP will complain. __snake_case : Dict = nn.Linear(120 , 80 ) def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase=None ) -> Dict: '''simple docstring''' if labels is not None: return torch.tensor(0.0 , device=input_ids.device ), input_ids else: return input_ids class _lowerCamelCase ( UpperCAmelCase_ ): """simple docstring""" @require_torch_neuroncore def UpperCAmelCase ( self ) -> int: '''simple docstring''' __snake_case : str = F"""--nproc_per_node=2\n --master_port={get_torch_dist_unique_port()}\n {self.test_file_dir}/test_trainer_distributed.py\n """.split() __snake_case : Union[str, Any] = self.get_auto_remove_tmp_dir() __snake_case : int = F"""--output_dir {output_dir}""".split() __snake_case : Dict = ["torchrun"] + distributed_args + args execute_subprocess_async(UpperCAmelCase , env=self.get_env() ) # successful return here == success - any errors would have caused an error in the sub-call class _lowerCamelCase ( UpperCAmelCase_ ): """simple docstring""" @require_torch_multi_gpu def UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' __snake_case : Union[str, Any] = F"""--nproc_per_node={torch.cuda.device_count()}\n --master_port={get_torch_dist_unique_port()}\n {self.test_file_dir}/test_trainer_distributed.py\n """.split() __snake_case : List[Any] = self.get_auto_remove_tmp_dir() __snake_case : Tuple = F"""--output_dir {output_dir}""".split() __snake_case : List[str] = ["torchrun"] + distributed_args + args execute_subprocess_async(UpperCAmelCase , env=self.get_env() ) # successful return here == success - any errors would have caused an error in the sub-call if __name__ == "__main__": # The script below is meant to be run under torch.distributed, on a machine with multiple GPUs: # # PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py _UpperCamelCase = HfArgumentParser((TrainingArguments,)) _UpperCamelCase = parser.parse_args_into_dataclasses()[0] logger.warning( F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, ''' F'''distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}''' ) # Essentially, what we want to verify in the distributed case is that we get all samples back, # in the right order. (this is crucial for prediction for instance) for dataset_length in [101, 40, 7]: _UpperCamelCase = DummyDataset(dataset_length) def lowerCAmelCase__( lowercase : Tuple ) -> Dict: __snake_case : List[Any] = list(range(len(__A ) ) ) __snake_case : List[Any] = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential if not success and training_args.local_rank == 0: logger.warning( "Predictions and/or labels do not match expected results:\n - predictions: " f"""{p.predictions.tolist()}\n - labels: {p.label_ids.tolist()}\n - expected: {sequential}""" ) return {"success": success} _UpperCamelCase = Trainer( model=DummyModel(), args=training_args, data_collator=DummyDataCollator(), eval_dataset=dataset, compute_metrics=compute_metrics, ) _UpperCamelCase = trainer.evaluate() logger.info(metrics) if metrics["eval_success"] is not True: logger.error(metrics) exit(1) _UpperCamelCase = trainer.predict(dataset) logger.info(p.metrics) if p.metrics["test_success"] is not True: logger.error(p.metrics) exit(1) _UpperCamelCase = 2 _UpperCamelCase = trainer.evaluate() logger.info(metrics) if metrics["eval_success"] is not True: logger.error(metrics) exit(1) _UpperCamelCase = trainer.predict(dataset) logger.info(p.metrics) if p.metrics["test_success"] is not True: logger.error(p.metrics) exit(1) _UpperCamelCase = None
326
'''simple docstring''' from __future__ import annotations import unittest from transformers import AutoTokenizer, PegasusConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel @require_tf class UpperCAmelCase__ : __SCREAMING_SNAKE_CASE = PegasusConfig __SCREAMING_SNAKE_CASE = {} __SCREAMING_SNAKE_CASE = '''gelu''' def __init__( self , lowercase , lowercase=1_3 , lowercase=7 , lowercase=True , lowercase=False , lowercase=9_9 , lowercase=3_2 , lowercase=2 , lowercase=4 , lowercase=3_7 , lowercase=0.1 , lowercase=0.1 , lowercase=4_0 , lowercase=2 , lowercase=1 , lowercase=0 , ) -> Any: __UpperCamelCase = parent __UpperCamelCase = batch_size __UpperCamelCase = seq_length __UpperCamelCase = is_training __UpperCamelCase = use_labels __UpperCamelCase = vocab_size __UpperCamelCase = hidden_size __UpperCamelCase = num_hidden_layers __UpperCamelCase = num_attention_heads __UpperCamelCase = intermediate_size __UpperCamelCase = hidden_dropout_prob __UpperCamelCase = attention_probs_dropout_prob __UpperCamelCase = max_position_embeddings __UpperCamelCase = eos_token_id __UpperCamelCase = pad_token_id __UpperCamelCase = bos_token_id def __lowerCamelCase ( self ) -> Dict: __UpperCamelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) __UpperCamelCase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) __UpperCamelCase = tf.concat([input_ids, eos_tensor] , axis=1 ) __UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCamelCase = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) __UpperCamelCase = prepare_pegasus_inputs_dict(lowercase , lowercase , lowercase ) return config, inputs_dict def __lowerCamelCase ( self , lowercase , lowercase ) -> Union[str, Any]: __UpperCamelCase = TFPegasusModel(config=lowercase ).get_decoder() __UpperCamelCase = inputs_dict["""input_ids"""] __UpperCamelCase = input_ids[:1, :] __UpperCamelCase = inputs_dict["""attention_mask"""][:1, :] __UpperCamelCase = inputs_dict["""head_mask"""] __UpperCamelCase = 1 # first forward pass __UpperCamelCase = model(lowercase , attention_mask=lowercase , head_mask=lowercase , use_cache=lowercase ) __UpperCamelCase , __UpperCamelCase = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids __UpperCamelCase = ids_tensor((self.batch_size, 3) , config.vocab_size ) __UpperCamelCase = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and __UpperCamelCase = tf.concat([input_ids, next_tokens] , axis=-1 ) __UpperCamelCase = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) __UpperCamelCase = model(lowercase , attention_mask=lowercase )[0] __UpperCamelCase = model(lowercase , attention_mask=lowercase , past_key_values=lowercase )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice __UpperCamelCase = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) __UpperCamelCase = output_from_no_past[:, -3:, random_slice_idx] __UpperCamelCase = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(lowercase , lowercase , rtol=1E-3 ) def _lowercase ( __A ,__A ,__A ,__A=None ,__A=None ,__A=None ,__A=None ,__A=None ,): '''simple docstring''' if attention_mask is None: __UpperCamelCase = tf.cast(tf.math.not_equal(__A ,config.pad_token_id ) ,tf.inta ) if decoder_attention_mask is None: __UpperCamelCase = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape ,dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] ,config.pad_token_id ) ,tf.inta ), ] ,axis=-1 ,) if head_mask is None: __UpperCamelCase = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: __UpperCamelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: __UpperCamelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class UpperCAmelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase): __SCREAMING_SNAKE_CASE = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else () __SCREAMING_SNAKE_CASE = (TFPegasusForConditionalGeneration,) if is_tf_available() else () __SCREAMING_SNAKE_CASE = ( { '''conversational''': TFPegasusForConditionalGeneration, '''feature-extraction''': TFPegasusModel, '''summarization''': TFPegasusForConditionalGeneration, '''text2text-generation''': TFPegasusForConditionalGeneration, '''translation''': TFPegasusForConditionalGeneration, } if is_tf_available() else {} ) __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = False def __lowerCamelCase ( self ) -> str: __UpperCamelCase = TFPegasusModelTester(self ) __UpperCamelCase = ConfigTester(self , config_class=lowercase ) def __lowerCamelCase ( self ) -> str: self.config_tester.run_common_tests() def __lowerCamelCase ( self ) -> Tuple: __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*lowercase ) @require_sentencepiece @require_tokenizers @require_tf class UpperCAmelCase__ ( unittest.TestCase): __SCREAMING_SNAKE_CASE = [ ''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''', ''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''', ] __SCREAMING_SNAKE_CASE = [ '''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to''' ''' reduce the risk of wildfires.''', '''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''', ] # differs slightly from pytorch, likely due to numerical differences in linear layers __SCREAMING_SNAKE_CASE = '''google/pegasus-xsum''' @cached_property def __lowerCamelCase ( self ) -> int: return AutoTokenizer.from_pretrained(self.model_name ) @cached_property def __lowerCamelCase ( self ) -> str: __UpperCamelCase = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model def __lowerCamelCase ( self , **lowercase ) -> Optional[int]: __UpperCamelCase = self.translate_src_text(**lowercase ) assert self.expected_text == generated_words def __lowerCamelCase ( self , **lowercase ) -> Optional[Any]: __UpperCamelCase = self.tokenizer(self.src_text , **lowercase , padding=lowercase , return_tensors="""tf""" ) __UpperCamelCase = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=lowercase , ) __UpperCamelCase = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=lowercase ) return generated_words @slow def __lowerCamelCase ( self ) -> Dict: self._assert_generated_batch_equal_expected()
349
0
"""simple docstring""" from typing import List, Optional, Tuple, Union import torch from ...utils import logging, randn_tensor from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline a = logging.get_logger(__name__) # pylint: disable=invalid-name class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase_ ): def __init__( self : List[Any] , lowerCAmelCase : Dict , lowerCAmelCase : Tuple ): super().__init__() self.register_modules(unet=lowerCAmelCase , scheduler=lowerCAmelCase ) @torch.no_grad() def __call__( self : int , lowerCAmelCase : Dict = 1 , lowerCAmelCase : Tuple = 100 , lowerCAmelCase : Tuple = None , lowerCAmelCase : List[Any] = None , lowerCAmelCase : int = True , ): if audio_length_in_s is None: lowerCAmelCase = self.unet.config.sample_size / self.unet.config.sample_rate lowerCAmelCase = audio_length_in_s * self.unet.config.sample_rate lowerCAmelCase = 2 ** len(self.unet.up_blocks ) if sample_size < 3 * down_scale_factor: raise ValueError( f'''{audio_length_in_s} is too small. Make sure it\'s bigger or equal to''' f''' {3 * down_scale_factor / self.unet.config.sample_rate}.''' ) lowerCAmelCase = int(lowerCAmelCase ) if sample_size % down_scale_factor != 0: lowerCAmelCase = ( (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1 ) * down_scale_factor logger.info( f'''{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled''' f''' by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising''' """ process.""" ) lowerCAmelCase = int(lowerCAmelCase ) lowerCAmelCase = next(iter(self.unet.parameters() ) ).dtype lowerCAmelCase = (batch_size, self.unet.config.in_channels, sample_size) if isinstance(lowerCAmelCase , lowerCAmelCase ) and len(lowerCAmelCase ) != batch_size: raise ValueError( f'''You have passed a list of generators of length {len(lowerCAmelCase )}, but requested an effective batch''' f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' ) lowerCAmelCase = randn_tensor(lowerCAmelCase , generator=lowerCAmelCase , device=self.device , dtype=lowerCAmelCase ) # set step values self.scheduler.set_timesteps(lowerCAmelCase , device=audio.device ) lowerCAmelCase = self.scheduler.timesteps.to(lowerCAmelCase ) for t in self.progress_bar(self.scheduler.timesteps ): # 1. predict noise model_output lowerCAmelCase = self.unet(lowerCAmelCase , lowerCAmelCase ).sample # 2. compute previous image: x_t -> t_t-1 lowerCAmelCase = self.scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ).prev_sample lowerCAmelCase = audio.clamp(-1 , 1 ).float().cpu().numpy() lowerCAmelCase = audio[:, :, :original_sample_size] if not return_dict: return (audio,) return AudioPipelineOutput(audios=lowerCAmelCase )
155
'''simple docstring''' import string def _lowercase ( __A ): '''simple docstring''' for key in range(len(string.ascii_uppercase ) ): __UpperCamelCase = """""" for symbol in message: if symbol in string.ascii_uppercase: __UpperCamelCase = string.ascii_uppercase.find(__A ) __UpperCamelCase = num - key if num < 0: __UpperCamelCase = num + len(string.ascii_uppercase ) __UpperCamelCase = translated + string.ascii_uppercase[num] else: __UpperCamelCase = translated + symbol print(f"Decryption using Key #{key}: {translated}" ) def _lowercase ( ): '''simple docstring''' __UpperCamelCase = input("""Encrypted message: """ ) __UpperCamelCase = message.upper() decrypt(__A ) if __name__ == "__main__": import doctest doctest.testmod() main()
349
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available lowerCamelCase_ = { 'configuration_conditional_detr': [ 'CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConditionalDetrConfig', 'ConditionalDetrOnnxConfig', ] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ = ['ConditionalDetrFeatureExtractor'] lowerCamelCase_ = ['ConditionalDetrImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ = [ 'CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST', 'ConditionalDetrForObjectDetection', 'ConditionalDetrForSegmentation', 'ConditionalDetrModel', 'ConditionalDetrPreTrainedModel', ] if TYPE_CHECKING: from .configuration_conditional_detr import ( CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP, ConditionalDetrConfig, ConditionalDetrOnnxConfig, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor from .image_processing_conditional_detr import ConditionalDetrImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_conditional_detr import ( CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST, ConditionalDetrForObjectDetection, ConditionalDetrForSegmentation, ConditionalDetrModel, ConditionalDetrPreTrainedModel, ) else: import sys lowerCamelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
268
'''simple docstring''' from collections import OrderedDict from typing import Any, List, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import logging a__ : Optional[Any] = logging.get_logger(__name__) a__ : Dict = { 'EleutherAI/gpt-j-6B': 'https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json', # See all GPT-J models at https://huggingface.co/models?filter=gpt_j } class UpperCAmelCase__ ( UpperCAmelCase_): __SCREAMING_SNAKE_CASE = '''gptj''' __SCREAMING_SNAKE_CASE = { '''max_position_embeddings''': '''n_positions''', '''hidden_size''': '''n_embd''', '''num_attention_heads''': '''n_head''', '''num_hidden_layers''': '''n_layer''', } def __init__( self , lowercase=5_0_4_0_0 , lowercase=2_0_4_8 , lowercase=4_0_9_6 , lowercase=2_8 , lowercase=1_6 , lowercase=6_4 , lowercase=None , lowercase="gelu_new" , lowercase=0.0 , lowercase=0.0 , lowercase=0.0 , lowercase=1E-5 , lowercase=0.02 , lowercase=True , lowercase=5_0_2_5_6 , lowercase=5_0_2_5_6 , lowercase=False , **lowercase , ) -> Tuple: __UpperCamelCase = vocab_size __UpperCamelCase = n_positions __UpperCamelCase = n_embd __UpperCamelCase = n_layer __UpperCamelCase = n_head __UpperCamelCase = n_inner __UpperCamelCase = rotary_dim __UpperCamelCase = activation_function __UpperCamelCase = resid_pdrop __UpperCamelCase = embd_pdrop __UpperCamelCase = attn_pdrop __UpperCamelCase = layer_norm_epsilon __UpperCamelCase = initializer_range __UpperCamelCase = use_cache __UpperCamelCase = bos_token_id __UpperCamelCase = eos_token_id super().__init__( bos_token_id=lowercase , eos_token_id=lowercase , tie_word_embeddings=lowercase , **lowercase ) class UpperCAmelCase__ ( UpperCAmelCase_): def __init__( self , lowercase , lowercase = "default" , lowercase = None , lowercase = False , ) -> List[str]: super().__init__(lowercase , task=lowercase , patching_specs=lowercase , use_past=lowercase ) if not getattr(self._config , """pad_token_id""" , lowercase ): # TODO: how to do that better? __UpperCamelCase = 0 @property def __lowerCamelCase ( self ) -> Mapping[str, Mapping[int, str]]: __UpperCamelCase = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} ) if self.use_past: self.fill_with_past_key_values_(lowercase , direction="""inputs""" ) __UpperCamelCase = {0: """batch""", 1: """past_sequence + sequence"""} else: __UpperCamelCase = {0: """batch""", 1: """sequence"""} return common_inputs @property def __lowerCamelCase ( self ) -> int: return self._config.n_layer @property def __lowerCamelCase ( self ) -> int: return self._config.n_head def __lowerCamelCase ( self , lowercase , lowercase = -1 , lowercase = -1 , lowercase = False , lowercase = None , ) -> Mapping[str, Any]: __UpperCamelCase = super(lowercase , self ).generate_dummy_inputs( lowercase , batch_size=lowercase , seq_length=lowercase , is_pair=lowercase , framework=lowercase ) # We need to order the input in the way they appears in the forward() __UpperCamelCase = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" ) else: import torch __UpperCamelCase , __UpperCamelCase = common_inputs["""input_ids"""].shape # Not using the same length for past_key_values __UpperCamelCase = seqlen + 2 __UpperCamelCase = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) __UpperCamelCase = [ (torch.zeros(lowercase ), torch.zeros(lowercase )) for _ in range(self.num_layers ) ] __UpperCamelCase = common_inputs["""attention_mask"""] if self.use_past: __UpperCamelCase = ordered_inputs["""attention_mask"""].dtype __UpperCamelCase = torch.cat( [ordered_inputs["""attention_mask"""], torch.ones(lowercase , lowercase , dtype=lowercase )] , dim=1 ) return ordered_inputs @property def __lowerCamelCase ( self ) -> int: return 1_3
349
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available SCREAMING_SNAKE_CASE_:str = { 'configuration_lilt': ['LILT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LiltConfig'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE_:Union[str, Any] = [ 'LILT_PRETRAINED_MODEL_ARCHIVE_LIST', 'LiltForQuestionAnswering', 'LiltForSequenceClassification', 'LiltForTokenClassification', 'LiltModel', 'LiltPreTrainedModel', ] if TYPE_CHECKING: from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_lilt import ( LILT_PRETRAINED_MODEL_ARCHIVE_LIST, LiltForQuestionAnswering, LiltForSequenceClassification, LiltForTokenClassification, LiltModel, LiltPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE_:Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
116
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, is_vision_available, ) a__ : int = { 'configuration_layoutlmv3': [ 'LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LayoutLMv3Config', 'LayoutLMv3OnnxConfig', ], 'processing_layoutlmv3': ['LayoutLMv3Processor'], 'tokenization_layoutlmv3': ['LayoutLMv3Tokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : Dict = ['LayoutLMv3TokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : Any = [ 'LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST', 'LayoutLMv3ForQuestionAnswering', 'LayoutLMv3ForSequenceClassification', 'LayoutLMv3ForTokenClassification', 'LayoutLMv3Model', 'LayoutLMv3PreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : str = [ 'TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFLayoutLMv3ForQuestionAnswering', 'TFLayoutLMv3ForSequenceClassification', 'TFLayoutLMv3ForTokenClassification', 'TFLayoutLMv3Model', 'TFLayoutLMv3PreTrainedModel', ] try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : List[Any] = ['LayoutLMv3FeatureExtractor'] a__ : str = ['LayoutLMv3ImageProcessor'] if TYPE_CHECKING: from .configuration_layoutlmva import ( LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig, LayoutLMvaOnnxConfig, ) from .processing_layoutlmva import LayoutLMvaProcessor from .tokenization_layoutlmva import LayoutLMvaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_layoutlmva import ( LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST, LayoutLMvaForQuestionAnswering, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaModel, LayoutLMvaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_layoutlmva import ( TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, TFLayoutLMvaModel, TFLayoutLMvaPreTrainedModel, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor from .image_processing_layoutlmva import LayoutLMvaImageProcessor else: import sys a__ : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
349
0
from __future__ import annotations def UpperCamelCase__ ( A__ ) -> str: if len(__A ) == 0: return array snake_case__ , snake_case__ : Dict = min(__A ), max(__A ) # Compute the variables snake_case__ : Optional[int] = _max - _min + 1 snake_case__ , snake_case__ : int = [0] * holes_range, [0] * holes_range # Make the sorting. for i in array: snake_case__ : Optional[Any] = i - _min snake_case__ : Tuple = i holes_repeat[index] += 1 # Makes the array back by replacing the numbers. snake_case__ : List[Any] = 0 for i in range(__A ): while holes_repeat[i] > 0: snake_case__ : Any = holes[i] index += 1 holes_repeat[i] -= 1 # Returns the sorted array. return array if __name__ == "__main__": import doctest doctest.testmod() lowerCAmelCase__ : Optional[Any] = input('''Enter numbers separated by comma:\n''') lowerCAmelCase__ : List[str] = [int(x) for x in user_input.split(''',''')] print(pigeon_sort(unsorted))
143
'''simple docstring''' def _lowercase ( __A ,__A ): '''simple docstring''' __UpperCamelCase = len(__A ) __UpperCamelCase = [[False] * (required_sum + 1) for _ in range(arr_len + 1 )] # for each arr value, a sum of zero(0) can be formed by not taking any element # hence True/1 for i in range(arr_len + 1 ): __UpperCamelCase = True # sum is not zero and set is empty then false for i in range(1 ,required_sum + 1 ): __UpperCamelCase = False for i in range(1 ,arr_len + 1 ): for j in range(1 ,required_sum + 1 ): if arr[i - 1] > j: __UpperCamelCase = subset[i - 1][j] if arr[i - 1] <= j: __UpperCamelCase = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]] return subset[arr_len][required_sum] if __name__ == "__main__": import doctest doctest.testmod()
349
0
def __lowerCamelCase ( UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Any = 0 , UpperCAmelCase_ : Optional[int] = 0 ): """simple docstring""" a :Union[str, Any] = right or len(__A ) - 1 if left > right: return -1 elif list_data[left] == key: return left elif list_data[right] == key: return right else: return search(__A , __A , left + 1 , right - 1 ) if __name__ == "__main__": import doctest doctest.testmod()
94
'''simple docstring''' import bza import gzip import lzma import os import shutil import struct import tarfile import warnings import zipfile from abc import ABC, abstractmethod from pathlib import Path from typing import Dict, List, Optional, Type, Union from .. import config from .filelock import FileLock from .logging import get_logger a__ : Any = get_logger(__name__) class UpperCAmelCase__ : def __init__( self , lowercase = None ) -> List[str]: __UpperCamelCase = ( os.path.join(lowercase , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH ) __UpperCamelCase = Extractor def __lowerCamelCase ( self , lowercase ) -> str: from .file_utils import hash_url_to_filename # Path where we extract compressed archives # We extract in the cache dir, and get the extracted path name by hashing the original path" __UpperCamelCase = os.path.abspath(lowercase ) return os.path.join(self.extract_dir , hash_url_to_filename(lowercase ) ) def __lowerCamelCase ( self , lowercase , lowercase ) -> bool: return force_extract or ( not os.path.isfile(lowercase ) and not (os.path.isdir(lowercase ) and os.listdir(lowercase )) ) def __lowerCamelCase ( self , lowercase , lowercase = False ) -> str: __UpperCamelCase = self.extractor.infer_extractor_format(lowercase ) if not extractor_format: return input_path __UpperCamelCase = self._get_output_path(lowercase ) if self._do_extract(lowercase , lowercase ): self.extractor.extract(lowercase , lowercase , lowercase ) return output_path class UpperCAmelCase__ ( UpperCAmelCase_): @classmethod @abstractmethod def __lowerCamelCase ( cls , lowercase , **lowercase ) -> bool: ... @staticmethod @abstractmethod def __lowerCamelCase ( lowercase , lowercase ) -> None: ... class UpperCAmelCase__ ( UpperCAmelCase_ , UpperCAmelCase_): __SCREAMING_SNAKE_CASE = [] @staticmethod def __lowerCamelCase ( lowercase , lowercase ) -> int: with open(lowercase , """rb""" ) as f: return f.read(lowercase ) @classmethod def __lowerCamelCase ( cls , lowercase , lowercase = b"" ) -> bool: if not magic_number: __UpperCamelCase = max(len(lowercase ) for cls_magic_number in cls.magic_numbers ) try: __UpperCamelCase = cls.read_magic_number(lowercase , lowercase ) except OSError: return False return any(magic_number.startswith(lowercase ) for cls_magic_number in cls.magic_numbers ) class UpperCAmelCase__ ( UpperCAmelCase_): @classmethod def __lowerCamelCase ( cls , lowercase , **lowercase ) -> bool: return tarfile.is_tarfile(lowercase ) @staticmethod def __lowerCamelCase ( lowercase , lowercase ) -> str: def resolved(lowercase ) -> str: return os.path.realpath(os.path.abspath(lowercase ) ) def badpath(lowercase , lowercase ) -> bool: # joinpath will ignore base if path is absolute return not resolved(os.path.join(lowercase , lowercase ) ).startswith(lowercase ) def badlink(lowercase , lowercase ) -> bool: # Links are interpreted relative to the directory containing the link __UpperCamelCase = resolved(os.path.join(lowercase , os.path.dirname(info.name ) ) ) return badpath(info.linkname , base=lowercase ) __UpperCamelCase = resolved(lowercase ) for finfo in members: if badpath(finfo.name , lowercase ): logger.error(f"Extraction of {finfo.name} is blocked (illegal path)" ) elif finfo.issym() and badlink(lowercase , lowercase ): logger.error(f"Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}" ) elif finfo.islnk() and badlink(lowercase , lowercase ): logger.error(f"Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}" ) else: yield finfo @staticmethod def __lowerCamelCase ( lowercase , lowercase ) -> None: os.makedirs(lowercase , exist_ok=lowercase ) __UpperCamelCase = tarfile.open(lowercase ) tar_file.extractall(lowercase , members=TarExtractor.safemembers(lowercase , lowercase ) ) tar_file.close() class UpperCAmelCase__ ( UpperCAmelCase_): __SCREAMING_SNAKE_CASE = [B'''\x1F\x8B'''] @staticmethod def __lowerCamelCase ( lowercase , lowercase ) -> None: with gzip.open(lowercase , """rb""" ) as gzip_file: with open(lowercase , """wb""" ) as extracted_file: shutil.copyfileobj(lowercase , lowercase ) class UpperCAmelCase__ ( UpperCAmelCase_): __SCREAMING_SNAKE_CASE = [ B'''PK\x03\x04''', B'''PK\x05\x06''', # empty archive B'''PK\x07\x08''', # spanned archive ] @classmethod def __lowerCamelCase ( cls , lowercase , lowercase = b"" ) -> bool: if super().is_extractable(lowercase , magic_number=lowercase ): return True try: # Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives. # From: https://github.com/python/cpython/pull/5053 from zipfile import ( _CD_SIGNATURE, _ECD_DISK_NUMBER, _ECD_DISK_START, _ECD_ENTRIES_TOTAL, _ECD_OFFSET, _ECD_SIZE, _EndRecData, sizeCentralDir, stringCentralDir, structCentralDir, ) with open(lowercase , """rb""" ) as fp: __UpperCamelCase = _EndRecData(lowercase ) if endrec: if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0: return True # Empty zipfiles are still zipfiles elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]: fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir: __UpperCamelCase = fp.read(lowercase ) # CD is where we expect it to be if len(lowercase ) == sizeCentralDir: __UpperCamelCase = struct.unpack(lowercase , lowercase ) # CD is the right size if centdir[_CD_SIGNATURE] == stringCentralDir: return True # First central directory entry has correct magic number return False except Exception: # catch all errors in case future python versions change the zipfile internals return False @staticmethod def __lowerCamelCase ( lowercase , lowercase ) -> None: os.makedirs(lowercase , exist_ok=lowercase ) with zipfile.ZipFile(lowercase , """r""" ) as zip_file: zip_file.extractall(lowercase ) zip_file.close() class UpperCAmelCase__ ( UpperCAmelCase_): __SCREAMING_SNAKE_CASE = [B'''\xFD\x37\x7A\x58\x5A\x00'''] @staticmethod def __lowerCamelCase ( lowercase , lowercase ) -> None: with lzma.open(lowercase ) as compressed_file: with open(lowercase , """wb""" ) as extracted_file: shutil.copyfileobj(lowercase , lowercase ) class UpperCAmelCase__ ( UpperCAmelCase_): __SCREAMING_SNAKE_CASE = [B'''Rar!\x1a\x07\x00''', B'''Rar!\x1a\x07\x01\x00'''] # RAR_ID # RAR5_ID @staticmethod def __lowerCamelCase ( lowercase , lowercase ) -> None: if not config.RARFILE_AVAILABLE: raise ImportError("""Please pip install rarfile""" ) import rarfile os.makedirs(lowercase , exist_ok=lowercase ) __UpperCamelCase = rarfile.RarFile(lowercase ) rf.extractall(lowercase ) rf.close() class UpperCAmelCase__ ( UpperCAmelCase_): __SCREAMING_SNAKE_CASE = [B'''\x28\xb5\x2F\xFD'''] @staticmethod def __lowerCamelCase ( lowercase , lowercase ) -> None: if not config.ZSTANDARD_AVAILABLE: raise ImportError("""Please pip install zstandard""" ) import zstandard as zstd __UpperCamelCase = zstd.ZstdDecompressor() with open(lowercase , """rb""" ) as ifh, open(lowercase , """wb""" ) as ofh: dctx.copy_stream(lowercase , lowercase ) class UpperCAmelCase__ ( UpperCAmelCase_): __SCREAMING_SNAKE_CASE = [B'''\x42\x5A\x68'''] @staticmethod def __lowerCamelCase ( lowercase , lowercase ) -> None: with bza.open(lowercase , """rb""" ) as compressed_file: with open(lowercase , """wb""" ) as extracted_file: shutil.copyfileobj(lowercase , lowercase ) class UpperCAmelCase__ ( UpperCAmelCase_): __SCREAMING_SNAKE_CASE = [B'''\x37\x7A\xBC\xAF\x27\x1C'''] @staticmethod def __lowerCamelCase ( lowercase , lowercase ) -> None: if not config.PY7ZR_AVAILABLE: raise ImportError("""Please pip install py7zr""" ) import pyazr os.makedirs(lowercase , exist_ok=lowercase ) with pyazr.SevenZipFile(lowercase , """r""" ) as archive: archive.extractall(lowercase ) class UpperCAmelCase__ ( UpperCAmelCase_): __SCREAMING_SNAKE_CASE = [B'''\x04\x22\x4D\x18'''] @staticmethod def __lowerCamelCase ( lowercase , lowercase ) -> None: if not config.LZ4_AVAILABLE: raise ImportError("""Please pip install lz4""" ) import lza.frame with lza.frame.open(lowercase , """rb""" ) as compressed_file: with open(lowercase , """wb""" ) as extracted_file: shutil.copyfileobj(lowercase , lowercase ) class UpperCAmelCase__ : # Put zip file to the last, b/c it is possible wrongly detected as zip (I guess it means: as tar or gzip) __SCREAMING_SNAKE_CASE = { "tar": TarExtractor, "gzip": GzipExtractor, "zip": ZipExtractor, "xz": XzExtractor, "rar": RarExtractor, "zstd": ZstdExtractor, "bz2": BzipaExtractor, "7z": SevenZipExtractor, # <Added version="2.4.0"/> "lz4": LzaExtractor, # <Added version="2.4.0"/> } @classmethod def __lowerCamelCase ( cls ) -> Union[str, Any]: return max( len(lowercase ) for extractor in cls.extractors.values() if issubclass(lowercase , lowercase ) for extractor_magic_number in extractor.magic_numbers ) @staticmethod def __lowerCamelCase ( lowercase , lowercase ) -> str: try: return MagicNumberBaseExtractor.read_magic_number(lowercase , magic_number_length=lowercase ) except OSError: return b"" @classmethod def __lowerCamelCase ( cls , lowercase , lowercase = False ) -> bool: warnings.warn( """Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. """ """Use 'infer_extractor_format' instead.""" , category=lowercase , ) __UpperCamelCase = cls.infer_extractor_format(lowercase ) if extractor_format: return True if not return_extractor else (True, cls.extractors[extractor_format]) return False if not return_extractor else (False, None) @classmethod def __lowerCamelCase ( cls , lowercase ) -> str: # <Added version="2.4.0"/> __UpperCamelCase = cls._get_magic_number_max_length() __UpperCamelCase = cls._read_magic_number(lowercase , lowercase ) for extractor_format, extractor in cls.extractors.items(): if extractor.is_extractable(lowercase , magic_number=lowercase ): return extractor_format @classmethod def __lowerCamelCase ( cls , lowercase , lowercase , lowercase = None , lowercase = "deprecated" , ) -> None: os.makedirs(os.path.dirname(lowercase ) , exist_ok=lowercase ) # Prevent parallel extractions __UpperCamelCase = str(Path(lowercase ).with_suffix(""".lock""" ) ) with FileLock(lowercase ): shutil.rmtree(lowercase , ignore_errors=lowercase ) if extractor_format or extractor != "deprecated": if extractor != "deprecated" or not isinstance(lowercase , lowercase ): # passed as positional arg warnings.warn( """Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. """ """Use 'extractor_format' instead.""" , category=lowercase , ) __UpperCamelCase = extractor if extractor != """deprecated""" else extractor_format else: __UpperCamelCase = cls.extractors[extractor_format] return extractor.extract(lowercase , lowercase ) else: warnings.warn( """Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an """ """exception in 3.0.0.""" , category=lowercase , ) for extractor in cls.extractors.values(): if extractor.is_extractable(lowercase ): return extractor.extract(lowercase , lowercase )
349
0
'''simple docstring''' from __future__ import annotations import math import numpy as np from numpy.linalg import norm def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' return math.sqrt(sum(pow(a - b , 2 ) for a, b in zip(__A , __A ) ) ) def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' if dataset.ndim != value_array.ndim: A : str = ( '''Wrong input data\'s dimensions... ''' F'dataset : {dataset.ndim}, value_array : {value_array.ndim}' ) raise ValueError(__A ) try: if dataset.shape[1] != value_array.shape[1]: A : Optional[Any] = ( '''Wrong input data\'s shape... ''' F'dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}' ) raise ValueError(__A ) except IndexError: if dataset.ndim != value_array.ndim: raise TypeError('''Wrong shape''' ) if dataset.dtype != value_array.dtype: A : str = ( '''Input data have different datatype... ''' F'dataset : {dataset.dtype}, value_array : {value_array.dtype}' ) raise TypeError(__A ) A : List[Any] = [] for value in value_array: A : List[str] = euclidean(__A , dataset[0] ) A : Optional[int] = dataset[0].tolist() for dataset_value in dataset[1:]: A : str = euclidean(__A , __A ) if dist > temp_dist: A : Tuple = temp_dist A : Any = dataset_value.tolist() answer.append([vector, dist] ) return answer def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' return np.dot(__A , __A ) / (norm(__A ) * norm(__A )) if __name__ == "__main__": import doctest doctest.testmod()
3
'''simple docstring''' import unittest from transformers import PegasusConfig, PegasusTokenizer, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html a__ : List[str] = 'platform' import jax import jax.numpy as jnp import numpy as np from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel @require_flax class UpperCAmelCase__ : __SCREAMING_SNAKE_CASE = PegasusConfig __SCREAMING_SNAKE_CASE = {} __SCREAMING_SNAKE_CASE = '''gelu''' def __init__( self , lowercase , lowercase=1_3 , lowercase=7 , lowercase=True , lowercase=False , lowercase=9_9 , lowercase=3_2 , lowercase=5 , lowercase=4 , lowercase=3_7 , lowercase=0.1 , lowercase=0.1 , lowercase=2_0 , lowercase=2 , lowercase=1 , lowercase=0 , ) -> Optional[Any]: __UpperCamelCase = parent __UpperCamelCase = batch_size __UpperCamelCase = seq_length __UpperCamelCase = is_training __UpperCamelCase = use_labels __UpperCamelCase = vocab_size __UpperCamelCase = hidden_size __UpperCamelCase = num_hidden_layers __UpperCamelCase = num_attention_heads __UpperCamelCase = intermediate_size __UpperCamelCase = hidden_dropout_prob __UpperCamelCase = attention_probs_dropout_prob __UpperCamelCase = max_position_embeddings __UpperCamelCase = eos_token_id __UpperCamelCase = pad_token_id __UpperCamelCase = bos_token_id def __lowerCamelCase ( self ) -> str: __UpperCamelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size ) __UpperCamelCase = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 ) __UpperCamelCase = np.concatenate([input_ids, eos_tensor] , axis=1 ) __UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCamelCase = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) __UpperCamelCase = prepare_pegasus_inputs_dict(lowercase , lowercase , lowercase ) return config, inputs_dict def __lowerCamelCase ( self , lowercase , lowercase , lowercase ) -> Dict: __UpperCamelCase = 2_0 __UpperCamelCase = model_class_name(lowercase ) __UpperCamelCase = model.encode(inputs_dict["""input_ids"""] ) __UpperCamelCase , __UpperCamelCase = ( inputs_dict["""decoder_input_ids"""], inputs_dict["""decoder_attention_mask"""], ) __UpperCamelCase = model.init_cache(decoder_input_ids.shape[0] , lowercase , lowercase ) __UpperCamelCase = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""" ) __UpperCamelCase = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) __UpperCamelCase = model.decode( decoder_input_ids[:, :-1] , lowercase , decoder_attention_mask=lowercase , past_key_values=lowercase , decoder_position_ids=lowercase , ) __UpperCamelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" ) __UpperCamelCase = model.decode( decoder_input_ids[:, -1:] , lowercase , decoder_attention_mask=lowercase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=lowercase , ) __UpperCamelCase = model.decode(lowercase , lowercase ) __UpperCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=f"Max diff is {diff}" ) def __lowerCamelCase ( self , lowercase , lowercase , lowercase ) -> Any: __UpperCamelCase = 2_0 __UpperCamelCase = model_class_name(lowercase ) __UpperCamelCase = model.encode(inputs_dict["""input_ids"""] ) __UpperCamelCase , __UpperCamelCase = ( inputs_dict["""decoder_input_ids"""], inputs_dict["""decoder_attention_mask"""], ) __UpperCamelCase = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ), ] , axis=-1 , ) __UpperCamelCase = model.init_cache(decoder_input_ids.shape[0] , lowercase , lowercase ) __UpperCamelCase = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) __UpperCamelCase = model.decode( decoder_input_ids[:, :-1] , lowercase , decoder_attention_mask=lowercase , past_key_values=lowercase , decoder_position_ids=lowercase , ) __UpperCamelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" ) __UpperCamelCase = model.decode( decoder_input_ids[:, -1:] , lowercase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=lowercase , decoder_position_ids=lowercase , ) __UpperCamelCase = model.decode(lowercase , lowercase , decoder_attention_mask=lowercase ) __UpperCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=f"Max diff is {diff}" ) def _lowercase ( __A ,__A ,__A ,__A=None ,__A=None ,): '''simple docstring''' if attention_mask is None: __UpperCamelCase = np.not_equal(__A ,config.pad_token_id ).astype(np.inta ) if decoder_attention_mask is None: __UpperCamelCase = np.concatenate( [ np.ones(decoder_input_ids[:, :1].shape ,dtype=np.inta ), np.not_equal(decoder_input_ids[:, 1:] ,config.pad_token_id ).astype(np.inta ), ] ,axis=-1 ,) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, } @require_flax class UpperCAmelCase__ ( UpperCAmelCase_ , unittest.TestCase): __SCREAMING_SNAKE_CASE = ( ( FlaxPegasusForConditionalGeneration, FlaxPegasusModel, ) if is_flax_available() else () ) __SCREAMING_SNAKE_CASE = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else () __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = False def __lowerCamelCase ( self ) -> Optional[Any]: __UpperCamelCase = FlaxPegasusModelTester(self ) __UpperCamelCase = ConfigTester(self , config_class=lowercase ) def __lowerCamelCase ( self ) -> List[Any]: self.config_tester.run_common_tests() def __lowerCamelCase ( self ) -> List[Any]: __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(lowercase , lowercase , lowercase ) def __lowerCamelCase ( self ) -> List[Any]: __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(lowercase , lowercase , lowercase ) def __lowerCamelCase ( self ) -> List[str]: __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): __UpperCamelCase = self._prepare_for_class(lowercase , lowercase ) __UpperCamelCase = model_class(lowercase ) @jax.jit def encode_jitted(lowercase , lowercase=None , **lowercase ): return model.encode(input_ids=lowercase , attention_mask=lowercase ) with self.subTest("""JIT Enabled""" ): __UpperCamelCase = encode_jitted(**lowercase ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): __UpperCamelCase = encode_jitted(**lowercase ).to_tuple() self.assertEqual(len(lowercase ) , len(lowercase ) ) for jitted_output, output in zip(lowercase , lowercase ): self.assertEqual(jitted_output.shape , output.shape ) def __lowerCamelCase ( self ) -> List[Any]: __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): __UpperCamelCase = model_class(lowercase ) __UpperCamelCase = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""] ) __UpperCamelCase = { """decoder_input_ids""": inputs_dict["""decoder_input_ids"""], """decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""], """encoder_outputs""": encoder_outputs, } @jax.jit def decode_jitted(lowercase , lowercase , lowercase ): return model.decode( decoder_input_ids=lowercase , decoder_attention_mask=lowercase , encoder_outputs=lowercase , ) with self.subTest("""JIT Enabled""" ): __UpperCamelCase = decode_jitted(**lowercase ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): __UpperCamelCase = decode_jitted(**lowercase ).to_tuple() self.assertEqual(len(lowercase ) , len(lowercase ) ) for jitted_output, output in zip(lowercase , lowercase ): self.assertEqual(jitted_output.shape , output.shape ) @slow def __lowerCamelCase ( self ) -> Dict: for model_class_name in self.all_model_classes: __UpperCamelCase = model_class_name.from_pretrained("""google/pegasus-large""" , from_pt=lowercase ) __UpperCamelCase = np.ones((1, 1) ) __UpperCamelCase = model(lowercase ) self.assertIsNotNone(lowercase ) @slow def __lowerCamelCase ( self ) -> str: __UpperCamelCase = FlaxPegasusForConditionalGeneration.from_pretrained("""google/pegasus-xsum""" ) __UpperCamelCase = PegasusTokenizer.from_pretrained("""google/pegasus-xsum""" ) __UpperCamelCase = [ """ PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""", """ The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """, ] __UpperCamelCase = [ """California's largest electricity provider has turned off power to hundreds of thousands of customers.""", """Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""", ] __UpperCamelCase = tokenizer(lowercase , return_tensors="""np""" , truncation=lowercase , max_length=5_1_2 , padding=lowercase ) __UpperCamelCase = model.generate(**lowercase , num_beams=2 ).sequences __UpperCamelCase = tokenizer.batch_decode(lowercase , skip_special_tokens=lowercase ) assert tgt_text == decoded
349
0
import argparse import hashlib # hashlib is only used inside the Test class import struct class a_ : """simple docstring""" def __init__( self : Dict ,snake_case : Dict ): SCREAMING_SNAKE_CASE =data SCREAMING_SNAKE_CASE =[0X67_452_301, 0XEF_CDA_B89, 0X98_BAD_CFE, 0X10_325_476, 0XC3_D2E_1F0] @staticmethod def _lowerCAmelCase ( snake_case : Dict ,snake_case : Optional[int] ): return ((n << b) | (n >> (32 - b))) & 0XFF_FFF_FFF def _lowerCAmelCase ( self : Optional[int] ): SCREAMING_SNAKE_CASE =B'\x80' + B'\x00' * (63 - (len(self.data ) + 8) % 64) SCREAMING_SNAKE_CASE =self.data + padding + struct.pack('>Q' ,8 * len(self.data ) ) return padded_data def _lowerCAmelCase ( self : List[Any] ): return [ self.padded_data[i : i + 64] for i in range(0 ,len(self.padded_data ) ,64 ) ] def _lowerCAmelCase ( self : List[str] ,snake_case : Any ): SCREAMING_SNAKE_CASE =list(struct.unpack('>16L' ,snake_case ) ) + [0] * 64 for i in range(16 ,80 ): SCREAMING_SNAKE_CASE =self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]) ,1 ) return w def _lowerCAmelCase ( self : int ): SCREAMING_SNAKE_CASE =self.padding() SCREAMING_SNAKE_CASE =self.split_blocks() for block in self.blocks: SCREAMING_SNAKE_CASE =self.expand_block(snake_case ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =self.h for i in range(0 ,80 ): if 0 <= i < 20: SCREAMING_SNAKE_CASE =(b & c) | ((~b) & d) SCREAMING_SNAKE_CASE =0X5A_827_999 elif 20 <= i < 40: SCREAMING_SNAKE_CASE =b ^ c ^ d SCREAMING_SNAKE_CASE =0X6E_D9E_BA1 elif 40 <= i < 60: SCREAMING_SNAKE_CASE =(b & c) | (b & d) | (c & d) SCREAMING_SNAKE_CASE =0X8F_1BB_CDC elif 60 <= i < 80: SCREAMING_SNAKE_CASE =b ^ c ^ d SCREAMING_SNAKE_CASE =0XCA_62C_1D6 SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =( self.rotate(snake_case ,5 ) + f + e + k + expanded_block[i] & 0XFF_FFF_FFF, a, self.rotate(snake_case ,30 ), c, d, ) SCREAMING_SNAKE_CASE =( self.h[0] + a & 0XFF_FFF_FFF, self.h[1] + b & 0XFF_FFF_FFF, self.h[2] + c & 0XFF_FFF_FFF, self.h[3] + d & 0XFF_FFF_FFF, self.h[4] + e & 0XFF_FFF_FFF, ) return ("{:08x}" * 5).format(*self.h ) def snake_case__ ( ): """simple docstring""" SCREAMING_SNAKE_CASE =b'Test String' assert SHAaHash(__A ).final_hash() == hashlib.shaa(__A ).hexdigest() # noqa: S324 def snake_case__ ( ): """simple docstring""" SCREAMING_SNAKE_CASE =argparse.ArgumentParser(description='Process some strings or files' ) parser.add_argument( '--string', dest='input_string', default='Hello World!! Welcome to Cryptography', help='Hash the string', ) parser.add_argument('--file', dest='input_file', help='Hash contents of a file' ) SCREAMING_SNAKE_CASE =parser.parse_args() SCREAMING_SNAKE_CASE =args.input_string # In any case hash input should be a bytestring if args.input_file: with open(args.input_file, 'rb' ) as f: SCREAMING_SNAKE_CASE =f.read() else: SCREAMING_SNAKE_CASE =bytes(__A, 'utf-8' ) print(SHAaHash(__A ).final_hash() ) if __name__ == "__main__": main() import doctest doctest.testmod()
334
'''simple docstring''' import pytest a__ : List[str] = '__dummy_dataset1__' a__ : Optional[int] = '\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"\nURLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n "tokens": datasets.Sequence(datasets.Value("string")),\n "ner_tags": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n "O",\n "B-PER",\n "I-PER",\n "B-ORG",\n "I-ORG",\n "B-LOC",\n "I-LOC",\n ]\n )\n ),\n "langs": datasets.Sequence(datasets.Value("string")),\n "spans": datasets.Sequence(datasets.Value("string")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, "r", encoding="utf-8") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n' @pytest.fixture def _lowercase ( ): '''simple docstring''' return DATASET_LOADING_SCRIPT_NAME @pytest.fixture def _lowercase ( ): '''simple docstring''' return DATASET_LOADING_SCRIPT_CODE @pytest.fixture def _lowercase ( __A ,__A ,__A ): '''simple docstring''' __UpperCamelCase = dataset_loading_script_name __UpperCamelCase = tmp_path / """datasets""" / script_name script_dir.mkdir(parents=__A ) __UpperCamelCase = script_dir / f"{script_name}.py" with open(__A ,"""w""" ) as f: f.write(__A ) return str(__A )
349
0
import argparse import copy def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> Optional[Any]: '''simple docstring''' UpperCAmelCase = {} with open(__A ) as f: for line in f: if line.split()[0] not in dict_of_neighbours: UpperCAmelCase = [] _list.append([line.split()[1], line.split()[2]] ) UpperCAmelCase = _list else: dict_of_neighbours[line.split()[0]].append( [line.split()[1], line.split()[2]] ) if line.split()[1] not in dict_of_neighbours: UpperCAmelCase = [] _list.append([line.split()[0], line.split()[2]] ) UpperCAmelCase = _list else: dict_of_neighbours[line.split()[1]].append( [line.split()[0], line.split()[2]] ) return dict_of_neighbours def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ ) -> str: '''simple docstring''' with open(__A ) as f: UpperCAmelCase = f.read(1 ) UpperCAmelCase = start_node UpperCAmelCase = [] UpperCAmelCase = start_node UpperCAmelCase = 0 while visiting not in first_solution: UpperCAmelCase = 1_0000 for k in dict_of_neighbours[visiting]: if int(k[1] ) < int(__A ) and k[0] not in first_solution: UpperCAmelCase = k[1] UpperCAmelCase = k[0] first_solution.append(__A ) UpperCAmelCase = distance_of_first_solution + int(__A ) UpperCAmelCase = best_node first_solution.append(__A ) UpperCAmelCase = 0 for k in dict_of_neighbours[first_solution[-2]]: if k[0] == start_node: break position += 1 UpperCAmelCase = ( distance_of_first_solution + int(dict_of_neighbours[first_solution[-2]][position][1] ) - 1_0000 ) return first_solution, distance_of_first_solution def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ ) -> Union[str, Any]: '''simple docstring''' UpperCAmelCase = [] for n in solution[1:-1]: UpperCAmelCase = solution.index(__A ) for kn in solution[1:-1]: UpperCAmelCase = solution.index(__A ) if n == kn: continue UpperCAmelCase = copy.deepcopy(__A ) UpperCAmelCase = kn UpperCAmelCase = n UpperCAmelCase = 0 for k in _tmp[:-1]: UpperCAmelCase = _tmp[_tmp.index(__A ) + 1] for i in dict_of_neighbours[k]: if i[0] == next_node: UpperCAmelCase = distance + int(i[1] ) _tmp.append(__A ) if _tmp not in neighborhood_of_solution: neighborhood_of_solution.append(_tmp ) UpperCAmelCase = len(neighborhood_of_solution[0] ) - 1 neighborhood_of_solution.sort(key=lambda UpperCamelCase__ : x[index_of_last_item_in_the_list] ) return neighborhood_of_solution def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Any: '''simple docstring''' UpperCAmelCase = 1 UpperCAmelCase = first_solution UpperCAmelCase = [] UpperCAmelCase = distance_of_first_solution UpperCAmelCase = solution while count <= iters: UpperCAmelCase = find_neighborhood(__A , __A ) UpperCAmelCase = 0 UpperCAmelCase = neighborhood[index_of_best_solution] UpperCAmelCase = len(__A ) - 1 UpperCAmelCase = False while not found: UpperCAmelCase = 0 while i < len(__A ): if best_solution[i] != solution[i]: UpperCAmelCase = best_solution[i] UpperCAmelCase = solution[i] break UpperCAmelCase = i + 1 if [first_exchange_node, second_exchange_node] not in tabu_list and [ second_exchange_node, first_exchange_node, ] not in tabu_list: tabu_list.append([first_exchange_node, second_exchange_node] ) UpperCAmelCase = True UpperCAmelCase = best_solution[:-1] UpperCAmelCase = neighborhood[index_of_best_solution][best_cost_index] if cost < best_cost: UpperCAmelCase = cost UpperCAmelCase = solution else: UpperCAmelCase = index_of_best_solution + 1 UpperCAmelCase = neighborhood[index_of_best_solution] if len(__A ) >= size: tabu_list.pop(0 ) UpperCAmelCase = count + 1 return best_solution_ever, best_cost def __SCREAMING_SNAKE_CASE ( UpperCamelCase__=None ) -> List[Any]: '''simple docstring''' UpperCAmelCase = generate_neighbours(args.File ) UpperCAmelCase , UpperCAmelCase = generate_first_solution( args.File , __A ) UpperCAmelCase , UpperCAmelCase = tabu_search( __A , __A , __A , args.Iterations , args.Size , ) print(F"""Best solution: {best_sol}, with total distance: {best_cost}.""" ) if __name__ == "__main__": __A : str = argparse.ArgumentParser(description="Tabu Search") parser.add_argument( "-f", "--File", type=str, help="Path to the file containing the data", required=True, ) parser.add_argument( "-i", "--Iterations", type=int, help="How many iterations the algorithm should perform", required=True, ) parser.add_argument( "-s", "--Size", type=int, help="Size of the tabu list", required=True ) # Pass the arguments to main method main(parser.parse_args())
273
'''simple docstring''' import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConfig, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaForCTC, WavaVecaForPreTraining, WavaVecaProcessor, logging, ) from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification logging.set_verbosity_info() a__ : Any = logging.get_logger(__name__) a__ : Optional[int] = { 'post_extract_proj': 'feature_projection.projection', 'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv', 'self_attn.k_proj': 'encoder.layers.*.attention.k_proj', 'self_attn.v_proj': 'encoder.layers.*.attention.v_proj', 'self_attn.q_proj': 'encoder.layers.*.attention.q_proj', 'self_attn.out_proj': 'encoder.layers.*.attention.out_proj', 'self_attn_layer_norm': 'encoder.layers.*.layer_norm', 'fc1': 'encoder.layers.*.feed_forward.intermediate_dense', 'fc2': 'encoder.layers.*.feed_forward.output_dense', 'final_layer_norm': 'encoder.layers.*.final_layer_norm', 'encoder.layer_norm': 'encoder.layer_norm', 'adapter_layer': 'encoder.layers.*.adapter_layer', 'w2v_model.layer_norm': 'feature_projection.layer_norm', 'quantizer.weight_proj': 'quantizer.weight_proj', 'quantizer.vars': 'quantizer.codevectors', 'project_q': 'project_q', 'final_proj': 'project_hid', 'w2v_encoder.proj': 'lm_head', 'mask_emb': 'masked_spec_embed', 'pooling_layer.linear': 'projector', 'pooling_layer.projection': 'classifier', } a__ : List[str] = [ 'lm_head', 'quantizer.weight_proj', 'quantizer.codevectors', 'project_q', 'project_hid', 'projector', 'classifier', ] def _lowercase ( __A ): '''simple docstring''' __UpperCamelCase = {} with open(__A ,"""r""" ) as file: for line_number, line in enumerate(__A ): __UpperCamelCase = line.strip() if line: __UpperCamelCase = line.split() __UpperCamelCase = line_number __UpperCamelCase = words[0] __UpperCamelCase = value return result def _lowercase ( __A ,__A ,__A ,__A ,__A ): '''simple docstring''' for attribute in key.split(""".""" ): __UpperCamelCase = getattr(__A ,__A ) __UpperCamelCase = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(__A ): __UpperCamelCase = PARAM_MAPPING[full_name.split(""".""" )[-1]] __UpperCamelCase = """param""" if weight_type is not None and weight_type != "param": __UpperCamelCase = getattr(__A ,__A ).shape elif weight_type is not None and weight_type == "param": __UpperCamelCase = hf_pointer for attribute in hf_param_name.split(""".""" ): __UpperCamelCase = getattr(__A ,__A ) __UpperCamelCase = shape_pointer.shape # let's reduce dimension __UpperCamelCase = value[0] else: __UpperCamelCase = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be" f" {value.shape} for {full_name}" ) if weight_type == "weight": __UpperCamelCase = value elif weight_type == "weight_g": __UpperCamelCase = value elif weight_type == "weight_v": __UpperCamelCase = value elif weight_type == "bias": __UpperCamelCase = value elif weight_type == "param": for attribute in hf_param_name.split(""".""" ): __UpperCamelCase = getattr(__A ,__A ) __UpperCamelCase = value else: __UpperCamelCase = value logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." ) def _lowercase ( __A ,__A ,__A ,__A ,__A ): '''simple docstring''' __UpperCamelCase = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(__A ): __UpperCamelCase = PARAM_MAPPING[full_name.split(""".""" )[-1]] __UpperCamelCase = """param""" if weight_type is not None and weight_type != "param": __UpperCamelCase = """.""".join([key, weight_type] ) elif weight_type is not None and weight_type == "param": __UpperCamelCase = """.""".join([key, hf_param_name] ) else: __UpperCamelCase = key __UpperCamelCase = value if """lm_head""" in full_key else value[0] a__ : Dict = { 'W_a': 'linear_1.weight', 'W_b': 'linear_2.weight', 'b_a': 'linear_1.bias', 'b_b': 'linear_2.bias', 'ln_W': 'norm.weight', 'ln_b': 'norm.bias', } def _lowercase ( __A ,__A ,__A=None ,__A=None ): '''simple docstring''' __UpperCamelCase = False for key, mapped_key in MAPPING.items(): __UpperCamelCase = """wav2vec2.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]: __UpperCamelCase = True if "*" in mapped_key: __UpperCamelCase = name.split(__A )[0].split(""".""" )[-2] __UpperCamelCase = mapped_key.replace("""*""" ,__A ) if "weight_g" in name: __UpperCamelCase = """weight_g""" elif "weight_v" in name: __UpperCamelCase = """weight_v""" elif "bias" in name: __UpperCamelCase = """bias""" elif "weight" in name: # TODO: don't match quantizer.weight_proj __UpperCamelCase = """weight""" else: __UpperCamelCase = None if hf_dict is not None: rename_dict(__A ,__A ,__A ,__A ,__A ) else: set_recursively(__A ,__A ,__A ,__A ,__A ) return is_used return is_used def _lowercase ( __A ,__A ,__A ): '''simple docstring''' __UpperCamelCase = [] __UpperCamelCase = fairseq_model.state_dict() __UpperCamelCase = hf_model.wavaveca.feature_extractor for name, value in fairseq_dict.items(): __UpperCamelCase = False if "conv_layers" in name: load_conv_layer( __A ,__A ,__A ,__A ,hf_model.config.feat_extract_norm == """group""" ,) __UpperCamelCase = True else: __UpperCamelCase = load_wavaveca_layer(__A ,__A ,__A ) if not is_used: unused_weights.append(__A ) logger.warning(f"Unused weights: {unused_weights}" ) def _lowercase ( __A ,__A ,__A ,__A ,__A ): '''simple docstring''' __UpperCamelCase = full_name.split("""conv_layers.""" )[-1] __UpperCamelCase = name.split(""".""" ) __UpperCamelCase = int(items[0] ) __UpperCamelCase = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f"{full_name} has size {value.shape}, but" f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." ) __UpperCamelCase = value logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f"{full_name} has size {value.shape}, but" f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." ) __UpperCamelCase = value logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f"{full_name} has size {value.shape}, but" f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found." ) __UpperCamelCase = value logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f"{full_name} has size {value.shape}, but" f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found." ) __UpperCamelCase = value logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." ) else: unused_weights.append(__A ) @torch.no_grad() def _lowercase ( __A ,__A ,__A=None ,__A=None ,__A=True ,__A=False ): '''simple docstring''' if config_path is not None: __UpperCamelCase = WavaVecaConfig.from_pretrained(__A ) else: __UpperCamelCase = WavaVecaConfig() if is_seq_class: __UpperCamelCase = read_txt_into_dict(__A ) __UpperCamelCase = idalabel __UpperCamelCase = WavaVecaForSequenceClassification(__A ) __UpperCamelCase = WavaVecaFeatureExtractor( feature_size=1 ,sampling_rate=16_000 ,padding_value=0 ,do_normalize=__A ,return_attention_mask=__A ,) feature_extractor.save_pretrained(__A ) elif is_finetuned: if dict_path: __UpperCamelCase = Dictionary.load(__A ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq __UpperCamelCase = target_dict.pad_index __UpperCamelCase = target_dict.bos_index __UpperCamelCase = target_dict.eos_index __UpperCamelCase = len(target_dict.symbols ) __UpperCamelCase = os.path.join(__A ,"""vocab.json""" ) if not os.path.isdir(__A ): logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(__A ) ) return os.makedirs(__A ,exist_ok=__A ) __UpperCamelCase = target_dict.indices # fairseq has the <pad> and <s> switched __UpperCamelCase = 0 __UpperCamelCase = 1 with open(__A ,"""w""" ,encoding="""utf-8""" ) as vocab_handle: json.dump(__A ,__A ) __UpperCamelCase = WavaVecaCTCTokenizer( __A ,unk_token=target_dict.unk_word ,pad_token=target_dict.pad_word ,bos_token=target_dict.bos_word ,eos_token=target_dict.eos_word ,word_delimiter_token="""|""" ,do_lower_case=__A ,) __UpperCamelCase = True if config.feat_extract_norm == """layer""" else False __UpperCamelCase = WavaVecaFeatureExtractor( feature_size=1 ,sampling_rate=16_000 ,padding_value=0 ,do_normalize=__A ,return_attention_mask=__A ,) __UpperCamelCase = WavaVecaProcessor(feature_extractor=__A ,tokenizer=__A ) processor.save_pretrained(__A ) __UpperCamelCase = WavaVecaForCTC(__A ) else: __UpperCamelCase = WavaVecaForPreTraining(__A ) if is_finetuned or is_seq_class: __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] ,arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} ) else: __UpperCamelCase = argparse.Namespace(task="""audio_pretraining""" ) __UpperCamelCase = fairseq.tasks.setup_task(__A ) __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ,task=__A ) __UpperCamelCase = model[0].eval() recursively_load_weights(__A ,__A ,not is_finetuned ) hf_wavavec.save_pretrained(__A ) if __name__ == "__main__": a__ : int = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') parser.add_argument( '--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not' ) parser.add_argument( '--is_seq_class', action='store_true', help='Whether the model to convert is a fine-tuned sequence classification model or not', ) a__ : Optional[int] = parser.parse_args() a__ : str = not args.not_finetuned and not args.is_seq_class convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, is_finetuned, args.is_seq_class, )
349
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, is_vision_available, ) lowerCamelCase__ = { 'configuration_layoutlmv3': [ 'LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LayoutLMv3Config', 'LayoutLMv3OnnxConfig', ], 'processing_layoutlmv3': ['LayoutLMv3Processor'], 'tokenization_layoutlmv3': ['LayoutLMv3Tokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = ['LayoutLMv3TokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ 'LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST', 'LayoutLMv3ForQuestionAnswering', 'LayoutLMv3ForSequenceClassification', 'LayoutLMv3ForTokenClassification', 'LayoutLMv3Model', 'LayoutLMv3PreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ 'TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFLayoutLMv3ForQuestionAnswering', 'TFLayoutLMv3ForSequenceClassification', 'TFLayoutLMv3ForTokenClassification', 'TFLayoutLMv3Model', 'TFLayoutLMv3PreTrainedModel', ] try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = ['LayoutLMv3FeatureExtractor'] lowerCamelCase__ = ['LayoutLMv3ImageProcessor'] if TYPE_CHECKING: from .configuration_layoutlmva import ( LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig, LayoutLMvaOnnxConfig, ) from .processing_layoutlmva import LayoutLMvaProcessor from .tokenization_layoutlmva import LayoutLMvaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_layoutlmva import ( LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST, LayoutLMvaForQuestionAnswering, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaModel, LayoutLMvaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_layoutlmva import ( TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, TFLayoutLMvaModel, TFLayoutLMvaPreTrainedModel, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor from .image_processing_layoutlmva import LayoutLMvaImageProcessor else: import sys lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
302
'''simple docstring''' from __future__ import annotations import unittest from transformers import DistilBertConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.distilbert.modeling_tf_distilbert import ( TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDistilBertForMaskedLM, TFDistilBertForMultipleChoice, TFDistilBertForQuestionAnswering, TFDistilBertForSequenceClassification, TFDistilBertForTokenClassification, TFDistilBertModel, ) class UpperCAmelCase__ : def __init__( self , lowercase , ) -> Union[str, Any]: __UpperCamelCase = parent __UpperCamelCase = 1_3 __UpperCamelCase = 7 __UpperCamelCase = True __UpperCamelCase = True __UpperCamelCase = False __UpperCamelCase = True __UpperCamelCase = 9_9 __UpperCamelCase = 3_2 __UpperCamelCase = 2 __UpperCamelCase = 4 __UpperCamelCase = 3_7 __UpperCamelCase = """gelu""" __UpperCamelCase = 0.1 __UpperCamelCase = 0.1 __UpperCamelCase = 5_1_2 __UpperCamelCase = 1_6 __UpperCamelCase = 2 __UpperCamelCase = 0.02 __UpperCamelCase = 3 __UpperCamelCase = 4 __UpperCamelCase = None def __lowerCamelCase ( self ) -> List[str]: __UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCamelCase = None if self.use_input_mask: __UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] ) __UpperCamelCase = None __UpperCamelCase = None __UpperCamelCase = None if self.use_labels: __UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices ) __UpperCamelCase = DistilBertConfig( vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , ) return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def __lowerCamelCase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Dict: __UpperCamelCase = TFDistilBertModel(config=lowercase ) __UpperCamelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask} __UpperCamelCase = model(lowercase ) __UpperCamelCase = [input_ids, input_mask] __UpperCamelCase = model(lowercase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __lowerCamelCase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Optional[Any]: __UpperCamelCase = TFDistilBertForMaskedLM(config=lowercase ) __UpperCamelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask} __UpperCamelCase = model(lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __lowerCamelCase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Tuple: __UpperCamelCase = TFDistilBertForQuestionAnswering(config=lowercase ) __UpperCamelCase = { """input_ids""": input_ids, """attention_mask""": input_mask, } __UpperCamelCase = model(lowercase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __lowerCamelCase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Tuple: __UpperCamelCase = self.num_labels __UpperCamelCase = TFDistilBertForSequenceClassification(lowercase ) __UpperCamelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask} __UpperCamelCase = model(lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __lowerCamelCase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> int: __UpperCamelCase = self.num_choices __UpperCamelCase = TFDistilBertForMultipleChoice(lowercase ) __UpperCamelCase = tf.tile(tf.expand_dims(lowercase , 1 ) , (1, self.num_choices, 1) ) __UpperCamelCase = tf.tile(tf.expand_dims(lowercase , 1 ) , (1, self.num_choices, 1) ) __UpperCamelCase = { """input_ids""": multiple_choice_inputs_ids, """attention_mask""": multiple_choice_input_mask, } __UpperCamelCase = model(lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __lowerCamelCase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Optional[int]: __UpperCamelCase = self.num_labels __UpperCamelCase = TFDistilBertForTokenClassification(lowercase ) __UpperCamelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask} __UpperCamelCase = model(lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __lowerCamelCase ( self ) -> Dict: __UpperCamelCase = self.prepare_config_and_inputs() ((__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase)) = config_and_inputs __UpperCamelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_tf class UpperCAmelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase): __SCREAMING_SNAKE_CASE = ( ( TFDistilBertModel, TFDistilBertForMaskedLM, TFDistilBertForQuestionAnswering, TFDistilBertForSequenceClassification, TFDistilBertForTokenClassification, TFDistilBertForMultipleChoice, ) if is_tf_available() else None ) __SCREAMING_SNAKE_CASE = ( { '''feature-extraction''': TFDistilBertModel, '''fill-mask''': TFDistilBertForMaskedLM, '''question-answering''': TFDistilBertForQuestionAnswering, '''text-classification''': TFDistilBertForSequenceClassification, '''token-classification''': TFDistilBertForTokenClassification, '''zero-shot''': TFDistilBertForSequenceClassification, } if is_tf_available() else {} ) __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = False def __lowerCamelCase ( self ) -> Optional[Any]: __UpperCamelCase = TFDistilBertModelTester(self ) __UpperCamelCase = ConfigTester(self , config_class=lowercase , dim=3_7 ) def __lowerCamelCase ( self ) -> Any: self.config_tester.run_common_tests() def __lowerCamelCase ( self ) -> Dict: __UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_model(*lowercase ) def __lowerCamelCase ( self ) -> Union[str, Any]: __UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_masked_lm(*lowercase ) def __lowerCamelCase ( self ) -> int: __UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_question_answering(*lowercase ) def __lowerCamelCase ( self ) -> Any: __UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_sequence_classification(*lowercase ) def __lowerCamelCase ( self ) -> Optional[Any]: __UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_multiple_choice(*lowercase ) def __lowerCamelCase ( self ) -> Union[str, Any]: __UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_token_classification(*lowercase ) @slow def __lowerCamelCase ( self ) -> Tuple: for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ): __UpperCamelCase = TFDistilBertModel.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) @require_tf class UpperCAmelCase__ ( unittest.TestCase): @slow def __lowerCamelCase ( self ) -> Optional[int]: __UpperCamelCase = TFDistilBertModel.from_pretrained("""distilbert-base-uncased""" ) __UpperCamelCase = tf.constant([[0, 1, 2, 3, 4, 5]] ) __UpperCamelCase = model(lowercase )[0] __UpperCamelCase = [1, 6, 7_6_8] self.assertEqual(output.shape , lowercase ) __UpperCamelCase = tf.constant( [ [ [0.19_261_885, -0.13_732_955, 0.4_119_799], [0.22_150_156, -0.07_422_661, 0.39_037_204], [0.22_756_018, -0.0_896_414, 0.3_701_467], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , lowercase , atol=1E-4 )
349
0
"""simple docstring""" import requests lowercase__ = 'https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey=' def __lowerCamelCase ( __UpperCamelCase ) -> int: """simple docstring""" lowerCAmelCase_ : List[str] = requests.get(_NEWS_API + bbc_news_api_key ).json() # each article in the list is a dict for i, article in enumerate(bbc_news_page["articles"] , 1 ): print(f'''{i}.) {article["title"]}''' ) if __name__ == "__main__": fetch_bbc_news(bbc_news_api_key="""<Your BBC News API key goes here>""")
241
'''simple docstring''' from __future__ import annotations import math import numpy as np from numpy.linalg import norm def _lowercase ( __A ,__A ): '''simple docstring''' return math.sqrt(sum(pow(a - b ,2 ) for a, b in zip(__A ,__A ) ) ) def _lowercase ( __A ,__A ): '''simple docstring''' if dataset.ndim != value_array.ndim: __UpperCamelCase = ( """Wrong input data's dimensions... """ f"dataset : {dataset.ndim}, value_array : {value_array.ndim}" ) raise ValueError(__A ) try: if dataset.shape[1] != value_array.shape[1]: __UpperCamelCase = ( """Wrong input data's shape... """ f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}" ) raise ValueError(__A ) except IndexError: if dataset.ndim != value_array.ndim: raise TypeError("""Wrong shape""" ) if dataset.dtype != value_array.dtype: __UpperCamelCase = ( """Input data have different datatype... """ f"dataset : {dataset.dtype}, value_array : {value_array.dtype}" ) raise TypeError(__A ) __UpperCamelCase = [] for value in value_array: __UpperCamelCase = euclidean(__A ,dataset[0] ) __UpperCamelCase = dataset[0].tolist() for dataset_value in dataset[1:]: __UpperCamelCase = euclidean(__A ,__A ) if dist > temp_dist: __UpperCamelCase = temp_dist __UpperCamelCase = dataset_value.tolist() answer.append([vector, dist] ) return answer def _lowercase ( __A ,__A ): '''simple docstring''' return np.dot(__A ,__A ) / (norm(__A ) * norm(__A )) if __name__ == "__main__": import doctest doctest.testmod()
349
0
from datetime import datetime import requests def lowerCAmelCase__( lowercase : Optional[int] ) -> List[str]: __snake_case : Dict = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url=" __snake_case : Optional[int] = requests.get(base_url + url ).json()[0]["urls"][0]["src"] return requests.get(__A ).content if __name__ == "__main__": _UpperCamelCase = input('''Enter Video/IGTV url: ''').strip() _UpperCamelCase = F'''{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4''' with open(file_name, '''wb''') as fp: fp.write(download_video(url)) print(F'''Done. Video saved to disk as {file_name}.''')
326
'''simple docstring''' from datetime import datetime import requests def _lowercase ( __A ): '''simple docstring''' __UpperCamelCase = """https://downloadgram.net/wp-json/wppress/video-downloader/video?url=""" __UpperCamelCase = requests.get(base_url + url ).json()[0]["""urls"""][0]["""src"""] return requests.get(__A ).content if __name__ == "__main__": a__ : int = input('Enter Video/IGTV url: ').strip() a__ : int = f'''{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4''' with open(file_name, 'wb') as fp: fp.write(download_video(url)) print(f'''Done. Video saved to disk as {file_name}.''')
349
0
"""simple docstring""" import warnings from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging a = logging.get_logger(__name__) a = { 'nvidia/segformer-b0-finetuned-ade-512-512': ( 'https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json' ), # See all SegFormer models at https://huggingface.co/models?filter=segformer } class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase_ ): _a = 'segformer' def __init__( self : int , lowerCAmelCase : Any=3 , lowerCAmelCase : Dict=4 , lowerCAmelCase : Tuple=[2, 2, 2, 2] , lowerCAmelCase : Union[str, Any]=[8, 4, 2, 1] , lowerCAmelCase : Union[str, Any]=[32, 64, 160, 256] , lowerCAmelCase : int=[7, 3, 3, 3] , lowerCAmelCase : List[Any]=[4, 2, 2, 2] , lowerCAmelCase : Optional[int]=[1, 2, 5, 8] , lowerCAmelCase : Union[str, Any]=[4, 4, 4, 4] , lowerCAmelCase : Optional[int]="gelu" , lowerCAmelCase : Optional[int]=0.0 , lowerCAmelCase : Any=0.0 , lowerCAmelCase : Dict=0.1 , lowerCAmelCase : int=0.02 , lowerCAmelCase : Union[str, Any]=0.1 , lowerCAmelCase : Union[str, Any]=1e-6 , lowerCAmelCase : Any=256 , lowerCAmelCase : Any=255 , **lowerCAmelCase : Any , ): super().__init__(**lowerCAmelCase ) if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False: warnings.warn( """Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be""" """ removed, as the behaviour will default to that of reshape_last_stage = True.""" , lowerCAmelCase , ) lowerCAmelCase = num_channels lowerCAmelCase = num_encoder_blocks lowerCAmelCase = depths lowerCAmelCase = sr_ratios lowerCAmelCase = hidden_sizes lowerCAmelCase = patch_sizes lowerCAmelCase = strides lowerCAmelCase = mlp_ratios lowerCAmelCase = num_attention_heads lowerCAmelCase = hidden_act lowerCAmelCase = hidden_dropout_prob lowerCAmelCase = attention_probs_dropout_prob lowerCAmelCase = classifier_dropout_prob lowerCAmelCase = initializer_range lowerCAmelCase = drop_path_rate lowerCAmelCase = layer_norm_eps lowerCAmelCase = decoder_hidden_size lowerCAmelCase = kwargs.get("""reshape_last_stage""" , lowerCAmelCase ) lowerCAmelCase = semantic_loss_ignore_index class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase_ ): _a = version.parse('1.11' ) @property def __lowercase ( self : Optional[int] ): return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def __lowercase ( self : Dict ): return 1e-4 @property def __lowercase ( self : Tuple ): return 12
155
'''simple docstring''' import asyncio import os import re import sys import tempfile import unittest from contextlib import contextmanager from copy import deepcopy from distutils.util import strtobool from enum import Enum from importlib.util import find_spec from pathlib import Path from unittest.mock import patch import pyarrow as pa import pytest import requests from packaging import version from datasets import config if config.PY_VERSION < version.parse('3.8'): import importlib_metadata else: import importlib.metadata as importlib_metadata def _lowercase ( __A ,__A=False ): '''simple docstring''' try: __UpperCamelCase = os.environ[key] except KeyError: # KEY isn't set, default to `default`. __UpperCamelCase = default else: # KEY is set, convert it to True or False. try: __UpperCamelCase = strtobool(__A ) except ValueError: # More values are supported, but let's keep the message simple. raise ValueError(f"If set, {key} must be yes or no." ) return _value a__ : Optional[Any] = parse_flag_from_env('RUN_SLOW', default=False) a__ : Union[str, Any] = parse_flag_from_env('RUN_REMOTE', default=False) a__ : Any = parse_flag_from_env('RUN_LOCAL', default=True) a__ : List[Any] = parse_flag_from_env('RUN_PACKAGED', default=True) # Compression a__ : Optional[int] = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='test requires lz4') a__ : Optional[int] = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='test requires py7zr') a__ : Optional[Any] = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='test requires zstandard') # Audio a__ : List[Any] = pytest.mark.skipif( # On Windows and OS X, soundfile installs sndfile find_spec('soundfile') is None or version.parse(importlib_metadata.version('soundfile')) < version.parse('0.12.0'), reason='test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ', ) # Beam a__ : str = pytest.mark.skipif( not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('0.3.2'), reason='test requires apache-beam and a compatible dill version', ) # Dill-cloudpickle compatibility a__ : str = pytest.mark.skipif( config.DILL_VERSION <= version.parse('0.3.2'), reason='test requires dill>0.3.2 for cloudpickle compatibility', ) # Windows a__ : Tuple = pytest.mark.skipif( sys.platform == 'win32', reason='test should not be run on Windows', ) def _lowercase ( __A ): '''simple docstring''' try: import faiss # noqa except ImportError: __UpperCamelCase = unittest.skip("""test requires faiss""" )(__A ) return test_case def _lowercase ( __A ): '''simple docstring''' try: import regex # noqa except ImportError: __UpperCamelCase = unittest.skip("""test requires regex""" )(__A ) return test_case def _lowercase ( __A ): '''simple docstring''' try: import elasticsearch # noqa except ImportError: __UpperCamelCase = unittest.skip("""test requires elasticsearch""" )(__A ) return test_case def _lowercase ( __A ): '''simple docstring''' try: import sqlalchemy # noqa except ImportError: __UpperCamelCase = unittest.skip("""test requires sqlalchemy""" )(__A ) return test_case def _lowercase ( __A ): '''simple docstring''' if not config.TORCH_AVAILABLE: __UpperCamelCase = unittest.skip("""test requires PyTorch""" )(__A ) return test_case def _lowercase ( __A ): '''simple docstring''' if not config.TF_AVAILABLE: __UpperCamelCase = unittest.skip("""test requires TensorFlow""" )(__A ) return test_case def _lowercase ( __A ): '''simple docstring''' if not config.JAX_AVAILABLE: __UpperCamelCase = unittest.skip("""test requires JAX""" )(__A ) return test_case def _lowercase ( __A ): '''simple docstring''' if not config.PIL_AVAILABLE: __UpperCamelCase = unittest.skip("""test requires Pillow""" )(__A ) return test_case def _lowercase ( __A ): '''simple docstring''' try: import transformers # noqa F401 except ImportError: return unittest.skip("""test requires transformers""" )(__A ) else: return test_case def _lowercase ( __A ): '''simple docstring''' try: import tiktoken # noqa F401 except ImportError: return unittest.skip("""test requires tiktoken""" )(__A ) else: return test_case def _lowercase ( __A ): '''simple docstring''' try: import spacy # noqa F401 except ImportError: return unittest.skip("""test requires spacy""" )(__A ) else: return test_case def _lowercase ( __A ): '''simple docstring''' def _require_spacy_model(__A ): try: import spacy # noqa F401 spacy.load(__A ) except ImportError: return unittest.skip("""test requires spacy""" )(__A ) except OSError: return unittest.skip("""test requires spacy model '{}'""".format(__A ) )(__A ) else: return test_case return _require_spacy_model def _lowercase ( __A ): '''simple docstring''' try: import pyspark # noqa F401 except ImportError: return unittest.skip("""test requires pyspark""" )(__A ) else: return test_case def _lowercase ( __A ): '''simple docstring''' try: import joblibspark # noqa F401 except ImportError: return unittest.skip("""test requires joblibspark""" )(__A ) else: return test_case def _lowercase ( __A ): '''simple docstring''' if not _run_slow_tests or _run_slow_tests == 0: __UpperCamelCase = unittest.skip("""test is slow""" )(__A ) return test_case def _lowercase ( __A ): '''simple docstring''' if not _run_local_tests or _run_local_tests == 0: __UpperCamelCase = unittest.skip("""test is local""" )(__A ) return test_case def _lowercase ( __A ): '''simple docstring''' if not _run_packaged_tests or _run_packaged_tests == 0: __UpperCamelCase = unittest.skip("""test is packaged""" )(__A ) return test_case def _lowercase ( __A ): '''simple docstring''' if not _run_remote_tests or _run_remote_tests == 0: __UpperCamelCase = unittest.skip("""test requires remote""" )(__A ) return test_case def _lowercase ( *__A ): '''simple docstring''' def decorate(cls ): for name, fn in cls.__dict__.items(): if callable(__A ) and name.startswith("""test""" ): for decorator in decorators: __UpperCamelCase = decorator(__A ) setattr(cls ,__A ,__A ) return cls return decorate class UpperCAmelCase__ ( UpperCAmelCase_): pass class UpperCAmelCase__ ( UpperCAmelCase_): __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = 1 __SCREAMING_SNAKE_CASE = 2 @contextmanager def _lowercase ( __A=OfflineSimulationMode.CONNECTION_FAILS ,__A=1E-16 ): '''simple docstring''' __UpperCamelCase = requests.Session().request def timeout_request(__A ,__A ,__A ,**__A ): # Change the url to an invalid url so that the connection hangs __UpperCamelCase = """https://10.255.255.1""" if kwargs.get("""timeout""" ) is None: raise RequestWouldHangIndefinitelyError( f"Tried a call to {url} in offline mode with no timeout set. Please set a timeout." ) __UpperCamelCase = timeout try: return online_request(__A ,__A ,**__A ) except Exception as e: # The following changes in the error are just here to make the offline timeout error prettier __UpperCamelCase = url __UpperCamelCase = e.args[0] __UpperCamelCase = (max_retry_error.args[0].replace("""10.255.255.1""" ,f"OfflineMock[{url}]" ),) __UpperCamelCase = (max_retry_error,) raise def raise_connection_error(__A ,__A ,**__A ): raise requests.ConnectionError("""Offline mode is enabled.""" ,request=__A ) if mode is OfflineSimulationMode.CONNECTION_FAILS: with patch("""requests.Session.send""" ,__A ): yield elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT: # inspired from https://stackoverflow.com/a/904609 with patch("""requests.Session.request""" ,__A ): yield elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1: with patch("""datasets.config.HF_DATASETS_OFFLINE""" ,__A ): yield else: raise ValueError("""Please use a value from the OfflineSimulationMode enum.""" ) @contextmanager def _lowercase ( *__A ,**__A ): '''simple docstring''' __UpperCamelCase = str(Path().resolve() ) with tempfile.TemporaryDirectory(*__A ,**__A ) as tmp_dir: try: os.chdir(__A ) yield finally: os.chdir(__A ) @contextmanager def _lowercase ( ): '''simple docstring''' import gc gc.collect() __UpperCamelCase = pa.total_allocated_bytes() yield assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase." @contextmanager def _lowercase ( ): '''simple docstring''' import gc gc.collect() __UpperCamelCase = pa.total_allocated_bytes() yield assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase." def _lowercase ( __A ,__A ): '''simple docstring''' return deepcopy(__A ).integers(0 ,100 ,10 ).tolist() == deepcopy(__A ).integers(0 ,100 ,10 ).tolist() def _lowercase ( __A ): '''simple docstring''' import decorator from requests.exceptions import HTTPError def _wrapper(__A ,*__A ,**__A ): try: return func(*__A ,**__A ) except HTTPError as err: if str(__A ).startswith("""500""" ) or str(__A ).startswith("""502""" ): pytest.xfail(str(__A ) ) raise err return decorator.decorator(_wrapper ,__A ) class UpperCAmelCase__ : def __init__( self , lowercase , lowercase , lowercase ) -> str: __UpperCamelCase = returncode __UpperCamelCase = stdout __UpperCamelCase = stderr async def _lowercase ( __A ,__A ): '''simple docstring''' while True: __UpperCamelCase = await stream.readline() if line: callback(__A ) else: break async def _lowercase ( __A ,__A=None ,__A=None ,__A=None ,__A=False ,__A=False ): '''simple docstring''' if echo: print("""\nRunning: """ ,""" """.join(__A ) ) __UpperCamelCase = await asyncio.create_subprocess_exec( cmd[0] ,*cmd[1:] ,stdin=__A ,stdout=asyncio.subprocess.PIPE ,stderr=asyncio.subprocess.PIPE ,env=__A ,) # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait # # If it starts hanging, will need to switch to the following code. The problem is that no data # will be seen until it's done and if it hangs for example there will be no debug info. # out, err = await p.communicate() # return _RunOutput(p.returncode, out, err) __UpperCamelCase = [] __UpperCamelCase = [] def tee(__A ,__A ,__A ,__A="" ): __UpperCamelCase = line.decode("""utf-8""" ).rstrip() sink.append(__A ) if not quiet: print(__A ,__A ,file=__A ) # XXX: the timeout doesn't seem to make any difference here await asyncio.wait( [ _read_stream(p.stdout ,lambda __A : tee(__A ,__A ,sys.stdout ,label="""stdout:""" ) ), _read_stream(p.stderr ,lambda __A : tee(__A ,__A ,sys.stderr ,label="""stderr:""" ) ), ] ,timeout=__A ,) return _RunOutput(await p.wait() ,__A ,__A ) def _lowercase ( __A ,__A=None ,__A=None ,__A=180 ,__A=False ,__A=True ): '''simple docstring''' __UpperCamelCase = asyncio.get_event_loop() __UpperCamelCase = loop.run_until_complete( _stream_subprocess(__A ,env=__A ,stdin=__A ,timeout=__A ,quiet=__A ,echo=__A ) ) __UpperCamelCase = """ """.join(__A ) if result.returncode > 0: __UpperCamelCase = """\n""".join(result.stderr ) raise RuntimeError( f"'{cmd_str}' failed with returncode {result.returncode}\n\n" f"The combined stderr from workers follows:\n{stderr}" ) # check that the subprocess actually did run and produced some output, should the test rely on # the remote side to do the testing if not result.stdout and not result.stderr: raise RuntimeError(f"'{cmd_str}' produced no output." ) return result def _lowercase ( ): '''simple docstring''' __UpperCamelCase = os.environ.get("""PYTEST_XDIST_WORKER""" ,"""gw0""" ) __UpperCamelCase = re.sub(R"""^gw""" ,"""""" ,__A ,0 ,re.M ) return int(__A ) def _lowercase ( ): '''simple docstring''' __UpperCamelCase = 29_500 __UpperCamelCase = pytest_xdist_worker_id() return port + uniq_delta
349
0
"""simple docstring""" from argparse import ArgumentParser from .env import EnvironmentCommand def snake_case ( ): UpperCAmelCase_ : Optional[Any] = ArgumentParser("Diffusers CLI tool" ,usage="diffusers-cli <command> [<args>]" ) UpperCAmelCase_ : Dict = parser.add_subparsers(help="diffusers-cli command helpers" ) # Register commands EnvironmentCommand.register_subcommand(__A ) # Let's go UpperCAmelCase_ : List[Any] = parser.parse_args() if not hasattr(__A ,"func" ): parser.print_help() exit(1 ) # Run UpperCAmelCase_ : Tuple = args.func(__A ) service.run() if __name__ == "__main__": main()
268
'''simple docstring''' import re def _lowercase ( __A ): '''simple docstring''' return [char.split() for char in re.split(R"""[^ a-z A-Z 0-9 \s]""" ,str_ )] def _lowercase ( __A ): '''simple docstring''' __UpperCamelCase = split_input(str_ ) return "".join( ["""""".join([char.capitalize() for char in sub_str] ) for sub_str in string_split] ) def _lowercase ( __A ,__A ,__A ): '''simple docstring''' try: __UpperCamelCase = split_input(__A ) if upper: __UpperCamelCase = """""".join( [ separator.join([char.upper() for char in sub_str] ) for sub_str in string_split ] ) else: __UpperCamelCase = """""".join( [ separator.join([char.lower() for char in sub_str] ) for sub_str in string_split ] ) return res_str except IndexError: return "not valid string" def _lowercase ( __A ): '''simple docstring''' return to_simple_case(__A ) def _lowercase ( __A ): '''simple docstring''' try: __UpperCamelCase = to_simple_case(__A ) return res_str[0].lower() + res_str[1:] except IndexError: return "not valid string" def _lowercase ( __A ,__A ): '''simple docstring''' return to_complex_case(__A ,__A ,"""_""" ) def _lowercase ( __A ,__A ): '''simple docstring''' return to_complex_case(__A ,__A ,"""-""" ) if __name__ == "__main__": __import__('doctest').testmod()
349
0
import argparse import torch from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert from transformers.utils import logging logging.set_verbosity_info() def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> int: """simple docstring""" A : Optional[Any] = MobileBertConfig.from_json_file(__A ) print(f'''Building PyTorch model from configuration: {config}''' ) A : Optional[int] = MobileBertForPreTraining(__A ) # Load weights from tf checkpoint A : int = load_tf_weights_in_mobilebert(__A , __A , __A ) # Save pytorch-model print(f'''Save PyTorch model to {pytorch_dump_path}''' ) torch.save(model.state_dict() , __A ) if __name__ == "__main__": SCREAMING_SNAKE_CASE_:Any = argparse.ArgumentParser() # Required parameters parser.add_argument( """--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path.""" ) parser.add_argument( """--mobilebert_config_file""", default=None, type=str, required=True, help=( """The config json file corresponding to the pre-trained MobileBERT model. \n""" """This specifies the model architecture.""" ), ) parser.add_argument( """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) SCREAMING_SNAKE_CASE_:Any = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
116
'''simple docstring''' import gc import random import tempfile import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline from diffusers.utils import floats_tensor, nightly, torch_device from diffusers.utils.testing_utils import require_torch_gpu class UpperCAmelCase__ ( unittest.TestCase): def __lowerCamelCase ( self ) -> List[str]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def __lowerCamelCase ( self ) -> int: __UpperCamelCase = 1 __UpperCamelCase = 3 __UpperCamelCase = (3_2, 3_2) __UpperCamelCase = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(lowercase ) return image @property def __lowerCamelCase ( self ) -> Dict: torch.manual_seed(0 ) __UpperCamelCase = UNetaDConditionModel( block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=3_2 , ) return model @property def __lowerCamelCase ( self ) -> List[str]: torch.manual_seed(0 ) __UpperCamelCase = AutoencoderKL( block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , ) return model @property def __lowerCamelCase ( self ) -> Optional[int]: torch.manual_seed(0 ) __UpperCamelCase = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ) return CLIPTextModel(lowercase ) @property def __lowerCamelCase ( self ) -> Tuple: def extract(*lowercase , **lowercase ): class UpperCAmelCase__ : def __init__( self ) -> Tuple: __UpperCamelCase = torch.ones([0] ) def __lowerCamelCase ( self , lowercase ) -> List[str]: self.pixel_values.to(lowercase ) return self return Out() return extract def __lowerCamelCase ( self ) -> Any: __UpperCamelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator __UpperCamelCase = self.dummy_cond_unet __UpperCamelCase = DDIMScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=lowercase , set_alpha_to_one=lowercase , ) __UpperCamelCase = self.dummy_vae __UpperCamelCase = self.dummy_text_encoder __UpperCamelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) # make sure here that pndm scheduler skips prk __UpperCamelCase = StableDiffusionPipeline( unet=lowercase , scheduler=lowercase , vae=lowercase , text_encoder=lowercase , tokenizer=lowercase , safety_checker=lowercase , feature_extractor=self.dummy_extractor , ) __UpperCamelCase = sd_pipe.to(lowercase ) sd_pipe.set_progress_bar_config(disable=lowercase ) __UpperCamelCase = """A painting of a squirrel eating a burger""" __UpperCamelCase = torch.Generator(device=lowercase ).manual_seed(0 ) __UpperCamelCase = sd_pipe([prompt] , generator=lowercase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" ) __UpperCamelCase = output.images __UpperCamelCase = torch.Generator(device=lowercase ).manual_seed(0 ) __UpperCamelCase = sd_pipe( [prompt] , generator=lowercase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , return_dict=lowercase , )[0] __UpperCamelCase = image[0, -3:, -3:, -1] __UpperCamelCase = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 6_4, 6_4, 3) __UpperCamelCase = np.array([0.5_756, 0.6_118, 0.5_005, 0.5_041, 0.5_471, 0.4_726, 0.4_976, 0.4_865, 0.4_864] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 def __lowerCamelCase ( self ) -> Tuple: __UpperCamelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator __UpperCamelCase = self.dummy_cond_unet __UpperCamelCase = PNDMScheduler(skip_prk_steps=lowercase ) __UpperCamelCase = self.dummy_vae __UpperCamelCase = self.dummy_text_encoder __UpperCamelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) # make sure here that pndm scheduler skips prk __UpperCamelCase = StableDiffusionPipeline( unet=lowercase , scheduler=lowercase , vae=lowercase , text_encoder=lowercase , tokenizer=lowercase , safety_checker=lowercase , feature_extractor=self.dummy_extractor , ) __UpperCamelCase = sd_pipe.to(lowercase ) sd_pipe.set_progress_bar_config(disable=lowercase ) __UpperCamelCase = """A painting of a squirrel eating a burger""" __UpperCamelCase = torch.Generator(device=lowercase ).manual_seed(0 ) __UpperCamelCase = sd_pipe([prompt] , generator=lowercase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" ) __UpperCamelCase = output.images __UpperCamelCase = torch.Generator(device=lowercase ).manual_seed(0 ) __UpperCamelCase = sd_pipe( [prompt] , generator=lowercase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , return_dict=lowercase , )[0] __UpperCamelCase = image[0, -3:, -3:, -1] __UpperCamelCase = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 6_4, 6_4, 3) __UpperCamelCase = np.array([0.5_125, 0.5_716, 0.4_828, 0.5_060, 0.5_650, 0.4_768, 0.5_185, 0.4_895, 0.4_993] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 def __lowerCamelCase ( self ) -> Union[str, Any]: __UpperCamelCase = StableDiffusionPipeline.from_pretrained( """hf-internal-testing/tiny-stable-diffusion-lms-pipe""" , safety_checker=lowercase ) assert isinstance(lowercase , lowercase ) assert isinstance(pipe.scheduler , lowercase ) assert pipe.safety_checker is None __UpperCamelCase = pipe("""example prompt""" , num_inference_steps=2 ).images[0] assert image is not None # check that there's no error when saving a pipeline with one of the models being None with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(lowercase ) __UpperCamelCase = StableDiffusionPipeline.from_pretrained(lowercase ) # sanity check that the pipeline still works assert pipe.safety_checker is None __UpperCamelCase = pipe("""example prompt""" , num_inference_steps=2 ).images[0] assert image is not None @unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" ) def __lowerCamelCase ( self ) -> Optional[int]: __UpperCamelCase = self.dummy_cond_unet __UpperCamelCase = PNDMScheduler(skip_prk_steps=lowercase ) __UpperCamelCase = self.dummy_vae __UpperCamelCase = self.dummy_text_encoder __UpperCamelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) # put models in fp16 __UpperCamelCase = unet.half() __UpperCamelCase = vae.half() __UpperCamelCase = bert.half() # make sure here that pndm scheduler skips prk __UpperCamelCase = StableDiffusionPipeline( unet=lowercase , scheduler=lowercase , vae=lowercase , text_encoder=lowercase , tokenizer=lowercase , safety_checker=lowercase , feature_extractor=self.dummy_extractor , ) __UpperCamelCase = sd_pipe.to(lowercase ) sd_pipe.set_progress_bar_config(disable=lowercase ) __UpperCamelCase = """A painting of a squirrel eating a burger""" __UpperCamelCase = sd_pipe([prompt] , num_inference_steps=2 , output_type="""np""" ).images assert image.shape == (1, 6_4, 6_4, 3) @nightly @require_torch_gpu class UpperCAmelCase__ ( unittest.TestCase): def __lowerCamelCase ( self ) -> Tuple: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowerCamelCase ( self ) -> Dict: __UpperCamelCase = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" , safety_checker=lowercase ) __UpperCamelCase = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config ) __UpperCamelCase = sd_pipe.to(lowercase ) sd_pipe.set_progress_bar_config(disable=lowercase ) __UpperCamelCase = ( """portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle""" """ coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with""" """ anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and""" """ children from bahnhof zoo, detailed """ ) __UpperCamelCase = 4_0_0_3_6_6_0_3_4_6 __UpperCamelCase = 7 # without safety guidance (sld_guidance_scale = 0) __UpperCamelCase = torch.manual_seed(lowercase ) __UpperCamelCase = sd_pipe( [prompt] , generator=lowercase , guidance_scale=lowercase , num_inference_steps=5_0 , output_type="""np""" , width=5_1_2 , height=5_1_2 , sld_guidance_scale=0 , ) __UpperCamelCase = output.images __UpperCamelCase = image[0, -3:, -3:, -1] __UpperCamelCase = [0.2_278, 0.2_231, 0.2_249, 0.2_333, 0.2_303, 0.1_885, 0.2_273, 0.2_144, 0.2_176] assert image.shape == (1, 5_1_2, 5_1_2, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 # without safety guidance (strong configuration) __UpperCamelCase = torch.manual_seed(lowercase ) __UpperCamelCase = sd_pipe( [prompt] , generator=lowercase , guidance_scale=lowercase , num_inference_steps=5_0 , output_type="""np""" , width=5_1_2 , height=5_1_2 , sld_guidance_scale=2_0_0_0 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , ) __UpperCamelCase = output.images __UpperCamelCase = image[0, -3:, -3:, -1] __UpperCamelCase = [0.2_383, 0.2_276, 0.236, 0.2_192, 0.2_186, 0.2_053, 0.1_971, 0.1_901, 0.1_719] assert image.shape == (1, 5_1_2, 5_1_2, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def __lowerCamelCase ( self ) -> Optional[Any]: __UpperCamelCase = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" , safety_checker=lowercase ) __UpperCamelCase = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config ) __UpperCamelCase = sd_pipe.to(lowercase ) sd_pipe.set_progress_bar_config(disable=lowercase ) __UpperCamelCase = """padme amidala taking a bath artwork, safe for work, no nudity""" __UpperCamelCase = 2_7_3_4_9_7_1_7_5_5 __UpperCamelCase = 7 __UpperCamelCase = torch.manual_seed(lowercase ) __UpperCamelCase = sd_pipe( [prompt] , generator=lowercase , guidance_scale=lowercase , num_inference_steps=5_0 , output_type="""np""" , width=5_1_2 , height=5_1_2 , sld_guidance_scale=0 , ) __UpperCamelCase = output.images __UpperCamelCase = image[0, -3:, -3:, -1] __UpperCamelCase = [0.3_502, 0.3_622, 0.3_396, 0.3_642, 0.3_478, 0.3_318, 0.35, 0.3_348, 0.3_297] assert image.shape == (1, 5_1_2, 5_1_2, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 __UpperCamelCase = torch.manual_seed(lowercase ) __UpperCamelCase = sd_pipe( [prompt] , generator=lowercase , guidance_scale=lowercase , num_inference_steps=5_0 , output_type="""np""" , width=5_1_2 , height=5_1_2 , sld_guidance_scale=2_0_0_0 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , ) __UpperCamelCase = output.images __UpperCamelCase = image[0, -3:, -3:, -1] __UpperCamelCase = [0.5_531, 0.5_206, 0.4_895, 0.5_156, 0.5_182, 0.4_751, 0.4_802, 0.4_803, 0.4_443] assert image.shape == (1, 5_1_2, 5_1_2, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def __lowerCamelCase ( self ) -> Optional[Any]: __UpperCamelCase = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" ) __UpperCamelCase = sd_pipe.to(lowercase ) sd_pipe.set_progress_bar_config(disable=lowercase ) __UpperCamelCase = ( """the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c.""" """ leyendecker""" ) __UpperCamelCase = 1_0_4_4_3_5_5_2_3_4 __UpperCamelCase = 1_2 __UpperCamelCase = torch.manual_seed(lowercase ) __UpperCamelCase = sd_pipe( [prompt] , generator=lowercase , guidance_scale=lowercase , num_inference_steps=5_0 , output_type="""np""" , width=5_1_2 , height=5_1_2 , sld_guidance_scale=0 , ) __UpperCamelCase = output.images __UpperCamelCase = image[0, -3:, -3:, -1] __UpperCamelCase = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] ) assert image.shape == (1, 5_1_2, 5_1_2, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-7 __UpperCamelCase = torch.manual_seed(lowercase ) __UpperCamelCase = sd_pipe( [prompt] , generator=lowercase , guidance_scale=lowercase , num_inference_steps=5_0 , output_type="""np""" , width=5_1_2 , height=5_1_2 , sld_guidance_scale=2_0_0_0 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , ) __UpperCamelCase = output.images __UpperCamelCase = image[0, -3:, -3:, -1] __UpperCamelCase = np.array([0.5_818, 0.6_285, 0.6_835, 0.6_019, 0.625, 0.6_754, 0.6_096, 0.6_334, 0.6_561] ) assert image.shape == (1, 5_1_2, 5_1_2, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
349
0
def UpperCamelCase__ ( A__ , A__ , A__ , A__ , A__ ) -> Dict: if index == number_of_items: return 0 snake_case__ : List[str] = 0 snake_case__ : Dict = 0 snake_case__ : Dict = knapsack(__A , __A , __A , __A , index + 1 ) if weights[index] <= max_weight: snake_case__ : int = values[index] + knapsack( __A , __A , __A , max_weight - weights[index] , index + 1 ) return max(__A , __A ) if __name__ == "__main__": import doctest doctest.testmod()
143
'''simple docstring''' from ..utils import DummyObject, requires_backends class UpperCAmelCase__ ( metaclass=UpperCAmelCase_): __SCREAMING_SNAKE_CASE = ['''flax'''] def __init__( self , *lowercase , **lowercase ) -> List[Any]: requires_backends(self , ["""flax"""] ) @classmethod def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Union[str, Any]: requires_backends(cls , ["""flax"""] ) @classmethod def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Tuple: requires_backends(cls , ["""flax"""] ) class UpperCAmelCase__ ( metaclass=UpperCAmelCase_): __SCREAMING_SNAKE_CASE = ['''flax'''] def __init__( self , *lowercase , **lowercase ) -> str: requires_backends(self , ["""flax"""] ) @classmethod def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Any: requires_backends(cls , ["""flax"""] ) @classmethod def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> int: requires_backends(cls , ["""flax"""] ) class UpperCAmelCase__ ( metaclass=UpperCAmelCase_): __SCREAMING_SNAKE_CASE = ['''flax'''] def __init__( self , *lowercase , **lowercase ) -> Dict: requires_backends(self , ["""flax"""] ) @classmethod def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> int: requires_backends(cls , ["""flax"""] ) @classmethod def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> List[Any]: requires_backends(cls , ["""flax"""] ) class UpperCAmelCase__ ( metaclass=UpperCAmelCase_): __SCREAMING_SNAKE_CASE = ['''flax'''] def __init__( self , *lowercase , **lowercase ) -> Optional[Any]: requires_backends(self , ["""flax"""] ) @classmethod def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Optional[int]: requires_backends(cls , ["""flax"""] ) @classmethod def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Optional[int]: requires_backends(cls , ["""flax"""] ) class UpperCAmelCase__ ( metaclass=UpperCAmelCase_): __SCREAMING_SNAKE_CASE = ['''flax'''] def __init__( self , *lowercase , **lowercase ) -> Optional[Any]: requires_backends(self , ["""flax"""] ) @classmethod def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> str: requires_backends(cls , ["""flax"""] ) @classmethod def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> List[str]: requires_backends(cls , ["""flax"""] ) class UpperCAmelCase__ ( metaclass=UpperCAmelCase_): __SCREAMING_SNAKE_CASE = ['''flax'''] def __init__( self , *lowercase , **lowercase ) -> Optional[int]: requires_backends(self , ["""flax"""] ) @classmethod def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> str: requires_backends(cls , ["""flax"""] ) @classmethod def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> int: requires_backends(cls , ["""flax"""] ) class UpperCAmelCase__ ( metaclass=UpperCAmelCase_): __SCREAMING_SNAKE_CASE = ['''flax'''] def __init__( self , *lowercase , **lowercase ) -> List[Any]: requires_backends(self , ["""flax"""] ) @classmethod def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Any: requires_backends(cls , ["""flax"""] ) @classmethod def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Dict: requires_backends(cls , ["""flax"""] ) class UpperCAmelCase__ ( metaclass=UpperCAmelCase_): __SCREAMING_SNAKE_CASE = ['''flax'''] def __init__( self , *lowercase , **lowercase ) -> Union[str, Any]: requires_backends(self , ["""flax"""] ) @classmethod def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Dict: requires_backends(cls , ["""flax"""] ) @classmethod def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Tuple: requires_backends(cls , ["""flax"""] ) class UpperCAmelCase__ ( metaclass=UpperCAmelCase_): __SCREAMING_SNAKE_CASE = ['''flax'''] def __init__( self , *lowercase , **lowercase ) -> int: requires_backends(self , ["""flax"""] ) @classmethod def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Optional[int]: requires_backends(cls , ["""flax"""] ) @classmethod def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> List[str]: requires_backends(cls , ["""flax"""] ) class UpperCAmelCase__ ( metaclass=UpperCAmelCase_): __SCREAMING_SNAKE_CASE = ['''flax'''] def __init__( self , *lowercase , **lowercase ) -> int: requires_backends(self , ["""flax"""] ) @classmethod def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> str: requires_backends(cls , ["""flax"""] ) @classmethod def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> str: requires_backends(cls , ["""flax"""] ) class UpperCAmelCase__ ( metaclass=UpperCAmelCase_): __SCREAMING_SNAKE_CASE = ['''flax'''] def __init__( self , *lowercase , **lowercase ) -> int: requires_backends(self , ["""flax"""] ) @classmethod def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Tuple: requires_backends(cls , ["""flax"""] ) @classmethod def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Tuple: requires_backends(cls , ["""flax"""] ) class UpperCAmelCase__ ( metaclass=UpperCAmelCase_): __SCREAMING_SNAKE_CASE = ['''flax'''] def __init__( self , *lowercase , **lowercase ) -> Optional[int]: requires_backends(self , ["""flax"""] ) @classmethod def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Tuple: requires_backends(cls , ["""flax"""] ) @classmethod def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Union[str, Any]: requires_backends(cls , ["""flax"""] ) class UpperCAmelCase__ ( metaclass=UpperCAmelCase_): __SCREAMING_SNAKE_CASE = ['''flax'''] def __init__( self , *lowercase , **lowercase ) -> Any: requires_backends(self , ["""flax"""] ) @classmethod def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Tuple: requires_backends(cls , ["""flax"""] ) @classmethod def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> List[str]: requires_backends(cls , ["""flax"""] )
349
0
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging snake_case : Dict = logging.get_logger(__name__) snake_case : Tuple = { 'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/config.json', 'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/config.json', 'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/config.json', 'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/config.json', 'bert-base-multilingual-uncased': 'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json', 'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json', 'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/config.json', 'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/config.json', 'bert-large-uncased-whole-word-masking': ( 'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json' ), 'bert-large-cased-whole-word-masking': ( 'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json' ), 'bert-large-uncased-whole-word-masking-finetuned-squad': ( 'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json' ), 'bert-large-cased-whole-word-masking-finetuned-squad': ( 'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json' ), 'bert-base-cased-finetuned-mrpc': 'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json', 'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json', 'bert-base-german-dbmdz-uncased': 'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json', 'cl-tohoku/bert-base-japanese': 'https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json', 'cl-tohoku/bert-base-japanese-whole-word-masking': ( 'https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json' ), 'cl-tohoku/bert-base-japanese-char': ( 'https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json' ), 'cl-tohoku/bert-base-japanese-char-whole-word-masking': ( 'https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json' ), 'TurkuNLP/bert-base-finnish-cased-v1': ( 'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json' ), 'TurkuNLP/bert-base-finnish-uncased-v1': ( 'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json' ), 'wietsedv/bert-base-dutch-cased': 'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json', # See all BERT models at https://huggingface.co/models?filter=bert } class _snake_case ( UpperCAmelCase_ ): SCREAMING_SNAKE_CASE__ = 'bert' def __init__( self , _lowerCamelCase=3_0522 , _lowerCamelCase=768 , _lowerCamelCase=12 , _lowerCamelCase=12 , _lowerCamelCase=3072 , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=512 , _lowerCamelCase=2 , _lowerCamelCase=0.02 , _lowerCamelCase=1e-12 , _lowerCamelCase=0 , _lowerCamelCase="absolute" , _lowerCamelCase=True , _lowerCamelCase=None , **_lowerCamelCase , ): super().__init__(pad_token_id=_lowerCamelCase , **_lowerCamelCase ) a :Union[str, Any] = vocab_size a :Optional[int] = hidden_size a :Optional[Any] = num_hidden_layers a :Union[str, Any] = num_attention_heads a :List[Any] = hidden_act a :Tuple = intermediate_size a :Any = hidden_dropout_prob a :Tuple = attention_probs_dropout_prob a :List[Any] = max_position_embeddings a :Optional[Any] = type_vocab_size a :Any = initializer_range a :Optional[int] = layer_norm_eps a :Optional[int] = position_embedding_type a :Optional[int] = use_cache a :Tuple = classifier_dropout class _snake_case ( UpperCAmelCase_ ): @property def SCREAMING_SNAKE_CASE__ ( self ): if self.task == "multiple-choice": a :Any = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: a :List[str] = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ('''token_type_ids''', dynamic_axis), ] )
94
'''simple docstring''' import logging import os from .state import PartialState class UpperCAmelCase__ ( logging.LoggerAdapter): @staticmethod def __lowerCamelCase ( lowercase ) -> Dict: __UpperCamelCase = PartialState() return not main_process_only or (main_process_only and state.is_main_process) def __lowerCamelCase ( self , lowercase , lowercase , *lowercase , **lowercase ) -> List[str]: if PartialState._shared_state == {}: raise RuntimeError( """You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.""" ) __UpperCamelCase = kwargs.pop("""main_process_only""" , lowercase ) __UpperCamelCase = kwargs.pop("""in_order""" , lowercase ) if self.isEnabledFor(lowercase ): if self._should_log(lowercase ): __UpperCamelCase , __UpperCamelCase = self.process(lowercase , lowercase ) self.logger.log(lowercase , lowercase , *lowercase , **lowercase ) elif in_order: __UpperCamelCase = PartialState() for i in range(state.num_processes ): if i == state.process_index: __UpperCamelCase , __UpperCamelCase = self.process(lowercase , lowercase ) self.logger.log(lowercase , lowercase , *lowercase , **lowercase ) state.wait_for_everyone() def _lowercase ( __A ,__A = None ): '''simple docstring''' if log_level is None: __UpperCamelCase = os.environ.get("""ACCELERATE_LOG_LEVEL""" ,__A ) __UpperCamelCase = logging.getLogger(__A ) if log_level is not None: logger.setLevel(log_level.upper() ) logger.root.setLevel(log_level.upper() ) return MultiProcessAdapter(__A ,{} )
349
0
'''simple docstring''' def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__=False ): '''simple docstring''' if isinstance(__A , __A ) and isinstance(__A , __A ): A : Tuple = len(set_a.intersection(__A ) ) if alternative_union: A : int = len(__A ) + len(__A ) else: A : Optional[Any] = len(set_a.union(__A ) ) return intersection / union if isinstance(__A , (list, tuple) ) and isinstance(__A , (list, tuple) ): A : Tuple = [element for element in set_a if element in set_b] if alternative_union: A : Tuple = len(__A ) + len(__A ) return len(__A ) / union else: A : List[str] = set_a + [element for element in set_b if element not in set_a] return len(__A ) / len(__A ) return len(__A ) / len(__A ) return None if __name__ == "__main__": lowercase : List[Any] = {'a', 'b', 'c', 'd', 'e'} lowercase : List[str] = {'c', 'd', 'e', 'f', 'h', 'i'} print(jaccard_similarity(set_a, set_b))
3
'''simple docstring''' import logging import random import ray from transformers import RagConfig, RagRetriever, RagTokenizer from transformers.models.rag.retrieval_rag import CustomHFIndex a__ : Optional[Any] = logging.getLogger(__name__) class UpperCAmelCase__ : def __init__( self ) -> Any: __UpperCamelCase = False def __lowerCamelCase ( self , lowercase , lowercase , lowercase , lowercase ) -> str: if not self.initialized: __UpperCamelCase = RagRetriever( lowercase , question_encoder_tokenizer=lowercase , generator_tokenizer=lowercase , index=lowercase , init_retrieval=lowercase , ) __UpperCamelCase = True def __lowerCamelCase ( self ) -> Optional[Any]: self.retriever.index.init_index() def __lowerCamelCase ( self , lowercase , lowercase ) -> Dict: __UpperCamelCase , __UpperCamelCase = self.retriever._main_retrieve(lowercase , lowercase ) return doc_ids, retrieved_doc_embeds class UpperCAmelCase__ ( UpperCAmelCase_): def __init__( self , lowercase , lowercase , lowercase , lowercase , lowercase=None ) -> List[Any]: if index is not None and index.is_initialized() and len(lowercase ) > 0: raise ValueError( """When using Ray for distributed fine-tuning, """ """you'll need to provide the paths instead, """ """as the dataset and the index are loaded """ """separately. More info in examples/rag/use_own_knowledge_dataset.py """ ) super().__init__( lowercase , question_encoder_tokenizer=lowercase , generator_tokenizer=lowercase , index=lowercase , init_retrieval=lowercase , ) __UpperCamelCase = retrieval_workers if len(self.retrieval_workers ) > 0: ray.get( [ worker.create_rag_retriever.remote(lowercase , lowercase , lowercase , lowercase ) for worker in self.retrieval_workers ] ) def __lowerCamelCase ( self ) -> Dict: logger.info("""initializing retrieval""" ) if len(self.retrieval_workers ) > 0: ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers] ) else: # Non-distributed training. Load index into this same process. self.index.init_index() def __lowerCamelCase ( self , lowercase , lowercase ) -> List[str]: if len(self.retrieval_workers ) > 0: # Select a random retrieval actor. __UpperCamelCase = self.retrieval_workers[random.randint(0 , len(self.retrieval_workers ) - 1 )] __UpperCamelCase , __UpperCamelCase = ray.get(random_worker.retrieve.remote(lowercase , lowercase ) ) else: __UpperCamelCase , __UpperCamelCase = self._main_retrieve(lowercase , lowercase ) return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(lowercase ) @classmethod def __lowerCamelCase ( cls , lowercase , lowercase=None , **lowercase ) -> Any: return super(lowercase , cls ).get_tokenizers(lowercase , lowercase , **lowercase ) @classmethod def __lowerCamelCase ( cls , lowercase , lowercase , lowercase=None , **lowercase ) -> int: __UpperCamelCase = kwargs.pop("""config""" , lowercase ) or RagConfig.from_pretrained(lowercase , **lowercase ) __UpperCamelCase = RagTokenizer.from_pretrained(lowercase , config=lowercase ) __UpperCamelCase = rag_tokenizer.question_encoder __UpperCamelCase = rag_tokenizer.generator if indexed_dataset is not None: __UpperCamelCase = """custom""" __UpperCamelCase = CustomHFIndex(config.retrieval_vector_size , lowercase ) else: __UpperCamelCase = cls._build_index(lowercase ) return cls( lowercase , question_encoder_tokenizer=lowercase , generator_tokenizer=lowercase , retrieval_workers=lowercase , index=lowercase , )
349
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) _lowerCamelCase ={ 'configuration_roberta_prelayernorm': [ 'ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RobertaPreLayerNormConfig', 'RobertaPreLayerNormOnnxConfig', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase =[ 'ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST', 'RobertaPreLayerNormForCausalLM', 'RobertaPreLayerNormForMaskedLM', 'RobertaPreLayerNormForMultipleChoice', 'RobertaPreLayerNormForQuestionAnswering', 'RobertaPreLayerNormForSequenceClassification', 'RobertaPreLayerNormForTokenClassification', 'RobertaPreLayerNormModel', 'RobertaPreLayerNormPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase =[ 'TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFRobertaPreLayerNormForCausalLM', 'TFRobertaPreLayerNormForMaskedLM', 'TFRobertaPreLayerNormForMultipleChoice', 'TFRobertaPreLayerNormForQuestionAnswering', 'TFRobertaPreLayerNormForSequenceClassification', 'TFRobertaPreLayerNormForTokenClassification', 'TFRobertaPreLayerNormMainLayer', 'TFRobertaPreLayerNormModel', 'TFRobertaPreLayerNormPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase =[ 'FlaxRobertaPreLayerNormForCausalLM', 'FlaxRobertaPreLayerNormForMaskedLM', 'FlaxRobertaPreLayerNormForMultipleChoice', 'FlaxRobertaPreLayerNormForQuestionAnswering', 'FlaxRobertaPreLayerNormForSequenceClassification', 'FlaxRobertaPreLayerNormForTokenClassification', 'FlaxRobertaPreLayerNormModel', 'FlaxRobertaPreLayerNormPreTrainedModel', ] if TYPE_CHECKING: from .configuration_roberta_prelayernorm import ( ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaPreLayerNormConfig, RobertaPreLayerNormOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roberta_prelayernorm import ( ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST, RobertaPreLayerNormForCausalLM, RobertaPreLayerNormForMaskedLM, RobertaPreLayerNormForMultipleChoice, RobertaPreLayerNormForQuestionAnswering, RobertaPreLayerNormForSequenceClassification, RobertaPreLayerNormForTokenClassification, RobertaPreLayerNormModel, RobertaPreLayerNormPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_roberta_prelayernorm import ( TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST, TFRobertaPreLayerNormForCausalLM, TFRobertaPreLayerNormForMaskedLM, TFRobertaPreLayerNormForMultipleChoice, TFRobertaPreLayerNormForQuestionAnswering, TFRobertaPreLayerNormForSequenceClassification, TFRobertaPreLayerNormForTokenClassification, TFRobertaPreLayerNormMainLayer, TFRobertaPreLayerNormModel, TFRobertaPreLayerNormPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_roberta_prelayernorm import ( FlaxRobertaPreLayerNormForCausalLM, FlaxRobertaPreLayerNormForMaskedLM, FlaxRobertaPreLayerNormForMultipleChoice, FlaxRobertaPreLayerNormForQuestionAnswering, FlaxRobertaPreLayerNormForSequenceClassification, FlaxRobertaPreLayerNormForTokenClassification, FlaxRobertaPreLayerNormModel, FlaxRobertaPreLayerNormPreTrainedModel, ) else: import sys _lowerCamelCase =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
334
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_squeezebert import SqueezeBertTokenizer a__ : Optional[Any] = logging.get_logger(__name__) a__ : Union[str, Any] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} a__ : Any = { 'vocab_file': { 'squeezebert/squeezebert-uncased': ( 'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt' ), 'squeezebert/squeezebert-mnli': 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt', 'squeezebert/squeezebert-mnli-headless': ( 'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'squeezebert/squeezebert-uncased': ( 'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json' ), 'squeezebert/squeezebert-mnli': ( 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json' ), 'squeezebert/squeezebert-mnli-headless': ( 'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json' ), }, } a__ : Optional[Any] = { 'squeezebert/squeezebert-uncased': 5_1_2, 'squeezebert/squeezebert-mnli': 5_1_2, 'squeezebert/squeezebert-mnli-headless': 5_1_2, } a__ : Optional[Any] = { 'squeezebert/squeezebert-uncased': {'do_lower_case': True}, 'squeezebert/squeezebert-mnli': {'do_lower_case': True}, 'squeezebert/squeezebert-mnli-headless': {'do_lower_case': True}, } class UpperCAmelCase__ ( UpperCAmelCase_): __SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES __SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP __SCREAMING_SNAKE_CASE = PRETRAINED_INIT_CONFIGURATION __SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __SCREAMING_SNAKE_CASE = SqueezeBertTokenizer def __init__( self , lowercase=None , lowercase=None , lowercase=True , lowercase="[UNK]" , lowercase="[SEP]" , lowercase="[PAD]" , lowercase="[CLS]" , lowercase="[MASK]" , lowercase=True , lowercase=None , **lowercase , ) -> Tuple: super().__init__( lowercase , tokenizer_file=lowercase , do_lower_case=lowercase , unk_token=lowercase , sep_token=lowercase , pad_token=lowercase , cls_token=lowercase , mask_token=lowercase , tokenize_chinese_chars=lowercase , strip_accents=lowercase , **lowercase , ) __UpperCamelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get("""lowercase""" , lowercase ) != do_lower_case or normalizer_state.get("""strip_accents""" , lowercase ) != strip_accents or normalizer_state.get("""handle_chinese_chars""" , lowercase ) != tokenize_chinese_chars ): __UpperCamelCase = getattr(lowercase , normalizer_state.pop("""type""" ) ) __UpperCamelCase = do_lower_case __UpperCamelCase = strip_accents __UpperCamelCase = tokenize_chinese_chars __UpperCamelCase = normalizer_class(**lowercase ) __UpperCamelCase = do_lower_case def __lowerCamelCase ( self , lowercase , lowercase=None ) -> Tuple: __UpperCamelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def __lowerCamelCase ( self , lowercase , lowercase = None ) -> List[int]: __UpperCamelCase = [self.sep_token_id] __UpperCamelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __lowerCamelCase ( self , lowercase , lowercase = None ) -> Tuple[str]: __UpperCamelCase = self._tokenizer.model.save(lowercase , name=lowercase ) return tuple(lowercase )
349
0
from __future__ import annotations import bisect def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = 0 , UpperCamelCase__ = -1 ) -> str: '''simple docstring''' if hi < 0: UpperCAmelCase = len(__A ) while lo < hi: UpperCAmelCase = lo + (hi - lo) // 2 if sorted_collection[mid] < item: UpperCAmelCase = mid + 1 else: UpperCAmelCase = mid return lo def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = 0 , UpperCamelCase__ = -1 ) -> Optional[Any]: '''simple docstring''' if hi < 0: UpperCAmelCase = len(__A ) while lo < hi: UpperCAmelCase = lo + (hi - lo) // 2 if sorted_collection[mid] <= item: UpperCAmelCase = mid + 1 else: UpperCAmelCase = mid return lo def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = 0 , UpperCamelCase__ = -1 ) -> List[Any]: '''simple docstring''' sorted_collection.insert(bisect_left(__A , __A , __A , __A ) , __A ) def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = 0 , UpperCamelCase__ = -1 ) -> int: '''simple docstring''' sorted_collection.insert(bisect_right(__A , __A , __A , __A ) , __A ) def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ ) -> Dict: '''simple docstring''' UpperCAmelCase = 0 UpperCAmelCase = len(__A ) - 1 while left <= right: UpperCAmelCase = left + (right - left) // 2 UpperCAmelCase = sorted_collection[midpoint] if current_item == item: return midpoint elif item < current_item: UpperCAmelCase = midpoint - 1 else: UpperCAmelCase = midpoint + 1 return None def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ ) -> str: '''simple docstring''' UpperCAmelCase = bisect.bisect_left(__A , __A ) if index != len(__A ) and sorted_collection[index] == item: return index return None def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> List[Any]: '''simple docstring''' if right < left: return None UpperCAmelCase = left + (right - left) // 2 if sorted_collection[midpoint] == item: return midpoint elif sorted_collection[midpoint] > item: return binary_search_by_recursion(__A , __A , __A , midpoint - 1 ) else: return binary_search_by_recursion(__A , __A , midpoint + 1 , __A ) if __name__ == "__main__": __A : Optional[Any] = input("Enter numbers separated by comma:\n").strip() __A : Any = sorted(int(item) for item in user_input.split(",")) __A : str = int(input("Enter a single number to be found in the list:\n")) __A : Union[str, Any] = binary_search(collection, target) if result is None: print(F'{target} was not found in {collection}.') else: print(F'{target} was found at position {result} in {collection}.')
273
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_torch_available, ) a__ : str = { 'configuration_trocr': ['TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TrOCRConfig'], 'processing_trocr': ['TrOCRProcessor'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : int = [ 'TROCR_PRETRAINED_MODEL_ARCHIVE_LIST', 'TrOCRForCausalLM', 'TrOCRPreTrainedModel', ] if TYPE_CHECKING: from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig from .processing_trocr import TrOCRProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel else: import sys a__ : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
349
0
import copy import json import os import tempfile from transformers import is_torch_available from .test_configuration_utils import config_common_kwargs class SCREAMING_SNAKE_CASE ( UpperCAmelCase_ ): def __init__( self : Optional[int] , __lowercase : Tuple , __lowercase : Any=None , __lowercase : Dict=True , __lowercase : List[Any]=None , **__lowercase : List[str] ): '''simple docstring''' __a = parent __a = config_class __a = has_text_modality __a = kwargs __a = common_properties def UpperCamelCase_ ( self : Optional[Any] ): '''simple docstring''' __a = self.config_class(**self.inputs_dict ) __a = ( ["""hidden_size""", """num_attention_heads""", """num_hidden_layers"""] if self.common_properties is None else self.common_properties ) # Add common fields for text models if self.has_text_modality: common_properties.extend(["""vocab_size"""] ) # Test that config has the common properties as getters for prop in common_properties: self.parent.assertTrue(hasattr(__lowercase , __lowercase ) , msg=F"`{prop}` does not exist" ) # Test that config has the common properties as setter for idx, name in enumerate(__lowercase ): try: setattr(__lowercase , __lowercase , __lowercase ) self.parent.assertEqual( getattr(__lowercase , __lowercase ) , __lowercase , msg=F"`{name} value {idx} expected, but was {getattr(__lowercase , __lowercase )}" ) except NotImplementedError: # Some models might not be able to implement setters for common_properties # In that case, a NotImplementedError is raised pass # Test if config class can be called with Config(prop_name=..) for idx, name in enumerate(__lowercase ): try: __a = self.config_class(**{name: idx} ) self.parent.assertEqual( getattr(__lowercase , __lowercase ) , __lowercase , msg=F"`{name} value {idx} expected, but was {getattr(__lowercase , __lowercase )}" ) except NotImplementedError: # Some models might not be able to implement setters for common_properties # In that case, a NotImplementedError is raised pass def UpperCamelCase_ ( self : int ): '''simple docstring''' __a = self.config_class(**self.inputs_dict ) __a = json.loads(config.to_json_string() ) for key, value in self.inputs_dict.items(): self.parent.assertEqual(obj[key] , __lowercase ) def UpperCamelCase_ ( self : Optional[int] ): '''simple docstring''' __a = self.config_class(**self.inputs_dict ) with tempfile.TemporaryDirectory() as tmpdirname: __a = os.path.join(__lowercase , """config.json""" ) config_first.to_json_file(__lowercase ) __a = self.config_class.from_json_file(__lowercase ) self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() ) def UpperCamelCase_ ( self : int ): '''simple docstring''' __a = self.config_class(**self.inputs_dict ) with tempfile.TemporaryDirectory() as tmpdirname: config_first.save_pretrained(__lowercase ) __a = self.config_class.from_pretrained(__lowercase ) self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() ) def UpperCamelCase_ ( self : Optional[int] ): '''simple docstring''' __a = self.config_class(**self.inputs_dict ) __a = """test""" with tempfile.TemporaryDirectory() as tmpdirname: __a = os.path.join(__lowercase , __lowercase ) config_first.save_pretrained(__lowercase ) __a = self.config_class.from_pretrained(__lowercase , subfolder=__lowercase ) self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() ) def UpperCamelCase_ ( self : Any ): '''simple docstring''' __a = self.config_class(**self.inputs_dict , num_labels=5 ) self.parent.assertEqual(len(config.idalabel ) , 5 ) self.parent.assertEqual(len(config.labelaid ) , 5 ) __a = 3 self.parent.assertEqual(len(config.idalabel ) , 3 ) self.parent.assertEqual(len(config.labelaid ) , 3 ) def UpperCamelCase_ ( self : List[str] ): '''simple docstring''' if self.config_class.is_composition: return __a = self.config_class() self.parent.assertIsNotNone(__lowercase ) def UpperCamelCase_ ( self : Optional[int] ): '''simple docstring''' __a = copy.deepcopy(__lowercase ) __a = self.config_class(**__lowercase ) __a = [] for key, value in config_common_kwargs.items(): if key == "torch_dtype": if not is_torch_available(): continue else: import torch if config.torch_dtype != torch.floataa: wrong_values.append(("""torch_dtype""", config.torch_dtype, torch.floataa) ) elif getattr(__lowercase , __lowercase ) != value: wrong_values.append((key, getattr(__lowercase , __lowercase ), value) ) if len(__lowercase ) > 0: __a = """\n""".join([F"- {v[0]}: got {v[1]} instead of {v[2]}" for v in wrong_values] ) raise ValueError(F"The following keys were not properly set in the config:\n{errors}" ) def UpperCamelCase_ ( self : Optional[Any] ): '''simple docstring''' self.create_and_test_config_common_properties() self.create_and_test_config_to_json_string() self.create_and_test_config_to_json_file() self.create_and_test_config_from_and_save_pretrained() self.create_and_test_config_from_and_save_pretrained_subfolder() self.create_and_test_config_with_num_labels() self.check_config_can_be_init_without_params() self.check_config_arguments_init()
302
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig a__ : Union[str, Any] = { 'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/config.json', 'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/config.json', 'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/config.json', 'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json', 'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/config.json', 'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/config.json', 'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/config.json', 'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json', } class UpperCAmelCase__ ( UpperCAmelCase_): __SCREAMING_SNAKE_CASE = '''albert''' def __init__( self , lowercase=3_0_0_0_0 , lowercase=1_2_8 , lowercase=4_0_9_6 , lowercase=1_2 , lowercase=1 , lowercase=6_4 , lowercase=1_6_3_8_4 , lowercase=1 , lowercase="gelu_new" , lowercase=0 , lowercase=0 , lowercase=5_1_2 , lowercase=2 , lowercase=0.02 , lowercase=1E-12 , lowercase=0.1 , lowercase="absolute" , lowercase=0 , lowercase=2 , lowercase=3 , **lowercase , ) -> Any: super().__init__(pad_token_id=lowercase , bos_token_id=lowercase , eos_token_id=lowercase , **lowercase ) __UpperCamelCase = vocab_size __UpperCamelCase = embedding_size __UpperCamelCase = hidden_size __UpperCamelCase = num_hidden_layers __UpperCamelCase = num_hidden_groups __UpperCamelCase = num_attention_heads __UpperCamelCase = inner_group_num __UpperCamelCase = hidden_act __UpperCamelCase = intermediate_size __UpperCamelCase = hidden_dropout_prob __UpperCamelCase = attention_probs_dropout_prob __UpperCamelCase = max_position_embeddings __UpperCamelCase = type_vocab_size __UpperCamelCase = initializer_range __UpperCamelCase = layer_norm_eps __UpperCamelCase = classifier_dropout_prob __UpperCamelCase = position_embedding_type class UpperCAmelCase__ ( UpperCAmelCase_): @property def __lowerCamelCase ( self ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": __UpperCamelCase = {0: """batch""", 1: """choice""", 2: """sequence"""} else: __UpperCamelCase = {0: """batch""", 1: """sequence"""} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis), ] )
349
0
"""simple docstring""" import logging import os from dataclasses import dataclass from typing import List, Optional, Union import tqdm from filelock import FileLock from transformers import ( BartTokenizer, BartTokenizerFast, DataProcessor, PreTrainedTokenizer, RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, is_tf_available, is_torch_available, ) lowercase__ = logging.getLogger(__name__) @dataclass(frozen=UpperCAmelCase_ ) class __lowerCamelCase : '''simple docstring''' a_ : int = 42 a_ : List[str] = 42 a_ : Dict = None a_ : Any = None a_ : Tuple = None @dataclass(frozen=UpperCAmelCase_ ) class __lowerCamelCase : '''simple docstring''' a_ : Dict = 42 a_ : Union[str, Any] = None a_ : List[Any] = None a_ : Optional[Any] = None a_ : Optional[int] = None if is_torch_available(): import torch from torch.utils.data import Dataset class __lowerCamelCase ( UpperCAmelCase_ ): '''simple docstring''' a_ : str = 42 def __init__( self : Optional[Any] , a_ : Any , a_ : str , a_ : Union[str, Any] , a_ : Any = None , a_ : Optional[Any]=False , a_ : List[str] = False , ): lowerCAmelCase_ : int = hans_processors[task]() lowerCAmelCase_ : List[str] = os.path.join( a_ , "cached_{}_{}_{}_{}".format( "dev" if evaluate else "train" , tokenizer.__class__.__name__ , str(a_ ) , a_ , ) , ) lowerCAmelCase_ : Optional[Any] = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) lowerCAmelCase_ , lowerCAmelCase_ : Optional[Any] = label_list[2], label_list[1] lowerCAmelCase_ : Union[str, Any] = label_list # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. lowerCAmelCase_ : Optional[Any] = cached_features_file + ".lock" with FileLock(a_ ): if os.path.exists(a_ ) and not overwrite_cache: logger.info(f'''Loading features from cached file {cached_features_file}''' ) lowerCAmelCase_ : List[Any] = torch.load(a_ ) else: logger.info(f'''Creating features from dataset file at {data_dir}''' ) lowerCAmelCase_ : Union[str, Any] = ( processor.get_dev_examples(a_ ) if evaluate else processor.get_train_examples(a_ ) ) logger.info("Training examples: %s" , len(a_ ) ) lowerCAmelCase_ : Optional[int] = hans_convert_examples_to_features(a_ , a_ , a_ , a_ ) logger.info("Saving features into cached file %s" , a_ ) torch.save(self.features , a_ ) def __len__( self : List[str] ): return len(self.features ) def __getitem__( self : List[str] , a_ : Dict ): return self.features[i] def lowerCamelCase ( self : Any ): return self.label_list if is_tf_available(): import tensorflow as tf class __lowerCamelCase : '''simple docstring''' a_ : Dict = 42 def __init__( self : str , a_ : str , a_ : List[Any] , a_ : Any , a_ : List[str] = 1_28 , a_ : List[Any]=False , a_ : Dict = False , ): lowerCAmelCase_ : Tuple = hans_processors[task]() lowerCAmelCase_ : List[Any] = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) lowerCAmelCase_ , lowerCAmelCase_ : Optional[Any] = label_list[2], label_list[1] lowerCAmelCase_ : int = label_list lowerCAmelCase_ : Tuple = processor.get_dev_examples(a_ ) if evaluate else processor.get_train_examples(a_ ) lowerCAmelCase_ : Any = hans_convert_examples_to_features(a_ , a_ , a_ , a_ ) def gen(): for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc="convert examples to features" ): if ex_index % 1_00_00 == 0: logger.info("Writing example %d of %d" % (ex_index, len(a_ )) ) yield ( { "example_id": 0, "input_ids": ex.input_ids, "attention_mask": ex.attention_mask, "token_type_ids": ex.token_type_ids, }, ex.label, ) lowerCAmelCase_ : Optional[int] = tf.data.Dataset.from_generator( a_ , ( { "example_id": tf.intaa, "input_ids": tf.intaa, "attention_mask": tf.intaa, "token_type_ids": tf.intaa, }, tf.intaa, ) , ( { "example_id": tf.TensorShape([] ), "input_ids": tf.TensorShape([None, None] ), "attention_mask": tf.TensorShape([None, None] ), "token_type_ids": tf.TensorShape([None, None] ), }, tf.TensorShape([] ), ) , ) def lowerCamelCase ( self : Optional[int] ): return self.dataset def __len__( self : Tuple ): return len(self.features ) def __getitem__( self : List[Any] , a_ : Dict ): return self.features[i] def lowerCamelCase ( self : Any ): return self.label_list class __lowerCamelCase ( UpperCAmelCase_ ): '''simple docstring''' def lowerCamelCase ( self : Dict , a_ : Optional[Any] ): return self._create_examples(self._read_tsv(os.path.join(a_ , "heuristics_train_set.txt" ) ) , "train" ) def lowerCamelCase ( self : Dict , a_ : Union[str, Any] ): return self._create_examples(self._read_tsv(os.path.join(a_ , "heuristics_evaluation_set.txt" ) ) , "dev" ) def lowerCamelCase ( self : List[str] ): return ["contradiction", "entailment", "neutral"] def lowerCamelCase ( self : str , a_ : Dict , a_ : Union[str, Any] ): lowerCAmelCase_ : List[Any] = [] for i, line in enumerate(a_ ): if i == 0: continue lowerCAmelCase_ : int = "%s-%s" % (set_type, line[0]) lowerCAmelCase_ : int = line[5] lowerCAmelCase_ : Optional[Any] = line[6] lowerCAmelCase_ : str = line[7][2:] if line[7].startswith("ex" ) else line[7] lowerCAmelCase_ : Dict = line[0] examples.append(InputExample(guid=a_ , text_a=a_ , text_b=a_ , label=a_ , pairID=a_ ) ) return examples def __lowerCamelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , ) -> Optional[int]: """simple docstring""" lowerCAmelCase_ : int = {label: i for i, label in enumerate(__A )} lowerCAmelCase_ : List[str] = [] for ex_index, example in tqdm.tqdm(enumerate(__A ) , desc="convert examples to features" ): if ex_index % 10000 == 0: logger.info("Writing example %d" % (ex_index) ) lowerCAmelCase_ : Any = tokenizer( example.text_a , example.text_b , add_special_tokens=__A , max_length=__A , padding="max_length" , truncation=__A , return_overflowing_tokens=__A , ) lowerCAmelCase_ : List[Any] = label_map[example.label] if example.label in label_map else 0 lowerCAmelCase_ : str = int(example.pairID ) features.append(InputFeatures(**__A , label=__A , pairID=__A ) ) for i, example in enumerate(examples[:5] ): logger.info("*** Example ***" ) logger.info(f'''guid: {example}''' ) logger.info(f'''features: {features[i]}''' ) return features lowercase__ = { 'hans': 3, } lowercase__ = { 'hans': HansProcessor, }
241
'''simple docstring''' import numpy as np from sklearn.datasets import fetch_california_housing from sklearn.metrics import mean_absolute_error, mean_squared_error from sklearn.model_selection import train_test_split from xgboost import XGBRegressor def _lowercase ( __A ): '''simple docstring''' return (data["data"], data["target"]) def _lowercase ( __A ,__A ,__A ): '''simple docstring''' __UpperCamelCase = XGBRegressor(verbosity=0 ,random_state=42 ) xgb.fit(__A ,__A ) # Predict target for test data __UpperCamelCase = xgb.predict(__A ) __UpperCamelCase = predictions.reshape(len(__A ) ,1 ) return predictions def _lowercase ( ): '''simple docstring''' __UpperCamelCase = fetch_california_housing() __UpperCamelCase , __UpperCamelCase = data_handling(__A ) __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = train_test_split( __A ,__A ,test_size=0.25 ,random_state=1 ) __UpperCamelCase = xgboost(__A ,__A ,__A ) # Error printing print(f"Mean Absolute Error : {mean_absolute_error(__A ,__A )}" ) print(f"Mean Square Error : {mean_squared_error(__A ,__A )}" ) if __name__ == "__main__": import doctest doctest.testmod(verbose=True) main()
349
0
import requests _UpperCamelCase = 'YOUR API KEY' def lowerCAmelCase__( lowercase : int , lowercase : Optional[Any] = giphy_api_key ) -> List[str]: __snake_case : Optional[int] = "+".join(query.split() ) __snake_case : Tuple = f"""https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}""" __snake_case : Optional[int] = requests.get(__A ).json()["data"] return [gif["url"] for gif in gifs] if __name__ == "__main__": print('''\n'''.join(get_gifs('''space ship''')))
326
'''simple docstring''' from __future__ import annotations import unittest from transformers import AutoTokenizer, PegasusConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel @require_tf class UpperCAmelCase__ : __SCREAMING_SNAKE_CASE = PegasusConfig __SCREAMING_SNAKE_CASE = {} __SCREAMING_SNAKE_CASE = '''gelu''' def __init__( self , lowercase , lowercase=1_3 , lowercase=7 , lowercase=True , lowercase=False , lowercase=9_9 , lowercase=3_2 , lowercase=2 , lowercase=4 , lowercase=3_7 , lowercase=0.1 , lowercase=0.1 , lowercase=4_0 , lowercase=2 , lowercase=1 , lowercase=0 , ) -> Any: __UpperCamelCase = parent __UpperCamelCase = batch_size __UpperCamelCase = seq_length __UpperCamelCase = is_training __UpperCamelCase = use_labels __UpperCamelCase = vocab_size __UpperCamelCase = hidden_size __UpperCamelCase = num_hidden_layers __UpperCamelCase = num_attention_heads __UpperCamelCase = intermediate_size __UpperCamelCase = hidden_dropout_prob __UpperCamelCase = attention_probs_dropout_prob __UpperCamelCase = max_position_embeddings __UpperCamelCase = eos_token_id __UpperCamelCase = pad_token_id __UpperCamelCase = bos_token_id def __lowerCamelCase ( self ) -> Dict: __UpperCamelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) __UpperCamelCase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) __UpperCamelCase = tf.concat([input_ids, eos_tensor] , axis=1 ) __UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCamelCase = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) __UpperCamelCase = prepare_pegasus_inputs_dict(lowercase , lowercase , lowercase ) return config, inputs_dict def __lowerCamelCase ( self , lowercase , lowercase ) -> Union[str, Any]: __UpperCamelCase = TFPegasusModel(config=lowercase ).get_decoder() __UpperCamelCase = inputs_dict["""input_ids"""] __UpperCamelCase = input_ids[:1, :] __UpperCamelCase = inputs_dict["""attention_mask"""][:1, :] __UpperCamelCase = inputs_dict["""head_mask"""] __UpperCamelCase = 1 # first forward pass __UpperCamelCase = model(lowercase , attention_mask=lowercase , head_mask=lowercase , use_cache=lowercase ) __UpperCamelCase , __UpperCamelCase = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids __UpperCamelCase = ids_tensor((self.batch_size, 3) , config.vocab_size ) __UpperCamelCase = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and __UpperCamelCase = tf.concat([input_ids, next_tokens] , axis=-1 ) __UpperCamelCase = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) __UpperCamelCase = model(lowercase , attention_mask=lowercase )[0] __UpperCamelCase = model(lowercase , attention_mask=lowercase , past_key_values=lowercase )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice __UpperCamelCase = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) __UpperCamelCase = output_from_no_past[:, -3:, random_slice_idx] __UpperCamelCase = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(lowercase , lowercase , rtol=1E-3 ) def _lowercase ( __A ,__A ,__A ,__A=None ,__A=None ,__A=None ,__A=None ,__A=None ,): '''simple docstring''' if attention_mask is None: __UpperCamelCase = tf.cast(tf.math.not_equal(__A ,config.pad_token_id ) ,tf.inta ) if decoder_attention_mask is None: __UpperCamelCase = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape ,dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] ,config.pad_token_id ) ,tf.inta ), ] ,axis=-1 ,) if head_mask is None: __UpperCamelCase = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: __UpperCamelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: __UpperCamelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class UpperCAmelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase): __SCREAMING_SNAKE_CASE = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else () __SCREAMING_SNAKE_CASE = (TFPegasusForConditionalGeneration,) if is_tf_available() else () __SCREAMING_SNAKE_CASE = ( { '''conversational''': TFPegasusForConditionalGeneration, '''feature-extraction''': TFPegasusModel, '''summarization''': TFPegasusForConditionalGeneration, '''text2text-generation''': TFPegasusForConditionalGeneration, '''translation''': TFPegasusForConditionalGeneration, } if is_tf_available() else {} ) __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = False def __lowerCamelCase ( self ) -> str: __UpperCamelCase = TFPegasusModelTester(self ) __UpperCamelCase = ConfigTester(self , config_class=lowercase ) def __lowerCamelCase ( self ) -> str: self.config_tester.run_common_tests() def __lowerCamelCase ( self ) -> Tuple: __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*lowercase ) @require_sentencepiece @require_tokenizers @require_tf class UpperCAmelCase__ ( unittest.TestCase): __SCREAMING_SNAKE_CASE = [ ''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''', ''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''', ] __SCREAMING_SNAKE_CASE = [ '''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to''' ''' reduce the risk of wildfires.''', '''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''', ] # differs slightly from pytorch, likely due to numerical differences in linear layers __SCREAMING_SNAKE_CASE = '''google/pegasus-xsum''' @cached_property def __lowerCamelCase ( self ) -> int: return AutoTokenizer.from_pretrained(self.model_name ) @cached_property def __lowerCamelCase ( self ) -> str: __UpperCamelCase = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model def __lowerCamelCase ( self , **lowercase ) -> Optional[int]: __UpperCamelCase = self.translate_src_text(**lowercase ) assert self.expected_text == generated_words def __lowerCamelCase ( self , **lowercase ) -> Optional[Any]: __UpperCamelCase = self.tokenizer(self.src_text , **lowercase , padding=lowercase , return_tensors="""tf""" ) __UpperCamelCase = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=lowercase , ) __UpperCamelCase = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=lowercase ) return generated_words @slow def __lowerCamelCase ( self ) -> Dict: self._assert_generated_batch_equal_expected()
349
0
"""simple docstring""" from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments from transformers.testing_utils import TestCasePlus, require_torch, slow from transformers.utils import is_datasets_available if is_datasets_available(): import datasets class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase_ ): @slow @require_torch def __lowercase ( self : Union[str, Any] ): lowerCAmelCase = EncoderDecoderModel.from_encoder_decoder_pretrained("""prajjwal1/bert-tiny""" , """prajjwal1/bert-tiny""" ) lowerCAmelCase = BertTokenizer.from_pretrained("""bert-base-uncased""" ) lowerCAmelCase = bertabert.config.encoder.vocab_size lowerCAmelCase = tokenizer.sep_token_id lowerCAmelCase = tokenizer.cls_token_id lowerCAmelCase = 128 lowerCAmelCase = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""train[:1%]""" ) lowerCAmelCase = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""validation[:1%]""" ) lowerCAmelCase = train_dataset.select(range(32 ) ) lowerCAmelCase = val_dataset.select(range(16 ) ) lowerCAmelCase = 4 def _map_to_encoder_decoder_inputs(lowerCAmelCase : List[str] ): # Tokenizer will automatically set [BOS] <text> [EOS] lowerCAmelCase = tokenizer(batch["""article"""] , padding="""max_length""" , truncation=lowerCAmelCase , max_length=512 ) lowerCAmelCase = tokenizer(batch["""highlights"""] , padding="""max_length""" , truncation=lowerCAmelCase , max_length=128 ) lowerCAmelCase = inputs.input_ids lowerCAmelCase = inputs.attention_mask lowerCAmelCase = outputs.input_ids lowerCAmelCase = outputs.input_ids.copy() lowerCAmelCase = [ [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["""labels"""] ] lowerCAmelCase = outputs.attention_mask assert all(len(lowerCAmelCase ) == 512 for x in inputs.input_ids ) assert all(len(lowerCAmelCase ) == 128 for x in outputs.input_ids ) return batch def _compute_metrics(lowerCAmelCase : Tuple ): lowerCAmelCase = pred.label_ids lowerCAmelCase = pred.predictions # all unnecessary tokens are removed lowerCAmelCase = tokenizer.batch_decode(lowerCAmelCase , skip_special_tokens=lowerCAmelCase ) lowerCAmelCase = tokenizer.batch_decode(lowerCAmelCase , skip_special_tokens=lowerCAmelCase ) lowerCAmelCase = sum([int(pred_str[i] == label_str[i] ) for i in range(len(lowerCAmelCase ) )] ) / len(lowerCAmelCase ) return {"accuracy": accuracy} # map train dataset lowerCAmelCase = train_dataset.map( _map_to_encoder_decoder_inputs , batched=lowerCAmelCase , batch_size=lowerCAmelCase , remove_columns=["""article""", """highlights"""] , ) train_dataset.set_format( type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , ) # same for validation dataset lowerCAmelCase = val_dataset.map( _map_to_encoder_decoder_inputs , batched=lowerCAmelCase , batch_size=lowerCAmelCase , remove_columns=["""article""", """highlights"""] , ) val_dataset.set_format( type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , ) lowerCAmelCase = self.get_auto_remove_tmp_dir() lowerCAmelCase = SeqaSeqTrainingArguments( output_dir=lowerCAmelCase , per_device_train_batch_size=lowerCAmelCase , per_device_eval_batch_size=lowerCAmelCase , predict_with_generate=lowerCAmelCase , evaluation_strategy="""steps""" , do_train=lowerCAmelCase , do_eval=lowerCAmelCase , warmup_steps=0 , eval_steps=2 , logging_steps=2 , ) # instantiate trainer lowerCAmelCase = SeqaSeqTrainer( model=lowerCAmelCase , args=lowerCAmelCase , compute_metrics=_compute_metrics , train_dataset=lowerCAmelCase , eval_dataset=lowerCAmelCase , tokenizer=lowerCAmelCase , ) # start training trainer.train()
155
'''simple docstring''' import string def _lowercase ( __A ): '''simple docstring''' for key in range(len(string.ascii_uppercase ) ): __UpperCamelCase = """""" for symbol in message: if symbol in string.ascii_uppercase: __UpperCamelCase = string.ascii_uppercase.find(__A ) __UpperCamelCase = num - key if num < 0: __UpperCamelCase = num + len(string.ascii_uppercase ) __UpperCamelCase = translated + string.ascii_uppercase[num] else: __UpperCamelCase = translated + symbol print(f"Decryption using Key #{key}: {translated}" ) def _lowercase ( ): '''simple docstring''' __UpperCamelCase = input("""Encrypted message: """ ) __UpperCamelCase = message.upper() decrypt(__A ) if __name__ == "__main__": import doctest doctest.testmod() main()
349
0
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCamelCase_ = logging.get_logger(__name__) lowerCamelCase_ = { 'facebook/dpr-ctx_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json' ), 'facebook/dpr-question_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json' ), 'facebook/dpr-reader-single-nq-base': ( 'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json' ), 'facebook/dpr-ctx_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json' ), 'facebook/dpr-question_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json' ), 'facebook/dpr-reader-multiset-base': ( 'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json' ), } class UpperCamelCase_ (UpperCAmelCase_ ): __magic_name__ = '''dpr''' def __init__( self : Tuple , lowerCAmelCase_ : Any=30_522 , lowerCAmelCase_ : Tuple=768 , lowerCAmelCase_ : List[Any]=12 , lowerCAmelCase_ : Union[str, Any]=12 , lowerCAmelCase_ : Tuple=3_072 , lowerCAmelCase_ : str="gelu" , lowerCAmelCase_ : str=0.1 , lowerCAmelCase_ : str=0.1 , lowerCAmelCase_ : Tuple=512 , lowerCAmelCase_ : str=2 , lowerCAmelCase_ : int=0.0_2 , lowerCAmelCase_ : List[Any]=1e-12 , lowerCAmelCase_ : Optional[int]=0 , lowerCAmelCase_ : Union[str, Any]="absolute" , lowerCAmelCase_ : List[Any] = 0 , **lowerCAmelCase_ : List[Any] , ) -> int: super().__init__(pad_token_id=lowerCAmelCase_ , **lowerCAmelCase_ ) UpperCAmelCase_ : Union[str, Any] = vocab_size UpperCAmelCase_ : Tuple = hidden_size UpperCAmelCase_ : Union[str, Any] = num_hidden_layers UpperCAmelCase_ : Optional[int] = num_attention_heads UpperCAmelCase_ : Optional[Any] = hidden_act UpperCAmelCase_ : List[Any] = intermediate_size UpperCAmelCase_ : str = hidden_dropout_prob UpperCAmelCase_ : Union[str, Any] = attention_probs_dropout_prob UpperCAmelCase_ : str = max_position_embeddings UpperCAmelCase_ : Optional[Any] = type_vocab_size UpperCAmelCase_ : Dict = initializer_range UpperCAmelCase_ : Optional[Any] = layer_norm_eps UpperCAmelCase_ : Optional[int] = projection_dim UpperCAmelCase_ : Union[str, Any] = position_embedding_type
268
'''simple docstring''' from collections import OrderedDict from typing import Any, List, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import logging a__ : Optional[Any] = logging.get_logger(__name__) a__ : Dict = { 'EleutherAI/gpt-j-6B': 'https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json', # See all GPT-J models at https://huggingface.co/models?filter=gpt_j } class UpperCAmelCase__ ( UpperCAmelCase_): __SCREAMING_SNAKE_CASE = '''gptj''' __SCREAMING_SNAKE_CASE = { '''max_position_embeddings''': '''n_positions''', '''hidden_size''': '''n_embd''', '''num_attention_heads''': '''n_head''', '''num_hidden_layers''': '''n_layer''', } def __init__( self , lowercase=5_0_4_0_0 , lowercase=2_0_4_8 , lowercase=4_0_9_6 , lowercase=2_8 , lowercase=1_6 , lowercase=6_4 , lowercase=None , lowercase="gelu_new" , lowercase=0.0 , lowercase=0.0 , lowercase=0.0 , lowercase=1E-5 , lowercase=0.02 , lowercase=True , lowercase=5_0_2_5_6 , lowercase=5_0_2_5_6 , lowercase=False , **lowercase , ) -> Tuple: __UpperCamelCase = vocab_size __UpperCamelCase = n_positions __UpperCamelCase = n_embd __UpperCamelCase = n_layer __UpperCamelCase = n_head __UpperCamelCase = n_inner __UpperCamelCase = rotary_dim __UpperCamelCase = activation_function __UpperCamelCase = resid_pdrop __UpperCamelCase = embd_pdrop __UpperCamelCase = attn_pdrop __UpperCamelCase = layer_norm_epsilon __UpperCamelCase = initializer_range __UpperCamelCase = use_cache __UpperCamelCase = bos_token_id __UpperCamelCase = eos_token_id super().__init__( bos_token_id=lowercase , eos_token_id=lowercase , tie_word_embeddings=lowercase , **lowercase ) class UpperCAmelCase__ ( UpperCAmelCase_): def __init__( self , lowercase , lowercase = "default" , lowercase = None , lowercase = False , ) -> List[str]: super().__init__(lowercase , task=lowercase , patching_specs=lowercase , use_past=lowercase ) if not getattr(self._config , """pad_token_id""" , lowercase ): # TODO: how to do that better? __UpperCamelCase = 0 @property def __lowerCamelCase ( self ) -> Mapping[str, Mapping[int, str]]: __UpperCamelCase = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} ) if self.use_past: self.fill_with_past_key_values_(lowercase , direction="""inputs""" ) __UpperCamelCase = {0: """batch""", 1: """past_sequence + sequence"""} else: __UpperCamelCase = {0: """batch""", 1: """sequence"""} return common_inputs @property def __lowerCamelCase ( self ) -> int: return self._config.n_layer @property def __lowerCamelCase ( self ) -> int: return self._config.n_head def __lowerCamelCase ( self , lowercase , lowercase = -1 , lowercase = -1 , lowercase = False , lowercase = None , ) -> Mapping[str, Any]: __UpperCamelCase = super(lowercase , self ).generate_dummy_inputs( lowercase , batch_size=lowercase , seq_length=lowercase , is_pair=lowercase , framework=lowercase ) # We need to order the input in the way they appears in the forward() __UpperCamelCase = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" ) else: import torch __UpperCamelCase , __UpperCamelCase = common_inputs["""input_ids"""].shape # Not using the same length for past_key_values __UpperCamelCase = seqlen + 2 __UpperCamelCase = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) __UpperCamelCase = [ (torch.zeros(lowercase ), torch.zeros(lowercase )) for _ in range(self.num_layers ) ] __UpperCamelCase = common_inputs["""attention_mask"""] if self.use_past: __UpperCamelCase = ordered_inputs["""attention_mask"""].dtype __UpperCamelCase = torch.cat( [ordered_inputs["""attention_mask"""], torch.ones(lowercase , lowercase , dtype=lowercase )] , dim=1 ) return ordered_inputs @property def __lowerCamelCase ( self ) -> int: return 1_3
349
0
from argparse import ArgumentParser from . import BaseTransformersCLICommand def __UpperCamelCase ( _lowerCAmelCase ) -> List[Any]: """simple docstring""" return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code ) class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase_ ): '''simple docstring''' @staticmethod def _lowerCAmelCase ( lowerCamelCase__ ): A : int = parser.add_parser("""download""" ) download_parser.add_argument( """--cache-dir""", type=lowerCamelCase__, default=lowerCamelCase__, help="""Path to location to store the models""" ) download_parser.add_argument( """--force""", action="""store_true""", help="""Force the model to be download even if already in cache-dir""" ) download_parser.add_argument( """--trust-remote-code""", action="""store_true""", help="""Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine""", ) download_parser.add_argument("""model""", type=lowerCamelCase__, help="""Name of the model to download""" ) download_parser.set_defaults(func=lowerCamelCase__ ) def __init__( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ): A : Any = model A : Optional[Any] = cache A : int = force A : Any = trust_remote_code def _lowerCAmelCase ( self ): from ..models.auto import AutoModel, AutoTokenizer AutoModel.from_pretrained( self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code ) AutoTokenizer.from_pretrained( self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code )
116
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, is_vision_available, ) a__ : int = { 'configuration_layoutlmv3': [ 'LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LayoutLMv3Config', 'LayoutLMv3OnnxConfig', ], 'processing_layoutlmv3': ['LayoutLMv3Processor'], 'tokenization_layoutlmv3': ['LayoutLMv3Tokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : Dict = ['LayoutLMv3TokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : Any = [ 'LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST', 'LayoutLMv3ForQuestionAnswering', 'LayoutLMv3ForSequenceClassification', 'LayoutLMv3ForTokenClassification', 'LayoutLMv3Model', 'LayoutLMv3PreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : str = [ 'TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFLayoutLMv3ForQuestionAnswering', 'TFLayoutLMv3ForSequenceClassification', 'TFLayoutLMv3ForTokenClassification', 'TFLayoutLMv3Model', 'TFLayoutLMv3PreTrainedModel', ] try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : List[Any] = ['LayoutLMv3FeatureExtractor'] a__ : str = ['LayoutLMv3ImageProcessor'] if TYPE_CHECKING: from .configuration_layoutlmva import ( LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig, LayoutLMvaOnnxConfig, ) from .processing_layoutlmva import LayoutLMvaProcessor from .tokenization_layoutlmva import LayoutLMvaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_layoutlmva import ( LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST, LayoutLMvaForQuestionAnswering, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaModel, LayoutLMvaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_layoutlmva import ( TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, TFLayoutLMvaModel, TFLayoutLMvaPreTrainedModel, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor from .image_processing_layoutlmva import LayoutLMvaImageProcessor else: import sys a__ : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
349
0
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError import requests def UpperCamelCase__ ( A__ = "isbn/0140328726" ) -> str: snake_case__ : Optional[int] = olid.strip().strip('/' ) # Remove leading/trailing whitespace & slashes if new_olid.count('/' ) != 1: snake_case__ : List[str] = F"""{olid} is not a valid Open Library olid""" raise ValueError(__A ) return requests.get(F"""https://openlibrary.org/{new_olid}.json""" ).json() def UpperCamelCase__ ( A__ ) -> Optional[Any]: snake_case__ : Union[str, Any] = { 'title': 'Title', 'publish_date': 'Publish date', 'authors': 'Authors', 'number_of_pages': 'Number of pages:', 'first_sentence': 'First sentence', 'isbn_10': 'ISBN (10)', 'isbn_13': 'ISBN (13)', } snake_case__ : Tuple = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()} snake_case__ : Optional[Any] = [ get_openlibrary_data(author['key'] )['name'] for author in data['Authors'] ] snake_case__ : Optional[Any] = data['First sentence']['value'] for key, value in data.items(): if isinstance(__A , __A ): snake_case__ : str = ', '.join(__A ) return data if __name__ == "__main__": import doctest doctest.testmod() while True: lowerCAmelCase__ : Tuple = input('''\nEnter the ISBN code to search (or \'quit\' to stop): ''').strip() if isbn.lower() in ("", "q", "quit", "exit", "stop"): break if len(isbn) not in (10, 13) or not isbn.isdigit(): print(F'''Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.''') continue print(F'''\nSearching Open Library for ISBN: {isbn}...\n''') try: lowerCAmelCase__ : str = summarize_book(get_openlibrary_data(F'''isbn/{isbn}''')) print('''\n'''.join(F'''{key}: {value}''' for key, value in book_summary.items())) except JSONDecodeError: # Workaround for requests.exceptions.RequestException: print(F'''Sorry, there are no results for ISBN: {isbn}.''')
143
'''simple docstring''' def _lowercase ( __A ,__A ): '''simple docstring''' __UpperCamelCase = len(__A ) __UpperCamelCase = [[False] * (required_sum + 1) for _ in range(arr_len + 1 )] # for each arr value, a sum of zero(0) can be formed by not taking any element # hence True/1 for i in range(arr_len + 1 ): __UpperCamelCase = True # sum is not zero and set is empty then false for i in range(1 ,required_sum + 1 ): __UpperCamelCase = False for i in range(1 ,arr_len + 1 ): for j in range(1 ,required_sum + 1 ): if arr[i - 1] > j: __UpperCamelCase = subset[i - 1][j] if arr[i - 1] <= j: __UpperCamelCase = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]] return subset[arr_len][required_sum] if __name__ == "__main__": import doctest doctest.testmod()
349
0
snake_case : List[Any] = '0.21.0' from .accelerator import Accelerator from .big_modeling import ( cpu_offload, cpu_offload_with_hook, disk_offload, dispatch_model, init_empty_weights, init_on_device, load_checkpoint_and_dispatch, ) from .data_loader import skip_first_batches from .launchers import debug_launcher, notebook_launcher from .state import PartialState from .utils import ( DeepSpeedPlugin, DistributedDataParallelKwargs, DistributedType, FullyShardedDataParallelPlugin, GradScalerKwargs, InitProcessGroupKwargs, find_executable_batch_size, infer_auto_device_map, is_rich_available, load_checkpoint_in_model, synchronize_rng_states, ) if is_rich_available(): from .utils import rich
94
'''simple docstring''' import bza import gzip import lzma import os import shutil import struct import tarfile import warnings import zipfile from abc import ABC, abstractmethod from pathlib import Path from typing import Dict, List, Optional, Type, Union from .. import config from .filelock import FileLock from .logging import get_logger a__ : Any = get_logger(__name__) class UpperCAmelCase__ : def __init__( self , lowercase = None ) -> List[str]: __UpperCamelCase = ( os.path.join(lowercase , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH ) __UpperCamelCase = Extractor def __lowerCamelCase ( self , lowercase ) -> str: from .file_utils import hash_url_to_filename # Path where we extract compressed archives # We extract in the cache dir, and get the extracted path name by hashing the original path" __UpperCamelCase = os.path.abspath(lowercase ) return os.path.join(self.extract_dir , hash_url_to_filename(lowercase ) ) def __lowerCamelCase ( self , lowercase , lowercase ) -> bool: return force_extract or ( not os.path.isfile(lowercase ) and not (os.path.isdir(lowercase ) and os.listdir(lowercase )) ) def __lowerCamelCase ( self , lowercase , lowercase = False ) -> str: __UpperCamelCase = self.extractor.infer_extractor_format(lowercase ) if not extractor_format: return input_path __UpperCamelCase = self._get_output_path(lowercase ) if self._do_extract(lowercase , lowercase ): self.extractor.extract(lowercase , lowercase , lowercase ) return output_path class UpperCAmelCase__ ( UpperCAmelCase_): @classmethod @abstractmethod def __lowerCamelCase ( cls , lowercase , **lowercase ) -> bool: ... @staticmethod @abstractmethod def __lowerCamelCase ( lowercase , lowercase ) -> None: ... class UpperCAmelCase__ ( UpperCAmelCase_ , UpperCAmelCase_): __SCREAMING_SNAKE_CASE = [] @staticmethod def __lowerCamelCase ( lowercase , lowercase ) -> int: with open(lowercase , """rb""" ) as f: return f.read(lowercase ) @classmethod def __lowerCamelCase ( cls , lowercase , lowercase = b"" ) -> bool: if not magic_number: __UpperCamelCase = max(len(lowercase ) for cls_magic_number in cls.magic_numbers ) try: __UpperCamelCase = cls.read_magic_number(lowercase , lowercase ) except OSError: return False return any(magic_number.startswith(lowercase ) for cls_magic_number in cls.magic_numbers ) class UpperCAmelCase__ ( UpperCAmelCase_): @classmethod def __lowerCamelCase ( cls , lowercase , **lowercase ) -> bool: return tarfile.is_tarfile(lowercase ) @staticmethod def __lowerCamelCase ( lowercase , lowercase ) -> str: def resolved(lowercase ) -> str: return os.path.realpath(os.path.abspath(lowercase ) ) def badpath(lowercase , lowercase ) -> bool: # joinpath will ignore base if path is absolute return not resolved(os.path.join(lowercase , lowercase ) ).startswith(lowercase ) def badlink(lowercase , lowercase ) -> bool: # Links are interpreted relative to the directory containing the link __UpperCamelCase = resolved(os.path.join(lowercase , os.path.dirname(info.name ) ) ) return badpath(info.linkname , base=lowercase ) __UpperCamelCase = resolved(lowercase ) for finfo in members: if badpath(finfo.name , lowercase ): logger.error(f"Extraction of {finfo.name} is blocked (illegal path)" ) elif finfo.issym() and badlink(lowercase , lowercase ): logger.error(f"Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}" ) elif finfo.islnk() and badlink(lowercase , lowercase ): logger.error(f"Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}" ) else: yield finfo @staticmethod def __lowerCamelCase ( lowercase , lowercase ) -> None: os.makedirs(lowercase , exist_ok=lowercase ) __UpperCamelCase = tarfile.open(lowercase ) tar_file.extractall(lowercase , members=TarExtractor.safemembers(lowercase , lowercase ) ) tar_file.close() class UpperCAmelCase__ ( UpperCAmelCase_): __SCREAMING_SNAKE_CASE = [B'''\x1F\x8B'''] @staticmethod def __lowerCamelCase ( lowercase , lowercase ) -> None: with gzip.open(lowercase , """rb""" ) as gzip_file: with open(lowercase , """wb""" ) as extracted_file: shutil.copyfileobj(lowercase , lowercase ) class UpperCAmelCase__ ( UpperCAmelCase_): __SCREAMING_SNAKE_CASE = [ B'''PK\x03\x04''', B'''PK\x05\x06''', # empty archive B'''PK\x07\x08''', # spanned archive ] @classmethod def __lowerCamelCase ( cls , lowercase , lowercase = b"" ) -> bool: if super().is_extractable(lowercase , magic_number=lowercase ): return True try: # Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives. # From: https://github.com/python/cpython/pull/5053 from zipfile import ( _CD_SIGNATURE, _ECD_DISK_NUMBER, _ECD_DISK_START, _ECD_ENTRIES_TOTAL, _ECD_OFFSET, _ECD_SIZE, _EndRecData, sizeCentralDir, stringCentralDir, structCentralDir, ) with open(lowercase , """rb""" ) as fp: __UpperCamelCase = _EndRecData(lowercase ) if endrec: if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0: return True # Empty zipfiles are still zipfiles elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]: fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir: __UpperCamelCase = fp.read(lowercase ) # CD is where we expect it to be if len(lowercase ) == sizeCentralDir: __UpperCamelCase = struct.unpack(lowercase , lowercase ) # CD is the right size if centdir[_CD_SIGNATURE] == stringCentralDir: return True # First central directory entry has correct magic number return False except Exception: # catch all errors in case future python versions change the zipfile internals return False @staticmethod def __lowerCamelCase ( lowercase , lowercase ) -> None: os.makedirs(lowercase , exist_ok=lowercase ) with zipfile.ZipFile(lowercase , """r""" ) as zip_file: zip_file.extractall(lowercase ) zip_file.close() class UpperCAmelCase__ ( UpperCAmelCase_): __SCREAMING_SNAKE_CASE = [B'''\xFD\x37\x7A\x58\x5A\x00'''] @staticmethod def __lowerCamelCase ( lowercase , lowercase ) -> None: with lzma.open(lowercase ) as compressed_file: with open(lowercase , """wb""" ) as extracted_file: shutil.copyfileobj(lowercase , lowercase ) class UpperCAmelCase__ ( UpperCAmelCase_): __SCREAMING_SNAKE_CASE = [B'''Rar!\x1a\x07\x00''', B'''Rar!\x1a\x07\x01\x00'''] # RAR_ID # RAR5_ID @staticmethod def __lowerCamelCase ( lowercase , lowercase ) -> None: if not config.RARFILE_AVAILABLE: raise ImportError("""Please pip install rarfile""" ) import rarfile os.makedirs(lowercase , exist_ok=lowercase ) __UpperCamelCase = rarfile.RarFile(lowercase ) rf.extractall(lowercase ) rf.close() class UpperCAmelCase__ ( UpperCAmelCase_): __SCREAMING_SNAKE_CASE = [B'''\x28\xb5\x2F\xFD'''] @staticmethod def __lowerCamelCase ( lowercase , lowercase ) -> None: if not config.ZSTANDARD_AVAILABLE: raise ImportError("""Please pip install zstandard""" ) import zstandard as zstd __UpperCamelCase = zstd.ZstdDecompressor() with open(lowercase , """rb""" ) as ifh, open(lowercase , """wb""" ) as ofh: dctx.copy_stream(lowercase , lowercase ) class UpperCAmelCase__ ( UpperCAmelCase_): __SCREAMING_SNAKE_CASE = [B'''\x42\x5A\x68'''] @staticmethod def __lowerCamelCase ( lowercase , lowercase ) -> None: with bza.open(lowercase , """rb""" ) as compressed_file: with open(lowercase , """wb""" ) as extracted_file: shutil.copyfileobj(lowercase , lowercase ) class UpperCAmelCase__ ( UpperCAmelCase_): __SCREAMING_SNAKE_CASE = [B'''\x37\x7A\xBC\xAF\x27\x1C'''] @staticmethod def __lowerCamelCase ( lowercase , lowercase ) -> None: if not config.PY7ZR_AVAILABLE: raise ImportError("""Please pip install py7zr""" ) import pyazr os.makedirs(lowercase , exist_ok=lowercase ) with pyazr.SevenZipFile(lowercase , """r""" ) as archive: archive.extractall(lowercase ) class UpperCAmelCase__ ( UpperCAmelCase_): __SCREAMING_SNAKE_CASE = [B'''\x04\x22\x4D\x18'''] @staticmethod def __lowerCamelCase ( lowercase , lowercase ) -> None: if not config.LZ4_AVAILABLE: raise ImportError("""Please pip install lz4""" ) import lza.frame with lza.frame.open(lowercase , """rb""" ) as compressed_file: with open(lowercase , """wb""" ) as extracted_file: shutil.copyfileobj(lowercase , lowercase ) class UpperCAmelCase__ : # Put zip file to the last, b/c it is possible wrongly detected as zip (I guess it means: as tar or gzip) __SCREAMING_SNAKE_CASE = { "tar": TarExtractor, "gzip": GzipExtractor, "zip": ZipExtractor, "xz": XzExtractor, "rar": RarExtractor, "zstd": ZstdExtractor, "bz2": BzipaExtractor, "7z": SevenZipExtractor, # <Added version="2.4.0"/> "lz4": LzaExtractor, # <Added version="2.4.0"/> } @classmethod def __lowerCamelCase ( cls ) -> Union[str, Any]: return max( len(lowercase ) for extractor in cls.extractors.values() if issubclass(lowercase , lowercase ) for extractor_magic_number in extractor.magic_numbers ) @staticmethod def __lowerCamelCase ( lowercase , lowercase ) -> str: try: return MagicNumberBaseExtractor.read_magic_number(lowercase , magic_number_length=lowercase ) except OSError: return b"" @classmethod def __lowerCamelCase ( cls , lowercase , lowercase = False ) -> bool: warnings.warn( """Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. """ """Use 'infer_extractor_format' instead.""" , category=lowercase , ) __UpperCamelCase = cls.infer_extractor_format(lowercase ) if extractor_format: return True if not return_extractor else (True, cls.extractors[extractor_format]) return False if not return_extractor else (False, None) @classmethod def __lowerCamelCase ( cls , lowercase ) -> str: # <Added version="2.4.0"/> __UpperCamelCase = cls._get_magic_number_max_length() __UpperCamelCase = cls._read_magic_number(lowercase , lowercase ) for extractor_format, extractor in cls.extractors.items(): if extractor.is_extractable(lowercase , magic_number=lowercase ): return extractor_format @classmethod def __lowerCamelCase ( cls , lowercase , lowercase , lowercase = None , lowercase = "deprecated" , ) -> None: os.makedirs(os.path.dirname(lowercase ) , exist_ok=lowercase ) # Prevent parallel extractions __UpperCamelCase = str(Path(lowercase ).with_suffix(""".lock""" ) ) with FileLock(lowercase ): shutil.rmtree(lowercase , ignore_errors=lowercase ) if extractor_format or extractor != "deprecated": if extractor != "deprecated" or not isinstance(lowercase , lowercase ): # passed as positional arg warnings.warn( """Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. """ """Use 'extractor_format' instead.""" , category=lowercase , ) __UpperCamelCase = extractor if extractor != """deprecated""" else extractor_format else: __UpperCamelCase = cls.extractors[extractor_format] return extractor.extract(lowercase , lowercase ) else: warnings.warn( """Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an """ """exception in 3.0.0.""" , category=lowercase , ) for extractor in cls.extractors.values(): if extractor.is_extractable(lowercase ): return extractor.extract(lowercase , lowercase )
349
0
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowercase : List[str] = logging.get_logger(__name__) lowercase : Tuple = { 'kssteven/ibert-roberta-base': 'https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json', 'kssteven/ibert-roberta-large': 'https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json', 'kssteven/ibert-roberta-large-mnli': ( 'https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json' ), } class A ( UpperCAmelCase_ ): __magic_name__ = '''ibert''' def __init__( self , SCREAMING_SNAKE_CASE=30522 , SCREAMING_SNAKE_CASE=768 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=3072 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=512 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=1e-12 , SCREAMING_SNAKE_CASE=1 , SCREAMING_SNAKE_CASE=0 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE="absolute" , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE="none" , **SCREAMING_SNAKE_CASE , ) -> str: """simple docstring""" super().__init__(pad_token_id=SCREAMING_SNAKE_CASE , bos_token_id=SCREAMING_SNAKE_CASE , eos_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) A : Any = vocab_size A : int = hidden_size A : Optional[int] = num_hidden_layers A : List[str] = num_attention_heads A : Any = hidden_act A : Tuple = intermediate_size A : Optional[Any] = hidden_dropout_prob A : Optional[Any] = attention_probs_dropout_prob A : List[str] = max_position_embeddings A : Tuple = type_vocab_size A : List[str] = initializer_range A : int = layer_norm_eps A : str = position_embedding_type A : int = quant_mode A : Optional[Any] = force_dequant class A ( UpperCAmelCase_ ): @property def __lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" if self.task == "multiple-choice": A : Union[str, Any] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: A : Any = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ] )
3
'''simple docstring''' import unittest from transformers import PegasusConfig, PegasusTokenizer, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html a__ : List[str] = 'platform' import jax import jax.numpy as jnp import numpy as np from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel @require_flax class UpperCAmelCase__ : __SCREAMING_SNAKE_CASE = PegasusConfig __SCREAMING_SNAKE_CASE = {} __SCREAMING_SNAKE_CASE = '''gelu''' def __init__( self , lowercase , lowercase=1_3 , lowercase=7 , lowercase=True , lowercase=False , lowercase=9_9 , lowercase=3_2 , lowercase=5 , lowercase=4 , lowercase=3_7 , lowercase=0.1 , lowercase=0.1 , lowercase=2_0 , lowercase=2 , lowercase=1 , lowercase=0 , ) -> Optional[Any]: __UpperCamelCase = parent __UpperCamelCase = batch_size __UpperCamelCase = seq_length __UpperCamelCase = is_training __UpperCamelCase = use_labels __UpperCamelCase = vocab_size __UpperCamelCase = hidden_size __UpperCamelCase = num_hidden_layers __UpperCamelCase = num_attention_heads __UpperCamelCase = intermediate_size __UpperCamelCase = hidden_dropout_prob __UpperCamelCase = attention_probs_dropout_prob __UpperCamelCase = max_position_embeddings __UpperCamelCase = eos_token_id __UpperCamelCase = pad_token_id __UpperCamelCase = bos_token_id def __lowerCamelCase ( self ) -> str: __UpperCamelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size ) __UpperCamelCase = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 ) __UpperCamelCase = np.concatenate([input_ids, eos_tensor] , axis=1 ) __UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCamelCase = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) __UpperCamelCase = prepare_pegasus_inputs_dict(lowercase , lowercase , lowercase ) return config, inputs_dict def __lowerCamelCase ( self , lowercase , lowercase , lowercase ) -> Dict: __UpperCamelCase = 2_0 __UpperCamelCase = model_class_name(lowercase ) __UpperCamelCase = model.encode(inputs_dict["""input_ids"""] ) __UpperCamelCase , __UpperCamelCase = ( inputs_dict["""decoder_input_ids"""], inputs_dict["""decoder_attention_mask"""], ) __UpperCamelCase = model.init_cache(decoder_input_ids.shape[0] , lowercase , lowercase ) __UpperCamelCase = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""" ) __UpperCamelCase = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) __UpperCamelCase = model.decode( decoder_input_ids[:, :-1] , lowercase , decoder_attention_mask=lowercase , past_key_values=lowercase , decoder_position_ids=lowercase , ) __UpperCamelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" ) __UpperCamelCase = model.decode( decoder_input_ids[:, -1:] , lowercase , decoder_attention_mask=lowercase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=lowercase , ) __UpperCamelCase = model.decode(lowercase , lowercase ) __UpperCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=f"Max diff is {diff}" ) def __lowerCamelCase ( self , lowercase , lowercase , lowercase ) -> Any: __UpperCamelCase = 2_0 __UpperCamelCase = model_class_name(lowercase ) __UpperCamelCase = model.encode(inputs_dict["""input_ids"""] ) __UpperCamelCase , __UpperCamelCase = ( inputs_dict["""decoder_input_ids"""], inputs_dict["""decoder_attention_mask"""], ) __UpperCamelCase = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ), ] , axis=-1 , ) __UpperCamelCase = model.init_cache(decoder_input_ids.shape[0] , lowercase , lowercase ) __UpperCamelCase = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) __UpperCamelCase = model.decode( decoder_input_ids[:, :-1] , lowercase , decoder_attention_mask=lowercase , past_key_values=lowercase , decoder_position_ids=lowercase , ) __UpperCamelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" ) __UpperCamelCase = model.decode( decoder_input_ids[:, -1:] , lowercase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=lowercase , decoder_position_ids=lowercase , ) __UpperCamelCase = model.decode(lowercase , lowercase , decoder_attention_mask=lowercase ) __UpperCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=f"Max diff is {diff}" ) def _lowercase ( __A ,__A ,__A ,__A=None ,__A=None ,): '''simple docstring''' if attention_mask is None: __UpperCamelCase = np.not_equal(__A ,config.pad_token_id ).astype(np.inta ) if decoder_attention_mask is None: __UpperCamelCase = np.concatenate( [ np.ones(decoder_input_ids[:, :1].shape ,dtype=np.inta ), np.not_equal(decoder_input_ids[:, 1:] ,config.pad_token_id ).astype(np.inta ), ] ,axis=-1 ,) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, } @require_flax class UpperCAmelCase__ ( UpperCAmelCase_ , unittest.TestCase): __SCREAMING_SNAKE_CASE = ( ( FlaxPegasusForConditionalGeneration, FlaxPegasusModel, ) if is_flax_available() else () ) __SCREAMING_SNAKE_CASE = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else () __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = False def __lowerCamelCase ( self ) -> Optional[Any]: __UpperCamelCase = FlaxPegasusModelTester(self ) __UpperCamelCase = ConfigTester(self , config_class=lowercase ) def __lowerCamelCase ( self ) -> List[Any]: self.config_tester.run_common_tests() def __lowerCamelCase ( self ) -> List[Any]: __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(lowercase , lowercase , lowercase ) def __lowerCamelCase ( self ) -> List[Any]: __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(lowercase , lowercase , lowercase ) def __lowerCamelCase ( self ) -> List[str]: __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): __UpperCamelCase = self._prepare_for_class(lowercase , lowercase ) __UpperCamelCase = model_class(lowercase ) @jax.jit def encode_jitted(lowercase , lowercase=None , **lowercase ): return model.encode(input_ids=lowercase , attention_mask=lowercase ) with self.subTest("""JIT Enabled""" ): __UpperCamelCase = encode_jitted(**lowercase ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): __UpperCamelCase = encode_jitted(**lowercase ).to_tuple() self.assertEqual(len(lowercase ) , len(lowercase ) ) for jitted_output, output in zip(lowercase , lowercase ): self.assertEqual(jitted_output.shape , output.shape ) def __lowerCamelCase ( self ) -> List[Any]: __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): __UpperCamelCase = model_class(lowercase ) __UpperCamelCase = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""] ) __UpperCamelCase = { """decoder_input_ids""": inputs_dict["""decoder_input_ids"""], """decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""], """encoder_outputs""": encoder_outputs, } @jax.jit def decode_jitted(lowercase , lowercase , lowercase ): return model.decode( decoder_input_ids=lowercase , decoder_attention_mask=lowercase , encoder_outputs=lowercase , ) with self.subTest("""JIT Enabled""" ): __UpperCamelCase = decode_jitted(**lowercase ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): __UpperCamelCase = decode_jitted(**lowercase ).to_tuple() self.assertEqual(len(lowercase ) , len(lowercase ) ) for jitted_output, output in zip(lowercase , lowercase ): self.assertEqual(jitted_output.shape , output.shape ) @slow def __lowerCamelCase ( self ) -> Dict: for model_class_name in self.all_model_classes: __UpperCamelCase = model_class_name.from_pretrained("""google/pegasus-large""" , from_pt=lowercase ) __UpperCamelCase = np.ones((1, 1) ) __UpperCamelCase = model(lowercase ) self.assertIsNotNone(lowercase ) @slow def __lowerCamelCase ( self ) -> str: __UpperCamelCase = FlaxPegasusForConditionalGeneration.from_pretrained("""google/pegasus-xsum""" ) __UpperCamelCase = PegasusTokenizer.from_pretrained("""google/pegasus-xsum""" ) __UpperCamelCase = [ """ PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""", """ The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """, ] __UpperCamelCase = [ """California's largest electricity provider has turned off power to hundreds of thousands of customers.""", """Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""", ] __UpperCamelCase = tokenizer(lowercase , return_tensors="""np""" , truncation=lowercase , max_length=5_1_2 , padding=lowercase ) __UpperCamelCase = model.generate(**lowercase , num_beams=2 ).sequences __UpperCamelCase = tokenizer.batch_decode(lowercase , skip_special_tokens=lowercase ) assert tgt_text == decoded
349
0
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging _lowerCamelCase =logging.get_logger(__name__) if is_vision_available(): import PIL class a_ ( UpperCAmelCase_ ): """simple docstring""" __UpperCAmelCase = ['pixel_values'] def __init__( self : Dict ,snake_case : List[str] = True ,snake_case : List[Any] = None ,snake_case : int = PILImageResampling.BICUBIC ,snake_case : int = True ,snake_case : Optional[Any] = None ,snake_case : Tuple = True ,snake_case : List[Any] = 1 / 255 ,snake_case : Dict = True ,snake_case : Optional[Any] = None ,snake_case : List[Any] = None ,snake_case : Union[str, Any] = True ,**snake_case : Optional[int] ,): super().__init__(**snake_case ) SCREAMING_SNAKE_CASE =size if size is not None else {'shortest_edge': 224} SCREAMING_SNAKE_CASE =get_size_dict(snake_case ,default_to_square=snake_case ) SCREAMING_SNAKE_CASE =crop_size if crop_size is not None else {'height': 224, 'width': 224} SCREAMING_SNAKE_CASE =get_size_dict(snake_case ,default_to_square=snake_case ,param_name='crop_size' ) SCREAMING_SNAKE_CASE =do_resize SCREAMING_SNAKE_CASE =size SCREAMING_SNAKE_CASE =resample SCREAMING_SNAKE_CASE =do_center_crop SCREAMING_SNAKE_CASE =crop_size SCREAMING_SNAKE_CASE =do_rescale SCREAMING_SNAKE_CASE =rescale_factor SCREAMING_SNAKE_CASE =do_normalize SCREAMING_SNAKE_CASE =image_mean if image_mean is not None else OPENAI_CLIP_MEAN SCREAMING_SNAKE_CASE =image_std if image_std is not None else OPENAI_CLIP_STD SCREAMING_SNAKE_CASE =do_convert_rgb def _lowerCAmelCase ( self : Dict ,snake_case : List[Any] ,snake_case : Optional[Any] ,snake_case : List[Any] = PILImageResampling.BICUBIC ,snake_case : Union[str, Any] = None ,**snake_case : int ,): SCREAMING_SNAKE_CASE =get_size_dict(snake_case ,default_to_square=snake_case ) if "shortest_edge" not in size: raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' ) SCREAMING_SNAKE_CASE =get_resize_output_image_size(snake_case ,size=size['shortest_edge'] ,default_to_square=snake_case ) return resize(snake_case ,size=snake_case ,resample=snake_case ,data_format=snake_case ,**snake_case ) def _lowerCAmelCase ( self : Tuple ,snake_case : int ,snake_case : Optional[Any] ,snake_case : Dict = None ,**snake_case : Tuple ,): SCREAMING_SNAKE_CASE =get_size_dict(snake_case ) if "height" not in size or "width" not in size: raise ValueError(f'The `size` parameter must contain the keys (height, width). Got {size.keys()}' ) return center_crop(snake_case ,size=(size['height'], size['width']) ,data_format=snake_case ,**snake_case ) def _lowerCAmelCase ( self : Optional[int] ,snake_case : Optional[int] ,snake_case : Union[str, Any] ,snake_case : List[str] = None ,**snake_case : Optional[int] ,): return rescale(snake_case ,scale=snake_case ,data_format=snake_case ,**snake_case ) def _lowerCAmelCase ( self : Any ,snake_case : Dict ,snake_case : Tuple ,snake_case : List[str] ,snake_case : int = None ,**snake_case : Optional[Any] ,): return normalize(snake_case ,mean=snake_case ,std=snake_case ,data_format=snake_case ,**snake_case ) def _lowerCAmelCase ( self : Any ,snake_case : Dict ,snake_case : Tuple = None ,snake_case : str = None ,snake_case : int = None ,snake_case : str = None ,snake_case : Tuple = None ,snake_case : Tuple = None ,snake_case : Tuple = None ,snake_case : Any = None ,snake_case : Union[str, Any] = None ,snake_case : List[str] = None ,snake_case : Any = None ,snake_case : List[Any] = None ,snake_case : List[Any] = ChannelDimension.FIRST ,**snake_case : Optional[Any] ,): SCREAMING_SNAKE_CASE =do_resize if do_resize is not None else self.do_resize SCREAMING_SNAKE_CASE =size if size is not None else self.size SCREAMING_SNAKE_CASE =get_size_dict(snake_case ,param_name='size' ,default_to_square=snake_case ) SCREAMING_SNAKE_CASE =resample if resample is not None else self.resample SCREAMING_SNAKE_CASE =do_center_crop if do_center_crop is not None else self.do_center_crop SCREAMING_SNAKE_CASE =crop_size if crop_size is not None else self.crop_size SCREAMING_SNAKE_CASE =get_size_dict(snake_case ,param_name='crop_size' ,default_to_square=snake_case ) SCREAMING_SNAKE_CASE =do_rescale if do_rescale is not None else self.do_rescale SCREAMING_SNAKE_CASE =rescale_factor if rescale_factor is not None else self.rescale_factor SCREAMING_SNAKE_CASE =do_normalize if do_normalize is not None else self.do_normalize SCREAMING_SNAKE_CASE =image_mean if image_mean is not None else self.image_mean SCREAMING_SNAKE_CASE =image_std if image_std is not None else self.image_std SCREAMING_SNAKE_CASE =do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb SCREAMING_SNAKE_CASE =make_list_of_images(snake_case ) if not valid_images(snake_case ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None: raise ValueError('Size must be specified if do_resize is True.' ) if do_center_crop and crop_size is None: raise ValueError('Crop size must be specified if do_center_crop is True.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.' ) # PIL RGBA images are converted to RGB if do_convert_rgb: SCREAMING_SNAKE_CASE =[convert_to_rgb(snake_case ) for image in images] # All transformations expect numpy arrays. SCREAMING_SNAKE_CASE =[to_numpy_array(snake_case ) for image in images] if do_resize: SCREAMING_SNAKE_CASE =[self.resize(image=snake_case ,size=snake_case ,resample=snake_case ) for image in images] if do_center_crop: SCREAMING_SNAKE_CASE =[self.center_crop(image=snake_case ,size=snake_case ) for image in images] if do_rescale: SCREAMING_SNAKE_CASE =[self.rescale(image=snake_case ,scale=snake_case ) for image in images] if do_normalize: SCREAMING_SNAKE_CASE =[self.normalize(image=snake_case ,mean=snake_case ,std=snake_case ) for image in images] SCREAMING_SNAKE_CASE =[to_channel_dimension_format(snake_case ,snake_case ) for image in images] SCREAMING_SNAKE_CASE ={'pixel_values': images} return BatchFeature(data=snake_case ,tensor_type=snake_case )
334
'''simple docstring''' import pytest a__ : List[str] = '__dummy_dataset1__' a__ : Optional[int] = '\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"\nURLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n "tokens": datasets.Sequence(datasets.Value("string")),\n "ner_tags": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n "O",\n "B-PER",\n "I-PER",\n "B-ORG",\n "I-ORG",\n "B-LOC",\n "I-LOC",\n ]\n )\n ),\n "langs": datasets.Sequence(datasets.Value("string")),\n "spans": datasets.Sequence(datasets.Value("string")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, "r", encoding="utf-8") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n' @pytest.fixture def _lowercase ( ): '''simple docstring''' return DATASET_LOADING_SCRIPT_NAME @pytest.fixture def _lowercase ( ): '''simple docstring''' return DATASET_LOADING_SCRIPT_CODE @pytest.fixture def _lowercase ( __A ,__A ,__A ): '''simple docstring''' __UpperCamelCase = dataset_loading_script_name __UpperCamelCase = tmp_path / """datasets""" / script_name script_dir.mkdir(parents=__A ) __UpperCamelCase = script_dir / f"{script_name}.py" with open(__A ,"""w""" ) as f: f.write(__A ) return str(__A )
349
0
import re import subprocess import sys __A : Optional[int] = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8") __A : str = subprocess.check_output(F'git diff --name-only {fork_point_sha}'.split()).decode("utf-8").split() __A : List[str] = '|'.join(sys.argv[1:]) __A : List[str] = re.compile(RF'^({joined_dirs}).*?\.py$') __A : int = [x for x in modified_files if regex.match(x)] print(" ".join(relevant_modified_files), end="")
273
'''simple docstring''' import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConfig, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaForCTC, WavaVecaForPreTraining, WavaVecaProcessor, logging, ) from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification logging.set_verbosity_info() a__ : Any = logging.get_logger(__name__) a__ : Optional[int] = { 'post_extract_proj': 'feature_projection.projection', 'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv', 'self_attn.k_proj': 'encoder.layers.*.attention.k_proj', 'self_attn.v_proj': 'encoder.layers.*.attention.v_proj', 'self_attn.q_proj': 'encoder.layers.*.attention.q_proj', 'self_attn.out_proj': 'encoder.layers.*.attention.out_proj', 'self_attn_layer_norm': 'encoder.layers.*.layer_norm', 'fc1': 'encoder.layers.*.feed_forward.intermediate_dense', 'fc2': 'encoder.layers.*.feed_forward.output_dense', 'final_layer_norm': 'encoder.layers.*.final_layer_norm', 'encoder.layer_norm': 'encoder.layer_norm', 'adapter_layer': 'encoder.layers.*.adapter_layer', 'w2v_model.layer_norm': 'feature_projection.layer_norm', 'quantizer.weight_proj': 'quantizer.weight_proj', 'quantizer.vars': 'quantizer.codevectors', 'project_q': 'project_q', 'final_proj': 'project_hid', 'w2v_encoder.proj': 'lm_head', 'mask_emb': 'masked_spec_embed', 'pooling_layer.linear': 'projector', 'pooling_layer.projection': 'classifier', } a__ : List[str] = [ 'lm_head', 'quantizer.weight_proj', 'quantizer.codevectors', 'project_q', 'project_hid', 'projector', 'classifier', ] def _lowercase ( __A ): '''simple docstring''' __UpperCamelCase = {} with open(__A ,"""r""" ) as file: for line_number, line in enumerate(__A ): __UpperCamelCase = line.strip() if line: __UpperCamelCase = line.split() __UpperCamelCase = line_number __UpperCamelCase = words[0] __UpperCamelCase = value return result def _lowercase ( __A ,__A ,__A ,__A ,__A ): '''simple docstring''' for attribute in key.split(""".""" ): __UpperCamelCase = getattr(__A ,__A ) __UpperCamelCase = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(__A ): __UpperCamelCase = PARAM_MAPPING[full_name.split(""".""" )[-1]] __UpperCamelCase = """param""" if weight_type is not None and weight_type != "param": __UpperCamelCase = getattr(__A ,__A ).shape elif weight_type is not None and weight_type == "param": __UpperCamelCase = hf_pointer for attribute in hf_param_name.split(""".""" ): __UpperCamelCase = getattr(__A ,__A ) __UpperCamelCase = shape_pointer.shape # let's reduce dimension __UpperCamelCase = value[0] else: __UpperCamelCase = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be" f" {value.shape} for {full_name}" ) if weight_type == "weight": __UpperCamelCase = value elif weight_type == "weight_g": __UpperCamelCase = value elif weight_type == "weight_v": __UpperCamelCase = value elif weight_type == "bias": __UpperCamelCase = value elif weight_type == "param": for attribute in hf_param_name.split(""".""" ): __UpperCamelCase = getattr(__A ,__A ) __UpperCamelCase = value else: __UpperCamelCase = value logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." ) def _lowercase ( __A ,__A ,__A ,__A ,__A ): '''simple docstring''' __UpperCamelCase = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(__A ): __UpperCamelCase = PARAM_MAPPING[full_name.split(""".""" )[-1]] __UpperCamelCase = """param""" if weight_type is not None and weight_type != "param": __UpperCamelCase = """.""".join([key, weight_type] ) elif weight_type is not None and weight_type == "param": __UpperCamelCase = """.""".join([key, hf_param_name] ) else: __UpperCamelCase = key __UpperCamelCase = value if """lm_head""" in full_key else value[0] a__ : Dict = { 'W_a': 'linear_1.weight', 'W_b': 'linear_2.weight', 'b_a': 'linear_1.bias', 'b_b': 'linear_2.bias', 'ln_W': 'norm.weight', 'ln_b': 'norm.bias', } def _lowercase ( __A ,__A ,__A=None ,__A=None ): '''simple docstring''' __UpperCamelCase = False for key, mapped_key in MAPPING.items(): __UpperCamelCase = """wav2vec2.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]: __UpperCamelCase = True if "*" in mapped_key: __UpperCamelCase = name.split(__A )[0].split(""".""" )[-2] __UpperCamelCase = mapped_key.replace("""*""" ,__A ) if "weight_g" in name: __UpperCamelCase = """weight_g""" elif "weight_v" in name: __UpperCamelCase = """weight_v""" elif "bias" in name: __UpperCamelCase = """bias""" elif "weight" in name: # TODO: don't match quantizer.weight_proj __UpperCamelCase = """weight""" else: __UpperCamelCase = None if hf_dict is not None: rename_dict(__A ,__A ,__A ,__A ,__A ) else: set_recursively(__A ,__A ,__A ,__A ,__A ) return is_used return is_used def _lowercase ( __A ,__A ,__A ): '''simple docstring''' __UpperCamelCase = [] __UpperCamelCase = fairseq_model.state_dict() __UpperCamelCase = hf_model.wavaveca.feature_extractor for name, value in fairseq_dict.items(): __UpperCamelCase = False if "conv_layers" in name: load_conv_layer( __A ,__A ,__A ,__A ,hf_model.config.feat_extract_norm == """group""" ,) __UpperCamelCase = True else: __UpperCamelCase = load_wavaveca_layer(__A ,__A ,__A ) if not is_used: unused_weights.append(__A ) logger.warning(f"Unused weights: {unused_weights}" ) def _lowercase ( __A ,__A ,__A ,__A ,__A ): '''simple docstring''' __UpperCamelCase = full_name.split("""conv_layers.""" )[-1] __UpperCamelCase = name.split(""".""" ) __UpperCamelCase = int(items[0] ) __UpperCamelCase = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f"{full_name} has size {value.shape}, but" f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." ) __UpperCamelCase = value logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f"{full_name} has size {value.shape}, but" f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." ) __UpperCamelCase = value logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f"{full_name} has size {value.shape}, but" f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found." ) __UpperCamelCase = value logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f"{full_name} has size {value.shape}, but" f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found." ) __UpperCamelCase = value logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." ) else: unused_weights.append(__A ) @torch.no_grad() def _lowercase ( __A ,__A ,__A=None ,__A=None ,__A=True ,__A=False ): '''simple docstring''' if config_path is not None: __UpperCamelCase = WavaVecaConfig.from_pretrained(__A ) else: __UpperCamelCase = WavaVecaConfig() if is_seq_class: __UpperCamelCase = read_txt_into_dict(__A ) __UpperCamelCase = idalabel __UpperCamelCase = WavaVecaForSequenceClassification(__A ) __UpperCamelCase = WavaVecaFeatureExtractor( feature_size=1 ,sampling_rate=16_000 ,padding_value=0 ,do_normalize=__A ,return_attention_mask=__A ,) feature_extractor.save_pretrained(__A ) elif is_finetuned: if dict_path: __UpperCamelCase = Dictionary.load(__A ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq __UpperCamelCase = target_dict.pad_index __UpperCamelCase = target_dict.bos_index __UpperCamelCase = target_dict.eos_index __UpperCamelCase = len(target_dict.symbols ) __UpperCamelCase = os.path.join(__A ,"""vocab.json""" ) if not os.path.isdir(__A ): logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(__A ) ) return os.makedirs(__A ,exist_ok=__A ) __UpperCamelCase = target_dict.indices # fairseq has the <pad> and <s> switched __UpperCamelCase = 0 __UpperCamelCase = 1 with open(__A ,"""w""" ,encoding="""utf-8""" ) as vocab_handle: json.dump(__A ,__A ) __UpperCamelCase = WavaVecaCTCTokenizer( __A ,unk_token=target_dict.unk_word ,pad_token=target_dict.pad_word ,bos_token=target_dict.bos_word ,eos_token=target_dict.eos_word ,word_delimiter_token="""|""" ,do_lower_case=__A ,) __UpperCamelCase = True if config.feat_extract_norm == """layer""" else False __UpperCamelCase = WavaVecaFeatureExtractor( feature_size=1 ,sampling_rate=16_000 ,padding_value=0 ,do_normalize=__A ,return_attention_mask=__A ,) __UpperCamelCase = WavaVecaProcessor(feature_extractor=__A ,tokenizer=__A ) processor.save_pretrained(__A ) __UpperCamelCase = WavaVecaForCTC(__A ) else: __UpperCamelCase = WavaVecaForPreTraining(__A ) if is_finetuned or is_seq_class: __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] ,arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} ) else: __UpperCamelCase = argparse.Namespace(task="""audio_pretraining""" ) __UpperCamelCase = fairseq.tasks.setup_task(__A ) __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ,task=__A ) __UpperCamelCase = model[0].eval() recursively_load_weights(__A ,__A ,not is_finetuned ) hf_wavavec.save_pretrained(__A ) if __name__ == "__main__": a__ : int = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') parser.add_argument( '--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not' ) parser.add_argument( '--is_seq_class', action='store_true', help='Whether the model to convert is a fine-tuned sequence classification model or not', ) a__ : Optional[int] = parser.parse_args() a__ : str = not args.not_finetuned and not args.is_seq_class convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, is_finetuned, args.is_seq_class, )
349
0
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : List[str] ): """simple docstring""" __a , __a = len(__A ), len(grid[0] ) if ( min(__A , __A ) < 0 or row == row_length or col == col_length or (row, col) in visit or grid[row][col] == 1 ): return 0 if row == row_length - 1 and col == col_length - 1: return 1 visit.add((row, col) ) __a = 0 count += depth_first_search(__A , row + 1 , __A , __A ) count += depth_first_search(__A , row - 1 , __A , __A ) count += depth_first_search(__A , __A , col + 1 , __A ) count += depth_first_search(__A , __A , col - 1 , __A ) visit.remove((row, col) ) return count if __name__ == "__main__": import doctest doctest.testmod()
302
'''simple docstring''' from __future__ import annotations import unittest from transformers import DistilBertConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.distilbert.modeling_tf_distilbert import ( TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDistilBertForMaskedLM, TFDistilBertForMultipleChoice, TFDistilBertForQuestionAnswering, TFDistilBertForSequenceClassification, TFDistilBertForTokenClassification, TFDistilBertModel, ) class UpperCAmelCase__ : def __init__( self , lowercase , ) -> Union[str, Any]: __UpperCamelCase = parent __UpperCamelCase = 1_3 __UpperCamelCase = 7 __UpperCamelCase = True __UpperCamelCase = True __UpperCamelCase = False __UpperCamelCase = True __UpperCamelCase = 9_9 __UpperCamelCase = 3_2 __UpperCamelCase = 2 __UpperCamelCase = 4 __UpperCamelCase = 3_7 __UpperCamelCase = """gelu""" __UpperCamelCase = 0.1 __UpperCamelCase = 0.1 __UpperCamelCase = 5_1_2 __UpperCamelCase = 1_6 __UpperCamelCase = 2 __UpperCamelCase = 0.02 __UpperCamelCase = 3 __UpperCamelCase = 4 __UpperCamelCase = None def __lowerCamelCase ( self ) -> List[str]: __UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCamelCase = None if self.use_input_mask: __UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] ) __UpperCamelCase = None __UpperCamelCase = None __UpperCamelCase = None if self.use_labels: __UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices ) __UpperCamelCase = DistilBertConfig( vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , ) return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def __lowerCamelCase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Dict: __UpperCamelCase = TFDistilBertModel(config=lowercase ) __UpperCamelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask} __UpperCamelCase = model(lowercase ) __UpperCamelCase = [input_ids, input_mask] __UpperCamelCase = model(lowercase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __lowerCamelCase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Optional[Any]: __UpperCamelCase = TFDistilBertForMaskedLM(config=lowercase ) __UpperCamelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask} __UpperCamelCase = model(lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __lowerCamelCase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Tuple: __UpperCamelCase = TFDistilBertForQuestionAnswering(config=lowercase ) __UpperCamelCase = { """input_ids""": input_ids, """attention_mask""": input_mask, } __UpperCamelCase = model(lowercase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __lowerCamelCase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Tuple: __UpperCamelCase = self.num_labels __UpperCamelCase = TFDistilBertForSequenceClassification(lowercase ) __UpperCamelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask} __UpperCamelCase = model(lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __lowerCamelCase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> int: __UpperCamelCase = self.num_choices __UpperCamelCase = TFDistilBertForMultipleChoice(lowercase ) __UpperCamelCase = tf.tile(tf.expand_dims(lowercase , 1 ) , (1, self.num_choices, 1) ) __UpperCamelCase = tf.tile(tf.expand_dims(lowercase , 1 ) , (1, self.num_choices, 1) ) __UpperCamelCase = { """input_ids""": multiple_choice_inputs_ids, """attention_mask""": multiple_choice_input_mask, } __UpperCamelCase = model(lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __lowerCamelCase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Optional[int]: __UpperCamelCase = self.num_labels __UpperCamelCase = TFDistilBertForTokenClassification(lowercase ) __UpperCamelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask} __UpperCamelCase = model(lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __lowerCamelCase ( self ) -> Dict: __UpperCamelCase = self.prepare_config_and_inputs() ((__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase)) = config_and_inputs __UpperCamelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_tf class UpperCAmelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase): __SCREAMING_SNAKE_CASE = ( ( TFDistilBertModel, TFDistilBertForMaskedLM, TFDistilBertForQuestionAnswering, TFDistilBertForSequenceClassification, TFDistilBertForTokenClassification, TFDistilBertForMultipleChoice, ) if is_tf_available() else None ) __SCREAMING_SNAKE_CASE = ( { '''feature-extraction''': TFDistilBertModel, '''fill-mask''': TFDistilBertForMaskedLM, '''question-answering''': TFDistilBertForQuestionAnswering, '''text-classification''': TFDistilBertForSequenceClassification, '''token-classification''': TFDistilBertForTokenClassification, '''zero-shot''': TFDistilBertForSequenceClassification, } if is_tf_available() else {} ) __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = False def __lowerCamelCase ( self ) -> Optional[Any]: __UpperCamelCase = TFDistilBertModelTester(self ) __UpperCamelCase = ConfigTester(self , config_class=lowercase , dim=3_7 ) def __lowerCamelCase ( self ) -> Any: self.config_tester.run_common_tests() def __lowerCamelCase ( self ) -> Dict: __UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_model(*lowercase ) def __lowerCamelCase ( self ) -> Union[str, Any]: __UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_masked_lm(*lowercase ) def __lowerCamelCase ( self ) -> int: __UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_question_answering(*lowercase ) def __lowerCamelCase ( self ) -> Any: __UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_sequence_classification(*lowercase ) def __lowerCamelCase ( self ) -> Optional[Any]: __UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_multiple_choice(*lowercase ) def __lowerCamelCase ( self ) -> Union[str, Any]: __UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_token_classification(*lowercase ) @slow def __lowerCamelCase ( self ) -> Tuple: for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ): __UpperCamelCase = TFDistilBertModel.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) @require_tf class UpperCAmelCase__ ( unittest.TestCase): @slow def __lowerCamelCase ( self ) -> Optional[int]: __UpperCamelCase = TFDistilBertModel.from_pretrained("""distilbert-base-uncased""" ) __UpperCamelCase = tf.constant([[0, 1, 2, 3, 4, 5]] ) __UpperCamelCase = model(lowercase )[0] __UpperCamelCase = [1, 6, 7_6_8] self.assertEqual(output.shape , lowercase ) __UpperCamelCase = tf.constant( [ [ [0.19_261_885, -0.13_732_955, 0.4_119_799], [0.22_150_156, -0.07_422_661, 0.39_037_204], [0.22_756_018, -0.0_896_414, 0.3_701_467], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , lowercase , atol=1E-4 )
349
0
"""simple docstring""" def __lowerCamelCase ( __UpperCamelCase ) -> str: """simple docstring""" lowerCAmelCase_ : Tuple = abs(__A ) lowerCAmelCase_ : Union[str, Any] = 0 while n > 0: res += n % 10 n //= 10 return res def __lowerCamelCase ( __UpperCamelCase ) -> Tuple: """simple docstring""" lowerCAmelCase_ : Any = abs(__A ) return n if n < 10 else n % 10 + sum_of_digits(n // 10 ) def __lowerCamelCase ( __UpperCamelCase ) -> List[str]: """simple docstring""" return sum(int(__A ) for c in str(abs(__A ) ) ) def __lowerCamelCase ( ) -> Union[str, Any]: """simple docstring""" from collections.abc import Callable from timeit import timeit def benchmark_a_function(__UpperCamelCase , __UpperCamelCase ) -> None: lowerCAmelCase_ : Dict = f'''{func.__name__}({value})''' lowerCAmelCase_ : List[Any] = timeit(f'''__main__.{call}''' , setup="import __main__" ) print(f'''{call:56} = {func(__A )} -- {timing:.4f} seconds''' ) for value in (262144, 1125899906842624, 1267650600228229401496703205376): for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact): benchmark_a_function(__A , __A ) print() if __name__ == "__main__": import doctest doctest.testmod() benchmark()
241
'''simple docstring''' from __future__ import annotations import math import numpy as np from numpy.linalg import norm def _lowercase ( __A ,__A ): '''simple docstring''' return math.sqrt(sum(pow(a - b ,2 ) for a, b in zip(__A ,__A ) ) ) def _lowercase ( __A ,__A ): '''simple docstring''' if dataset.ndim != value_array.ndim: __UpperCamelCase = ( """Wrong input data's dimensions... """ f"dataset : {dataset.ndim}, value_array : {value_array.ndim}" ) raise ValueError(__A ) try: if dataset.shape[1] != value_array.shape[1]: __UpperCamelCase = ( """Wrong input data's shape... """ f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}" ) raise ValueError(__A ) except IndexError: if dataset.ndim != value_array.ndim: raise TypeError("""Wrong shape""" ) if dataset.dtype != value_array.dtype: __UpperCamelCase = ( """Input data have different datatype... """ f"dataset : {dataset.dtype}, value_array : {value_array.dtype}" ) raise TypeError(__A ) __UpperCamelCase = [] for value in value_array: __UpperCamelCase = euclidean(__A ,dataset[0] ) __UpperCamelCase = dataset[0].tolist() for dataset_value in dataset[1:]: __UpperCamelCase = euclidean(__A ,__A ) if dist > temp_dist: __UpperCamelCase = temp_dist __UpperCamelCase = dataset_value.tolist() answer.append([vector, dist] ) return answer def _lowercase ( __A ,__A ): '''simple docstring''' return np.dot(__A ,__A ) / (norm(__A ) * norm(__A )) if __name__ == "__main__": import doctest doctest.testmod()
349
0
from __future__ import annotations _UpperCamelCase = [ [-1, 0], # left [0, -1], # down [1, 0], # right [0, 1], # up ] def lowerCAmelCase__( lowercase : List[Any] , lowercase : Optional[int] , lowercase : Optional[Any] , lowercase : str , lowercase : Union[str, Any] , ) -> Optional[Any]: __snake_case : List[str] = [ [0 for col in range(len(grid[0] ) )] for row in range(len(__A ) ) ] # the reference grid __snake_case : Any = 1 __snake_case : Any = [ [0 for col in range(len(grid[0] ) )] for row in range(len(__A ) ) ] # the action grid __snake_case : int = init[0] __snake_case : Dict = init[1] __snake_case : int = 0 __snake_case : Any = g + heuristic[x][y] # cost from starting cell to destination cell __snake_case : str = [[f, g, x, y]] __snake_case : Any = False # flag that is set when search is complete __snake_case : int = False # flag set if we can't find expand while not found and not resign: if len(__A ) == 0: raise ValueError("Algorithm is unable to find solution" ) else: # to choose the least costliest action so as to move closer to the goal cell.sort() cell.reverse() __snake_case : Optional[Any] = cell.pop() __snake_case : Union[str, Any] = next_cell[2] __snake_case : Optional[int] = next_cell[3] __snake_case : Any = next_cell[1] if x == goal[0] and y == goal[1]: __snake_case : Any = True else: for i in range(len(__A ) ): # to try out different valid actions __snake_case : Dict = x + DIRECTIONS[i][0] __snake_case : List[Any] = y + DIRECTIONS[i][1] if xa >= 0 and xa < len(__A ) and ya >= 0 and ya < len(grid[0] ): if closed[xa][ya] == 0 and grid[xa][ya] == 0: __snake_case : Any = g + cost __snake_case : List[str] = ga + heuristic[xa][ya] cell.append([fa, ga, xa, ya] ) __snake_case : Optional[Any] = 1 __snake_case : List[str] = i __snake_case : str = [] __snake_case : Union[str, Any] = goal[0] __snake_case : List[str] = goal[1] invpath.append([x, y] ) # we get the reverse path from here while x != init[0] or y != init[1]: __snake_case : List[str] = x - DIRECTIONS[action[x][y]][0] __snake_case : List[str] = y - DIRECTIONS[action[x][y]][1] __snake_case : Any = xa __snake_case : Tuple = ya invpath.append([x, y] ) __snake_case : str = [] for i in range(len(__A ) ): path.append(invpath[len(__A ) - 1 - i] ) return path, action if __name__ == "__main__": _UpperCamelCase = [ [0, 1, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles [0, 1, 0, 0, 0, 0], [0, 1, 0, 0, 1, 0], [0, 0, 0, 0, 1, 0], ] _UpperCamelCase = [0, 0] # all coordinates are given in format [y,x] _UpperCamelCase = [len(grid) - 1, len(grid[0]) - 1] _UpperCamelCase = 1 # the cost map which pushes the path closer to the goal _UpperCamelCase = [[0 for row in range(len(grid[0]))] for col in range(len(grid))] for i in range(len(grid)): for j in range(len(grid[0])): _UpperCamelCase = abs(i - goal[0]) + abs(j - goal[1]) if grid[i][j] == 1: # added extra penalty in the heuristic map _UpperCamelCase = 99 _UpperCamelCase = search(grid, init, goal, cost, heuristic) print('''ACTION MAP''') for i in range(len(action)): print(action[i]) for i in range(len(path)): print(path[i])
326
'''simple docstring''' from datetime import datetime import requests def _lowercase ( __A ): '''simple docstring''' __UpperCamelCase = """https://downloadgram.net/wp-json/wppress/video-downloader/video?url=""" __UpperCamelCase = requests.get(base_url + url ).json()[0]["""urls"""][0]["""src"""] return requests.get(__A ).content if __name__ == "__main__": a__ : int = input('Enter Video/IGTV url: ').strip() a__ : int = f'''{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4''' with open(file_name, 'wb') as fp: fp.write(download_video(url)) print(f'''Done. Video saved to disk as {file_name}.''')
349
0
"""simple docstring""" import json import os from typing import Optional import numpy as np from ...feature_extraction_utils import BatchFeature from ...processing_utils import ProcessorMixin from ...utils import logging from ...utils.hub import get_file_from_repo from ..auto import AutoTokenizer a = logging.get_logger(__name__) class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase_ ): _a = 'AutoTokenizer' _a = ['tokenizer'] _a = { 'semantic_prompt': 1, 'coarse_prompt': 2, 'fine_prompt': 2, } def __init__( self : Dict , lowerCAmelCase : Dict , lowerCAmelCase : Union[str, Any]=None ): super().__init__(lowerCAmelCase ) lowerCAmelCase = speaker_embeddings @classmethod def __lowercase ( cls : Dict , lowerCAmelCase : Any , lowerCAmelCase : Optional[Any]="speaker_embeddings_path.json" , **lowerCAmelCase : List[str] ): if speaker_embeddings_dict_path is not None: lowerCAmelCase = get_file_from_repo( lowerCAmelCase , lowerCAmelCase , subfolder=kwargs.pop("""subfolder""" , lowerCAmelCase ) , cache_dir=kwargs.pop("""cache_dir""" , lowerCAmelCase ) , force_download=kwargs.pop("""force_download""" , lowerCAmelCase ) , proxies=kwargs.pop("""proxies""" , lowerCAmelCase ) , resume_download=kwargs.pop("""resume_download""" , lowerCAmelCase ) , local_files_only=kwargs.pop("""local_files_only""" , lowerCAmelCase ) , use_auth_token=kwargs.pop("""use_auth_token""" , lowerCAmelCase ) , revision=kwargs.pop("""revision""" , lowerCAmelCase ) , ) if speaker_embeddings_path is None: logger.warning( f'''`{os.path.join(lowerCAmelCase , lowerCAmelCase )}` does not exists\n , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json\n dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`.''' ) lowerCAmelCase = None else: with open(lowerCAmelCase ) as speaker_embeddings_json: lowerCAmelCase = json.load(lowerCAmelCase ) else: lowerCAmelCase = None lowerCAmelCase = AutoTokenizer.from_pretrained(lowerCAmelCase , **lowerCAmelCase ) return cls(tokenizer=lowerCAmelCase , speaker_embeddings=lowerCAmelCase ) def __lowercase ( self : Optional[int] , lowerCAmelCase : Optional[int] , lowerCAmelCase : str="speaker_embeddings_path.json" , lowerCAmelCase : List[str]="speaker_embeddings" , lowerCAmelCase : str = False , **lowerCAmelCase : Any , ): if self.speaker_embeddings is not None: os.makedirs(os.path.join(lowerCAmelCase , lowerCAmelCase , """v2""" ) , exist_ok=lowerCAmelCase ) lowerCAmelCase = {} lowerCAmelCase = save_directory for prompt_key in self.speaker_embeddings: if prompt_key != "repo_or_path": lowerCAmelCase = self._load_voice_preset(lowerCAmelCase ) lowerCAmelCase = {} for key in self.speaker_embeddings[prompt_key]: np.save( os.path.join( embeddings_dict["""repo_or_path"""] , lowerCAmelCase , f'''{prompt_key}_{key}''' ) , voice_preset[key] , allow_pickle=lowerCAmelCase , ) lowerCAmelCase = os.path.join(lowerCAmelCase , f'''{prompt_key}_{key}.npy''' ) lowerCAmelCase = tmp_dict with open(os.path.join(lowerCAmelCase , lowerCAmelCase ) , """w""" ) as fp: json.dump(lowerCAmelCase , lowerCAmelCase ) super().save_pretrained(lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase ) def __lowercase ( self : Optional[int] , lowerCAmelCase : Optional[Any] = None , **lowerCAmelCase : Optional[Any] ): lowerCAmelCase = self.speaker_embeddings[voice_preset] lowerCAmelCase = {} for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]: if key not in voice_preset_paths: raise ValueError( f'''Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].''' ) lowerCAmelCase = get_file_from_repo( self.speaker_embeddings.get("""repo_or_path""" , """/""" ) , voice_preset_paths[key] , subfolder=kwargs.pop("""subfolder""" , lowerCAmelCase ) , cache_dir=kwargs.pop("""cache_dir""" , lowerCAmelCase ) , force_download=kwargs.pop("""force_download""" , lowerCAmelCase ) , proxies=kwargs.pop("""proxies""" , lowerCAmelCase ) , resume_download=kwargs.pop("""resume_download""" , lowerCAmelCase ) , local_files_only=kwargs.pop("""local_files_only""" , lowerCAmelCase ) , use_auth_token=kwargs.pop("""use_auth_token""" , lowerCAmelCase ) , revision=kwargs.pop("""revision""" , lowerCAmelCase ) , ) if path is None: raise ValueError( f'''`{os.path.join(self.speaker_embeddings.get('repo_or_path' , '/' ) , voice_preset_paths[key] )}` does not exists\n , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}\n embeddings.''' ) lowerCAmelCase = np.load(lowerCAmelCase ) return voice_preset_dict def __lowercase ( self : int , lowerCAmelCase : str = None ): for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]: if key not in voice_preset: raise ValueError(f'''Voice preset unrecognized, missing {key} as a key.''' ) if not isinstance(voice_preset[key] , np.ndarray ): raise ValueError(f'''{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.''' ) if len(voice_preset[key].shape ) != self.preset_shape[key]: raise ValueError(f'''{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.''' ) def __call__( self : List[Any] , lowerCAmelCase : int=None , lowerCAmelCase : List[Any]=None , lowerCAmelCase : List[str]="pt" , lowerCAmelCase : List[str]=256 , lowerCAmelCase : List[Any]=False , lowerCAmelCase : str=True , lowerCAmelCase : Optional[Any]=False , **lowerCAmelCase : Optional[Any] , ): if voice_preset is not None and not isinstance(lowerCAmelCase , lowerCAmelCase ): if ( isinstance(lowerCAmelCase , lowerCAmelCase ) and self.speaker_embeddings is not None and voice_preset in self.speaker_embeddings ): lowerCAmelCase = self._load_voice_preset(lowerCAmelCase ) else: if isinstance(lowerCAmelCase , lowerCAmelCase ) and not voice_preset.endswith(""".npz""" ): lowerCAmelCase = voice_preset + """.npz""" lowerCAmelCase = np.load(lowerCAmelCase ) if voice_preset is not None: self._validate_voice_preset_dict(lowerCAmelCase , **lowerCAmelCase ) lowerCAmelCase = BatchFeature(data=lowerCAmelCase , tensor_type=lowerCAmelCase ) lowerCAmelCase = self.tokenizer( lowerCAmelCase , return_tensors=lowerCAmelCase , padding="""max_length""" , max_length=lowerCAmelCase , return_attention_mask=lowerCAmelCase , return_token_type_ids=lowerCAmelCase , add_special_tokens=lowerCAmelCase , **lowerCAmelCase , ) if voice_preset is not None: lowerCAmelCase = voice_preset return encoded_text
155
'''simple docstring''' import asyncio import os import re import sys import tempfile import unittest from contextlib import contextmanager from copy import deepcopy from distutils.util import strtobool from enum import Enum from importlib.util import find_spec from pathlib import Path from unittest.mock import patch import pyarrow as pa import pytest import requests from packaging import version from datasets import config if config.PY_VERSION < version.parse('3.8'): import importlib_metadata else: import importlib.metadata as importlib_metadata def _lowercase ( __A ,__A=False ): '''simple docstring''' try: __UpperCamelCase = os.environ[key] except KeyError: # KEY isn't set, default to `default`. __UpperCamelCase = default else: # KEY is set, convert it to True or False. try: __UpperCamelCase = strtobool(__A ) except ValueError: # More values are supported, but let's keep the message simple. raise ValueError(f"If set, {key} must be yes or no." ) return _value a__ : Optional[Any] = parse_flag_from_env('RUN_SLOW', default=False) a__ : Union[str, Any] = parse_flag_from_env('RUN_REMOTE', default=False) a__ : Any = parse_flag_from_env('RUN_LOCAL', default=True) a__ : List[Any] = parse_flag_from_env('RUN_PACKAGED', default=True) # Compression a__ : Optional[int] = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='test requires lz4') a__ : Optional[int] = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='test requires py7zr') a__ : Optional[Any] = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='test requires zstandard') # Audio a__ : List[Any] = pytest.mark.skipif( # On Windows and OS X, soundfile installs sndfile find_spec('soundfile') is None or version.parse(importlib_metadata.version('soundfile')) < version.parse('0.12.0'), reason='test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ', ) # Beam a__ : str = pytest.mark.skipif( not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('0.3.2'), reason='test requires apache-beam and a compatible dill version', ) # Dill-cloudpickle compatibility a__ : str = pytest.mark.skipif( config.DILL_VERSION <= version.parse('0.3.2'), reason='test requires dill>0.3.2 for cloudpickle compatibility', ) # Windows a__ : Tuple = pytest.mark.skipif( sys.platform == 'win32', reason='test should not be run on Windows', ) def _lowercase ( __A ): '''simple docstring''' try: import faiss # noqa except ImportError: __UpperCamelCase = unittest.skip("""test requires faiss""" )(__A ) return test_case def _lowercase ( __A ): '''simple docstring''' try: import regex # noqa except ImportError: __UpperCamelCase = unittest.skip("""test requires regex""" )(__A ) return test_case def _lowercase ( __A ): '''simple docstring''' try: import elasticsearch # noqa except ImportError: __UpperCamelCase = unittest.skip("""test requires elasticsearch""" )(__A ) return test_case def _lowercase ( __A ): '''simple docstring''' try: import sqlalchemy # noqa except ImportError: __UpperCamelCase = unittest.skip("""test requires sqlalchemy""" )(__A ) return test_case def _lowercase ( __A ): '''simple docstring''' if not config.TORCH_AVAILABLE: __UpperCamelCase = unittest.skip("""test requires PyTorch""" )(__A ) return test_case def _lowercase ( __A ): '''simple docstring''' if not config.TF_AVAILABLE: __UpperCamelCase = unittest.skip("""test requires TensorFlow""" )(__A ) return test_case def _lowercase ( __A ): '''simple docstring''' if not config.JAX_AVAILABLE: __UpperCamelCase = unittest.skip("""test requires JAX""" )(__A ) return test_case def _lowercase ( __A ): '''simple docstring''' if not config.PIL_AVAILABLE: __UpperCamelCase = unittest.skip("""test requires Pillow""" )(__A ) return test_case def _lowercase ( __A ): '''simple docstring''' try: import transformers # noqa F401 except ImportError: return unittest.skip("""test requires transformers""" )(__A ) else: return test_case def _lowercase ( __A ): '''simple docstring''' try: import tiktoken # noqa F401 except ImportError: return unittest.skip("""test requires tiktoken""" )(__A ) else: return test_case def _lowercase ( __A ): '''simple docstring''' try: import spacy # noqa F401 except ImportError: return unittest.skip("""test requires spacy""" )(__A ) else: return test_case def _lowercase ( __A ): '''simple docstring''' def _require_spacy_model(__A ): try: import spacy # noqa F401 spacy.load(__A ) except ImportError: return unittest.skip("""test requires spacy""" )(__A ) except OSError: return unittest.skip("""test requires spacy model '{}'""".format(__A ) )(__A ) else: return test_case return _require_spacy_model def _lowercase ( __A ): '''simple docstring''' try: import pyspark # noqa F401 except ImportError: return unittest.skip("""test requires pyspark""" )(__A ) else: return test_case def _lowercase ( __A ): '''simple docstring''' try: import joblibspark # noqa F401 except ImportError: return unittest.skip("""test requires joblibspark""" )(__A ) else: return test_case def _lowercase ( __A ): '''simple docstring''' if not _run_slow_tests or _run_slow_tests == 0: __UpperCamelCase = unittest.skip("""test is slow""" )(__A ) return test_case def _lowercase ( __A ): '''simple docstring''' if not _run_local_tests or _run_local_tests == 0: __UpperCamelCase = unittest.skip("""test is local""" )(__A ) return test_case def _lowercase ( __A ): '''simple docstring''' if not _run_packaged_tests or _run_packaged_tests == 0: __UpperCamelCase = unittest.skip("""test is packaged""" )(__A ) return test_case def _lowercase ( __A ): '''simple docstring''' if not _run_remote_tests or _run_remote_tests == 0: __UpperCamelCase = unittest.skip("""test requires remote""" )(__A ) return test_case def _lowercase ( *__A ): '''simple docstring''' def decorate(cls ): for name, fn in cls.__dict__.items(): if callable(__A ) and name.startswith("""test""" ): for decorator in decorators: __UpperCamelCase = decorator(__A ) setattr(cls ,__A ,__A ) return cls return decorate class UpperCAmelCase__ ( UpperCAmelCase_): pass class UpperCAmelCase__ ( UpperCAmelCase_): __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = 1 __SCREAMING_SNAKE_CASE = 2 @contextmanager def _lowercase ( __A=OfflineSimulationMode.CONNECTION_FAILS ,__A=1E-16 ): '''simple docstring''' __UpperCamelCase = requests.Session().request def timeout_request(__A ,__A ,__A ,**__A ): # Change the url to an invalid url so that the connection hangs __UpperCamelCase = """https://10.255.255.1""" if kwargs.get("""timeout""" ) is None: raise RequestWouldHangIndefinitelyError( f"Tried a call to {url} in offline mode with no timeout set. Please set a timeout." ) __UpperCamelCase = timeout try: return online_request(__A ,__A ,**__A ) except Exception as e: # The following changes in the error are just here to make the offline timeout error prettier __UpperCamelCase = url __UpperCamelCase = e.args[0] __UpperCamelCase = (max_retry_error.args[0].replace("""10.255.255.1""" ,f"OfflineMock[{url}]" ),) __UpperCamelCase = (max_retry_error,) raise def raise_connection_error(__A ,__A ,**__A ): raise requests.ConnectionError("""Offline mode is enabled.""" ,request=__A ) if mode is OfflineSimulationMode.CONNECTION_FAILS: with patch("""requests.Session.send""" ,__A ): yield elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT: # inspired from https://stackoverflow.com/a/904609 with patch("""requests.Session.request""" ,__A ): yield elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1: with patch("""datasets.config.HF_DATASETS_OFFLINE""" ,__A ): yield else: raise ValueError("""Please use a value from the OfflineSimulationMode enum.""" ) @contextmanager def _lowercase ( *__A ,**__A ): '''simple docstring''' __UpperCamelCase = str(Path().resolve() ) with tempfile.TemporaryDirectory(*__A ,**__A ) as tmp_dir: try: os.chdir(__A ) yield finally: os.chdir(__A ) @contextmanager def _lowercase ( ): '''simple docstring''' import gc gc.collect() __UpperCamelCase = pa.total_allocated_bytes() yield assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase." @contextmanager def _lowercase ( ): '''simple docstring''' import gc gc.collect() __UpperCamelCase = pa.total_allocated_bytes() yield assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase." def _lowercase ( __A ,__A ): '''simple docstring''' return deepcopy(__A ).integers(0 ,100 ,10 ).tolist() == deepcopy(__A ).integers(0 ,100 ,10 ).tolist() def _lowercase ( __A ): '''simple docstring''' import decorator from requests.exceptions import HTTPError def _wrapper(__A ,*__A ,**__A ): try: return func(*__A ,**__A ) except HTTPError as err: if str(__A ).startswith("""500""" ) or str(__A ).startswith("""502""" ): pytest.xfail(str(__A ) ) raise err return decorator.decorator(_wrapper ,__A ) class UpperCAmelCase__ : def __init__( self , lowercase , lowercase , lowercase ) -> str: __UpperCamelCase = returncode __UpperCamelCase = stdout __UpperCamelCase = stderr async def _lowercase ( __A ,__A ): '''simple docstring''' while True: __UpperCamelCase = await stream.readline() if line: callback(__A ) else: break async def _lowercase ( __A ,__A=None ,__A=None ,__A=None ,__A=False ,__A=False ): '''simple docstring''' if echo: print("""\nRunning: """ ,""" """.join(__A ) ) __UpperCamelCase = await asyncio.create_subprocess_exec( cmd[0] ,*cmd[1:] ,stdin=__A ,stdout=asyncio.subprocess.PIPE ,stderr=asyncio.subprocess.PIPE ,env=__A ,) # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait # # If it starts hanging, will need to switch to the following code. The problem is that no data # will be seen until it's done and if it hangs for example there will be no debug info. # out, err = await p.communicate() # return _RunOutput(p.returncode, out, err) __UpperCamelCase = [] __UpperCamelCase = [] def tee(__A ,__A ,__A ,__A="" ): __UpperCamelCase = line.decode("""utf-8""" ).rstrip() sink.append(__A ) if not quiet: print(__A ,__A ,file=__A ) # XXX: the timeout doesn't seem to make any difference here await asyncio.wait( [ _read_stream(p.stdout ,lambda __A : tee(__A ,__A ,sys.stdout ,label="""stdout:""" ) ), _read_stream(p.stderr ,lambda __A : tee(__A ,__A ,sys.stderr ,label="""stderr:""" ) ), ] ,timeout=__A ,) return _RunOutput(await p.wait() ,__A ,__A ) def _lowercase ( __A ,__A=None ,__A=None ,__A=180 ,__A=False ,__A=True ): '''simple docstring''' __UpperCamelCase = asyncio.get_event_loop() __UpperCamelCase = loop.run_until_complete( _stream_subprocess(__A ,env=__A ,stdin=__A ,timeout=__A ,quiet=__A ,echo=__A ) ) __UpperCamelCase = """ """.join(__A ) if result.returncode > 0: __UpperCamelCase = """\n""".join(result.stderr ) raise RuntimeError( f"'{cmd_str}' failed with returncode {result.returncode}\n\n" f"The combined stderr from workers follows:\n{stderr}" ) # check that the subprocess actually did run and produced some output, should the test rely on # the remote side to do the testing if not result.stdout and not result.stderr: raise RuntimeError(f"'{cmd_str}' produced no output." ) return result def _lowercase ( ): '''simple docstring''' __UpperCamelCase = os.environ.get("""PYTEST_XDIST_WORKER""" ,"""gw0""" ) __UpperCamelCase = re.sub(R"""^gw""" ,"""""" ,__A ,0 ,re.M ) return int(__A ) def _lowercase ( ): '''simple docstring''' __UpperCamelCase = 29_500 __UpperCamelCase = pytest_xdist_worker_id() return port + uniq_delta
349
0
"""simple docstring""" import collections import os from typing import List, Optional, Tuple from transformers.utils import is_jieba_available, requires_backends if is_jieba_available(): import jieba from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging lowerCamelCase_ = logging.get_logger(__name__) lowerCamelCase_ = {'vocab_file': 'vocab.txt'} lowerCamelCase_ = { 'vocab_file': { 'openbmb/cpm-ant-10b': 'https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt', }, } lowerCamelCase_ = { 'openbmb/cpm-ant-10b': 1024, } def snake_case ( A__ ): UpperCAmelCase_ : int = collections.OrderedDict() with open(__A ,"r" ,encoding="utf-8" ) as reader: UpperCAmelCase_ : List[Any] = reader.readlines() for index, token in enumerate(__A ): UpperCAmelCase_ : List[Any] = token.rstrip("\n" ) UpperCAmelCase_ : Dict = index return vocab class UpperCamelCase_ (UpperCAmelCase_ ): def __init__( self : Any , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Tuple="<unk>" , lowerCAmelCase_ : Union[str, Any]=200 ) -> Dict: UpperCAmelCase_ : Dict = vocab UpperCAmelCase_ : Optional[int] = unk_token UpperCAmelCase_ : List[str] = max_input_chars_per_word def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase_ : Tuple ) -> Optional[int]: UpperCAmelCase_ : Union[str, Any] = list(lowerCAmelCase_ ) if len(lowerCAmelCase_ ) > self.max_input_chars_per_word: return [self.unk_token] UpperCAmelCase_ : Dict = 0 UpperCAmelCase_ : List[Any] = [] while start < len(lowerCAmelCase_ ): UpperCAmelCase_ : List[str] = len(lowerCAmelCase_ ) UpperCAmelCase_ : List[str] = None while start < end: UpperCAmelCase_ : List[str] = "".join(chars[start:end] ) if substr in self.vocab: UpperCAmelCase_ : Dict = substr break end -= 1 if cur_substr is None: sub_tokens.append(self.unk_token ) start += 1 else: sub_tokens.append(lowerCAmelCase_ ) UpperCAmelCase_ : Optional[int] = end return sub_tokens class UpperCamelCase_ (UpperCAmelCase_ ): __magic_name__ = VOCAB_FILES_NAMES __magic_name__ = PRETRAINED_VOCAB_FILES_MAP __magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __magic_name__ = ['''input_ids''', '''attention_mask'''] __magic_name__ = False def __init__( self : Optional[int] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Optional[Any]="<d>" , lowerCAmelCase_ : Optional[Any]="</d>" , lowerCAmelCase_ : Union[str, Any]="<s>" , lowerCAmelCase_ : str="</s>" , lowerCAmelCase_ : str="<pad>" , lowerCAmelCase_ : str="<unk>" , lowerCAmelCase_ : Any="</n>" , lowerCAmelCase_ : Optional[Any]="</_>" , lowerCAmelCase_ : Tuple="left" , **lowerCAmelCase_ : Union[str, Any] , ) -> Tuple: requires_backends(self , ["jieba"] ) super().__init__( bod_token=lowerCAmelCase_ , eod_token=lowerCAmelCase_ , bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , line_token=lowerCAmelCase_ , space_token=lowerCAmelCase_ , padding_side=lowerCAmelCase_ , **lowerCAmelCase_ , ) UpperCAmelCase_ : Union[str, Any] = bod_token UpperCAmelCase_ : Tuple = eod_token UpperCAmelCase_ : Optional[Any] = load_vocab(lowerCAmelCase_ ) UpperCAmelCase_ : Tuple = self.encoder[space_token] UpperCAmelCase_ : List[str] = self.encoder[line_token] del self.encoder[space_token] del self.encoder[line_token] UpperCAmelCase_ : Optional[Any] = collections.OrderedDict(sorted(self.encoder.items() , key=lambda lowerCAmelCase_ : x[1] ) ) UpperCAmelCase_ : Any = {v: k for k, v in self.encoder.items()} UpperCAmelCase_ : List[str] = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token ) @property def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]: return self.encoder[self.bod_token] @property def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union[str, Any]: return self.encoder[self.eod_token] @property def _SCREAMING_SNAKE_CASE ( self : int ) -> Tuple: return self.encoder["\n"] @property def _SCREAMING_SNAKE_CASE ( self : int ) -> int: return len(self.encoder ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]: return dict(self.encoder , **self.added_tokens_encoder ) def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase_ : Dict ) -> List[Any]: UpperCAmelCase_ : Dict = [] for x in jieba.cut(lowerCAmelCase_ , cut_all=lowerCAmelCase_ ): output_tokens.extend(self.wordpiece_tokenizer.tokenize(lowerCAmelCase_ ) ) return output_tokens def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase_ : Dict , **lowerCAmelCase_ : List[Any] ) -> Union[str, Any]: UpperCAmelCase_ : Optional[Any] = [i for i in token_ids if i >= 0] UpperCAmelCase_ : str = [ x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id ] return super()._decode(lowerCAmelCase_ , **lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase_ : Union[str, Any] ) -> int: return token in self.encoder def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase_ : str ) -> str: return "".join(lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase_ : Optional[int] ) -> Dict: return self.encoder.get(lowerCAmelCase_ , self.encoder.get(self.unk_token ) ) def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase_ : Optional[int] ) -> str: return self.decoder.get(lowerCAmelCase_ , self.unk_token ) def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[int] = None ) -> Tuple[str]: if os.path.isdir(lowerCAmelCase_ ): UpperCAmelCase_ : Optional[Any] = os.path.join( lowerCAmelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) else: UpperCAmelCase_ : Tuple = (filename_prefix + "-" if filename_prefix else "") + save_directory UpperCAmelCase_ : Dict = 0 if " " in self.encoder: UpperCAmelCase_ : List[Any] = self.encoder[" "] del self.encoder[" "] if "\n" in self.encoder: UpperCAmelCase_ : Dict = self.encoder["\n"] del self.encoder["\n"] UpperCAmelCase_ : str = collections.OrderedDict(sorted(self.encoder.items() , key=lambda lowerCAmelCase_ : x[1] ) ) with open(lowerCAmelCase_ , "w" , encoding="utf-8" ) as writer: for token, token_index in self.encoder.items(): if index != token_index: logger.warning( f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.""" " Please check that the vocabulary is not corrupted!" ) UpperCAmelCase_ : Optional[int] = token_index writer.write(token + "\n" ) index += 1 return (vocab_file,) def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[int] = None ) -> List[int]: if token_ids_a is None: return [self.bos_token_id] + token_ids_a return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : str = None , lowerCAmelCase_ : List[str] = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowerCAmelCase_ , token_ids_a=lowerCAmelCase_ , already_has_special_tokens=lowerCAmelCase_ ) if token_ids_a is not None: return [1] + ([0] * len(lowerCAmelCase_ )) + [1] + ([0] * len(lowerCAmelCase_ )) return [1] + ([0] * len(lowerCAmelCase_ ))
268
'''simple docstring''' import re def _lowercase ( __A ): '''simple docstring''' return [char.split() for char in re.split(R"""[^ a-z A-Z 0-9 \s]""" ,str_ )] def _lowercase ( __A ): '''simple docstring''' __UpperCamelCase = split_input(str_ ) return "".join( ["""""".join([char.capitalize() for char in sub_str] ) for sub_str in string_split] ) def _lowercase ( __A ,__A ,__A ): '''simple docstring''' try: __UpperCamelCase = split_input(__A ) if upper: __UpperCamelCase = """""".join( [ separator.join([char.upper() for char in sub_str] ) for sub_str in string_split ] ) else: __UpperCamelCase = """""".join( [ separator.join([char.lower() for char in sub_str] ) for sub_str in string_split ] ) return res_str except IndexError: return "not valid string" def _lowercase ( __A ): '''simple docstring''' return to_simple_case(__A ) def _lowercase ( __A ): '''simple docstring''' try: __UpperCamelCase = to_simple_case(__A ) return res_str[0].lower() + res_str[1:] except IndexError: return "not valid string" def _lowercase ( __A ,__A ): '''simple docstring''' return to_complex_case(__A ,__A ,"""_""" ) def _lowercase ( __A ,__A ): '''simple docstring''' return to_complex_case(__A ,__A ,"""-""" ) if __name__ == "__main__": __import__('doctest').testmod()
349
0
from collections import defaultdict from typing import Optional from ..image_utils import load_image from ..utils import ( add_end_docstrings, is_torch_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, ChunkPipeline if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING SCREAMING_SNAKE_CASE_:List[str] = logging.get_logger(__name__) @add_end_docstrings(UpperCAmelCase_ ) class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase_ ): '''simple docstring''' def __init__( self, **lowerCamelCase__ ): super().__init__(**lowerCamelCase__ ) requires_backends(self, """vision""" ) requires_backends(self, """torch""" ) if self.framework != "pt": raise ValueError(f'''The {self.__class__} is only available in PyTorch.''' ) self.check_model_type(lowerCamelCase__ ) def _lowerCAmelCase ( self, **lowerCamelCase__ ): A : int = {} A : Any = {} A : Optional[Any] = {} # preprocess args if "points_per_batch" in kwargs: A : Dict = kwargs["""points_per_batch"""] if "points_per_crop" in kwargs: A : Union[str, Any] = kwargs["""points_per_crop"""] if "crops_n_layers" in kwargs: A : Dict = kwargs["""crops_n_layers"""] if "crop_overlap_ratio" in kwargs: A : Optional[int] = kwargs["""crop_overlap_ratio"""] if "crop_n_points_downscale_factor" in kwargs: A : Dict = kwargs["""crop_n_points_downscale_factor"""] # postprocess args if "pred_iou_thresh" in kwargs: A : List[Any] = kwargs["""pred_iou_thresh"""] if "stability_score_offset" in kwargs: A : Optional[int] = kwargs["""stability_score_offset"""] if "mask_threshold" in kwargs: A : List[Any] = kwargs["""mask_threshold"""] if "stability_score_thresh" in kwargs: A : List[str] = kwargs["""stability_score_thresh"""] if "crops_nms_thresh" in kwargs: A : List[str] = kwargs["""crops_nms_thresh"""] if "output_rle_mask" in kwargs: A : Optional[Any] = kwargs["""output_rle_mask"""] if "output_bboxes_mask" in kwargs: A : Any = kwargs["""output_bboxes_mask"""] return preprocess_kwargs, forward_params, postprocess_kwargs def __call__( self, lowerCamelCase__, *lowerCamelCase__, lowerCamelCase__=None, lowerCamelCase__=None, **lowerCamelCase__ ): return super().__call__(lowerCamelCase__, *lowerCamelCase__, num_workers=lowerCamelCase__, batch_size=lowerCamelCase__, **lowerCamelCase__ ) def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__=64, lowerCamelCase__ = 0, lowerCamelCase__ = 512 / 1500, lowerCamelCase__ = 32, lowerCamelCase__ = 1, ): A : Any = load_image(lowerCamelCase__ ) A : List[Any] = self.image_processor.size["""longest_edge"""] A , A , A , A : Dict = self.image_processor.generate_crop_boxes( lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ) A : Optional[Any] = self.image_processor(images=lowerCamelCase__, return_tensors="""pt""" ) with self.device_placement(): if self.framework == "pt": A : Optional[Any] = self.get_inference_context() with inference_context(): A : List[str] = self._ensure_tensor_on_device(lowerCamelCase__, device=self.device ) A : List[Any] = self.model.get_image_embeddings(model_inputs.pop("""pixel_values""" ) ) A : Dict = image_embeddings A : Any = grid_points.shape[1] A : int = points_per_batch if points_per_batch is not None else n_points if points_per_batch <= 0: raise ValueError( """Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. """ """To return all points at once, set points_per_batch to None""" ) for i in range(0, lowerCamelCase__, lowerCamelCase__ ): A : Optional[int] = grid_points[:, i : i + points_per_batch, :, :] A : Optional[Any] = input_labels[:, i : i + points_per_batch] A : Optional[int] = i == n_points - points_per_batch yield { "input_points": batched_points, "input_labels": labels, "input_boxes": crop_boxes, "is_last": is_last, **model_inputs, } def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__=0.88, lowerCamelCase__=0.95, lowerCamelCase__=0, lowerCamelCase__=1, ): A : Union[str, Any] = model_inputs.pop("""input_boxes""" ) A : int = model_inputs.pop("""is_last""" ) A : List[str] = model_inputs.pop("""original_sizes""" ).tolist() A : List[str] = model_inputs.pop("""reshaped_input_sizes""" ).tolist() A : List[str] = self.model(**lowerCamelCase__ ) # post processing happens here in order to avoid CPU GPU copies of ALL the masks A : Tuple = model_outputs["""pred_masks"""] A : Optional[Any] = self.image_processor.post_process_masks( lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, binarize=lowerCamelCase__ ) A : List[Any] = model_outputs["""iou_scores"""] A , A , A : Optional[Any] = self.image_processor.filter_masks( masks[0], iou_scores[0], original_sizes[0], input_boxes[0], lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, ) return { "masks": masks, "is_last": is_last, "boxes": boxes, "iou_scores": iou_scores, } def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__=False, lowerCamelCase__=False, lowerCamelCase__=0.7, ): A : Dict = [] A : Dict = [] A : str = [] for model_output in model_outputs: all_scores.append(model_output.pop("""iou_scores""" ) ) all_masks.extend(model_output.pop("""masks""" ) ) all_boxes.append(model_output.pop("""boxes""" ) ) A : int = torch.cat(lowerCamelCase__ ) A : Dict = torch.cat(lowerCamelCase__ ) A , A , A , A : Tuple = self.image_processor.post_process_for_mask_generation( lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ) A : str = defaultdict(lowerCamelCase__ ) for output in model_outputs: for k, v in output.items(): extra[k].append(lowerCamelCase__ ) A : str = {} if output_rle_mask: A : Tuple = rle_mask if output_bboxes_mask: A : Optional[int] = bounding_boxes return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
116
'''simple docstring''' import gc import random import tempfile import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline from diffusers.utils import floats_tensor, nightly, torch_device from diffusers.utils.testing_utils import require_torch_gpu class UpperCAmelCase__ ( unittest.TestCase): def __lowerCamelCase ( self ) -> List[str]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def __lowerCamelCase ( self ) -> int: __UpperCamelCase = 1 __UpperCamelCase = 3 __UpperCamelCase = (3_2, 3_2) __UpperCamelCase = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(lowercase ) return image @property def __lowerCamelCase ( self ) -> Dict: torch.manual_seed(0 ) __UpperCamelCase = UNetaDConditionModel( block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=3_2 , ) return model @property def __lowerCamelCase ( self ) -> List[str]: torch.manual_seed(0 ) __UpperCamelCase = AutoencoderKL( block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , ) return model @property def __lowerCamelCase ( self ) -> Optional[int]: torch.manual_seed(0 ) __UpperCamelCase = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ) return CLIPTextModel(lowercase ) @property def __lowerCamelCase ( self ) -> Tuple: def extract(*lowercase , **lowercase ): class UpperCAmelCase__ : def __init__( self ) -> Tuple: __UpperCamelCase = torch.ones([0] ) def __lowerCamelCase ( self , lowercase ) -> List[str]: self.pixel_values.to(lowercase ) return self return Out() return extract def __lowerCamelCase ( self ) -> Any: __UpperCamelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator __UpperCamelCase = self.dummy_cond_unet __UpperCamelCase = DDIMScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=lowercase , set_alpha_to_one=lowercase , ) __UpperCamelCase = self.dummy_vae __UpperCamelCase = self.dummy_text_encoder __UpperCamelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) # make sure here that pndm scheduler skips prk __UpperCamelCase = StableDiffusionPipeline( unet=lowercase , scheduler=lowercase , vae=lowercase , text_encoder=lowercase , tokenizer=lowercase , safety_checker=lowercase , feature_extractor=self.dummy_extractor , ) __UpperCamelCase = sd_pipe.to(lowercase ) sd_pipe.set_progress_bar_config(disable=lowercase ) __UpperCamelCase = """A painting of a squirrel eating a burger""" __UpperCamelCase = torch.Generator(device=lowercase ).manual_seed(0 ) __UpperCamelCase = sd_pipe([prompt] , generator=lowercase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" ) __UpperCamelCase = output.images __UpperCamelCase = torch.Generator(device=lowercase ).manual_seed(0 ) __UpperCamelCase = sd_pipe( [prompt] , generator=lowercase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , return_dict=lowercase , )[0] __UpperCamelCase = image[0, -3:, -3:, -1] __UpperCamelCase = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 6_4, 6_4, 3) __UpperCamelCase = np.array([0.5_756, 0.6_118, 0.5_005, 0.5_041, 0.5_471, 0.4_726, 0.4_976, 0.4_865, 0.4_864] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 def __lowerCamelCase ( self ) -> Tuple: __UpperCamelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator __UpperCamelCase = self.dummy_cond_unet __UpperCamelCase = PNDMScheduler(skip_prk_steps=lowercase ) __UpperCamelCase = self.dummy_vae __UpperCamelCase = self.dummy_text_encoder __UpperCamelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) # make sure here that pndm scheduler skips prk __UpperCamelCase = StableDiffusionPipeline( unet=lowercase , scheduler=lowercase , vae=lowercase , text_encoder=lowercase , tokenizer=lowercase , safety_checker=lowercase , feature_extractor=self.dummy_extractor , ) __UpperCamelCase = sd_pipe.to(lowercase ) sd_pipe.set_progress_bar_config(disable=lowercase ) __UpperCamelCase = """A painting of a squirrel eating a burger""" __UpperCamelCase = torch.Generator(device=lowercase ).manual_seed(0 ) __UpperCamelCase = sd_pipe([prompt] , generator=lowercase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" ) __UpperCamelCase = output.images __UpperCamelCase = torch.Generator(device=lowercase ).manual_seed(0 ) __UpperCamelCase = sd_pipe( [prompt] , generator=lowercase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , return_dict=lowercase , )[0] __UpperCamelCase = image[0, -3:, -3:, -1] __UpperCamelCase = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 6_4, 6_4, 3) __UpperCamelCase = np.array([0.5_125, 0.5_716, 0.4_828, 0.5_060, 0.5_650, 0.4_768, 0.5_185, 0.4_895, 0.4_993] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 def __lowerCamelCase ( self ) -> Union[str, Any]: __UpperCamelCase = StableDiffusionPipeline.from_pretrained( """hf-internal-testing/tiny-stable-diffusion-lms-pipe""" , safety_checker=lowercase ) assert isinstance(lowercase , lowercase ) assert isinstance(pipe.scheduler , lowercase ) assert pipe.safety_checker is None __UpperCamelCase = pipe("""example prompt""" , num_inference_steps=2 ).images[0] assert image is not None # check that there's no error when saving a pipeline with one of the models being None with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(lowercase ) __UpperCamelCase = StableDiffusionPipeline.from_pretrained(lowercase ) # sanity check that the pipeline still works assert pipe.safety_checker is None __UpperCamelCase = pipe("""example prompt""" , num_inference_steps=2 ).images[0] assert image is not None @unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" ) def __lowerCamelCase ( self ) -> Optional[int]: __UpperCamelCase = self.dummy_cond_unet __UpperCamelCase = PNDMScheduler(skip_prk_steps=lowercase ) __UpperCamelCase = self.dummy_vae __UpperCamelCase = self.dummy_text_encoder __UpperCamelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) # put models in fp16 __UpperCamelCase = unet.half() __UpperCamelCase = vae.half() __UpperCamelCase = bert.half() # make sure here that pndm scheduler skips prk __UpperCamelCase = StableDiffusionPipeline( unet=lowercase , scheduler=lowercase , vae=lowercase , text_encoder=lowercase , tokenizer=lowercase , safety_checker=lowercase , feature_extractor=self.dummy_extractor , ) __UpperCamelCase = sd_pipe.to(lowercase ) sd_pipe.set_progress_bar_config(disable=lowercase ) __UpperCamelCase = """A painting of a squirrel eating a burger""" __UpperCamelCase = sd_pipe([prompt] , num_inference_steps=2 , output_type="""np""" ).images assert image.shape == (1, 6_4, 6_4, 3) @nightly @require_torch_gpu class UpperCAmelCase__ ( unittest.TestCase): def __lowerCamelCase ( self ) -> Tuple: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowerCamelCase ( self ) -> Dict: __UpperCamelCase = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" , safety_checker=lowercase ) __UpperCamelCase = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config ) __UpperCamelCase = sd_pipe.to(lowercase ) sd_pipe.set_progress_bar_config(disable=lowercase ) __UpperCamelCase = ( """portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle""" """ coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with""" """ anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and""" """ children from bahnhof zoo, detailed """ ) __UpperCamelCase = 4_0_0_3_6_6_0_3_4_6 __UpperCamelCase = 7 # without safety guidance (sld_guidance_scale = 0) __UpperCamelCase = torch.manual_seed(lowercase ) __UpperCamelCase = sd_pipe( [prompt] , generator=lowercase , guidance_scale=lowercase , num_inference_steps=5_0 , output_type="""np""" , width=5_1_2 , height=5_1_2 , sld_guidance_scale=0 , ) __UpperCamelCase = output.images __UpperCamelCase = image[0, -3:, -3:, -1] __UpperCamelCase = [0.2_278, 0.2_231, 0.2_249, 0.2_333, 0.2_303, 0.1_885, 0.2_273, 0.2_144, 0.2_176] assert image.shape == (1, 5_1_2, 5_1_2, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 # without safety guidance (strong configuration) __UpperCamelCase = torch.manual_seed(lowercase ) __UpperCamelCase = sd_pipe( [prompt] , generator=lowercase , guidance_scale=lowercase , num_inference_steps=5_0 , output_type="""np""" , width=5_1_2 , height=5_1_2 , sld_guidance_scale=2_0_0_0 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , ) __UpperCamelCase = output.images __UpperCamelCase = image[0, -3:, -3:, -1] __UpperCamelCase = [0.2_383, 0.2_276, 0.236, 0.2_192, 0.2_186, 0.2_053, 0.1_971, 0.1_901, 0.1_719] assert image.shape == (1, 5_1_2, 5_1_2, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def __lowerCamelCase ( self ) -> Optional[Any]: __UpperCamelCase = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" , safety_checker=lowercase ) __UpperCamelCase = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config ) __UpperCamelCase = sd_pipe.to(lowercase ) sd_pipe.set_progress_bar_config(disable=lowercase ) __UpperCamelCase = """padme amidala taking a bath artwork, safe for work, no nudity""" __UpperCamelCase = 2_7_3_4_9_7_1_7_5_5 __UpperCamelCase = 7 __UpperCamelCase = torch.manual_seed(lowercase ) __UpperCamelCase = sd_pipe( [prompt] , generator=lowercase , guidance_scale=lowercase , num_inference_steps=5_0 , output_type="""np""" , width=5_1_2 , height=5_1_2 , sld_guidance_scale=0 , ) __UpperCamelCase = output.images __UpperCamelCase = image[0, -3:, -3:, -1] __UpperCamelCase = [0.3_502, 0.3_622, 0.3_396, 0.3_642, 0.3_478, 0.3_318, 0.35, 0.3_348, 0.3_297] assert image.shape == (1, 5_1_2, 5_1_2, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 __UpperCamelCase = torch.manual_seed(lowercase ) __UpperCamelCase = sd_pipe( [prompt] , generator=lowercase , guidance_scale=lowercase , num_inference_steps=5_0 , output_type="""np""" , width=5_1_2 , height=5_1_2 , sld_guidance_scale=2_0_0_0 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , ) __UpperCamelCase = output.images __UpperCamelCase = image[0, -3:, -3:, -1] __UpperCamelCase = [0.5_531, 0.5_206, 0.4_895, 0.5_156, 0.5_182, 0.4_751, 0.4_802, 0.4_803, 0.4_443] assert image.shape == (1, 5_1_2, 5_1_2, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def __lowerCamelCase ( self ) -> Optional[Any]: __UpperCamelCase = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" ) __UpperCamelCase = sd_pipe.to(lowercase ) sd_pipe.set_progress_bar_config(disable=lowercase ) __UpperCamelCase = ( """the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c.""" """ leyendecker""" ) __UpperCamelCase = 1_0_4_4_3_5_5_2_3_4 __UpperCamelCase = 1_2 __UpperCamelCase = torch.manual_seed(lowercase ) __UpperCamelCase = sd_pipe( [prompt] , generator=lowercase , guidance_scale=lowercase , num_inference_steps=5_0 , output_type="""np""" , width=5_1_2 , height=5_1_2 , sld_guidance_scale=0 , ) __UpperCamelCase = output.images __UpperCamelCase = image[0, -3:, -3:, -1] __UpperCamelCase = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] ) assert image.shape == (1, 5_1_2, 5_1_2, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-7 __UpperCamelCase = torch.manual_seed(lowercase ) __UpperCamelCase = sd_pipe( [prompt] , generator=lowercase , guidance_scale=lowercase , num_inference_steps=5_0 , output_type="""np""" , width=5_1_2 , height=5_1_2 , sld_guidance_scale=2_0_0_0 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , ) __UpperCamelCase = output.images __UpperCamelCase = image[0, -3:, -3:, -1] __UpperCamelCase = np.array([0.5_818, 0.6_285, 0.6_835, 0.6_019, 0.625, 0.6_754, 0.6_096, 0.6_334, 0.6_561] ) assert image.shape == (1, 5_1_2, 5_1_2, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
349
0
from __future__ import annotations class __snake_case : def __init__( self , __UpperCamelCase , __UpperCamelCase ) -> Optional[int]: '''simple docstring''' snake_case__ , snake_case__ : Optional[Any] = text, pattern snake_case__ , snake_case__ : Any = len(__UpperCamelCase ), len(__UpperCamelCase ) def __a ( self , __UpperCamelCase ) -> int: '''simple docstring''' for i in range(self.patLen - 1 , -1 , -1 ): if char == self.pattern[i]: return i return -1 def __a ( self , __UpperCamelCase ) -> int: '''simple docstring''' for i in range(self.patLen - 1 , -1 , -1 ): if self.pattern[i] != self.text[current_pos + i]: return current_pos + i return -1 def __a ( self ) -> list[int]: '''simple docstring''' snake_case__ : int = [] for i in range(self.textLen - self.patLen + 1 ): snake_case__ : Dict = self.mismatch_in_text(__UpperCamelCase ) if mismatch_index == -1: positions.append(__UpperCamelCase ) else: snake_case__ : Optional[int] = self.match_in_pattern(self.text[mismatch_index] ) snake_case__ : Tuple = ( mismatch_index - match_index ) # shifting index lgtm [py/multiple-definition] return positions lowerCAmelCase__ : Tuple = 'ABAABA' lowerCAmelCase__ : Any = 'AB' lowerCAmelCase__ : Tuple = BoyerMooreSearch(text, pattern) lowerCAmelCase__ : Any = bms.bad_character_heuristic() if len(positions) == 0: print('''No match found''') else: print('''Pattern found in following positions: ''') print(positions)
143
'''simple docstring''' from ..utils import DummyObject, requires_backends class UpperCAmelCase__ ( metaclass=UpperCAmelCase_): __SCREAMING_SNAKE_CASE = ['''flax'''] def __init__( self , *lowercase , **lowercase ) -> List[Any]: requires_backends(self , ["""flax"""] ) @classmethod def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Union[str, Any]: requires_backends(cls , ["""flax"""] ) @classmethod def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Tuple: requires_backends(cls , ["""flax"""] ) class UpperCAmelCase__ ( metaclass=UpperCAmelCase_): __SCREAMING_SNAKE_CASE = ['''flax'''] def __init__( self , *lowercase , **lowercase ) -> str: requires_backends(self , ["""flax"""] ) @classmethod def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Any: requires_backends(cls , ["""flax"""] ) @classmethod def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> int: requires_backends(cls , ["""flax"""] ) class UpperCAmelCase__ ( metaclass=UpperCAmelCase_): __SCREAMING_SNAKE_CASE = ['''flax'''] def __init__( self , *lowercase , **lowercase ) -> Dict: requires_backends(self , ["""flax"""] ) @classmethod def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> int: requires_backends(cls , ["""flax"""] ) @classmethod def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> List[Any]: requires_backends(cls , ["""flax"""] ) class UpperCAmelCase__ ( metaclass=UpperCAmelCase_): __SCREAMING_SNAKE_CASE = ['''flax'''] def __init__( self , *lowercase , **lowercase ) -> Optional[Any]: requires_backends(self , ["""flax"""] ) @classmethod def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Optional[int]: requires_backends(cls , ["""flax"""] ) @classmethod def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Optional[int]: requires_backends(cls , ["""flax"""] ) class UpperCAmelCase__ ( metaclass=UpperCAmelCase_): __SCREAMING_SNAKE_CASE = ['''flax'''] def __init__( self , *lowercase , **lowercase ) -> Optional[Any]: requires_backends(self , ["""flax"""] ) @classmethod def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> str: requires_backends(cls , ["""flax"""] ) @classmethod def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> List[str]: requires_backends(cls , ["""flax"""] ) class UpperCAmelCase__ ( metaclass=UpperCAmelCase_): __SCREAMING_SNAKE_CASE = ['''flax'''] def __init__( self , *lowercase , **lowercase ) -> Optional[int]: requires_backends(self , ["""flax"""] ) @classmethod def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> str: requires_backends(cls , ["""flax"""] ) @classmethod def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> int: requires_backends(cls , ["""flax"""] ) class UpperCAmelCase__ ( metaclass=UpperCAmelCase_): __SCREAMING_SNAKE_CASE = ['''flax'''] def __init__( self , *lowercase , **lowercase ) -> List[Any]: requires_backends(self , ["""flax"""] ) @classmethod def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Any: requires_backends(cls , ["""flax"""] ) @classmethod def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Dict: requires_backends(cls , ["""flax"""] ) class UpperCAmelCase__ ( metaclass=UpperCAmelCase_): __SCREAMING_SNAKE_CASE = ['''flax'''] def __init__( self , *lowercase , **lowercase ) -> Union[str, Any]: requires_backends(self , ["""flax"""] ) @classmethod def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Dict: requires_backends(cls , ["""flax"""] ) @classmethod def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Tuple: requires_backends(cls , ["""flax"""] ) class UpperCAmelCase__ ( metaclass=UpperCAmelCase_): __SCREAMING_SNAKE_CASE = ['''flax'''] def __init__( self , *lowercase , **lowercase ) -> int: requires_backends(self , ["""flax"""] ) @classmethod def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Optional[int]: requires_backends(cls , ["""flax"""] ) @classmethod def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> List[str]: requires_backends(cls , ["""flax"""] ) class UpperCAmelCase__ ( metaclass=UpperCAmelCase_): __SCREAMING_SNAKE_CASE = ['''flax'''] def __init__( self , *lowercase , **lowercase ) -> int: requires_backends(self , ["""flax"""] ) @classmethod def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> str: requires_backends(cls , ["""flax"""] ) @classmethod def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> str: requires_backends(cls , ["""flax"""] ) class UpperCAmelCase__ ( metaclass=UpperCAmelCase_): __SCREAMING_SNAKE_CASE = ['''flax'''] def __init__( self , *lowercase , **lowercase ) -> int: requires_backends(self , ["""flax"""] ) @classmethod def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Tuple: requires_backends(cls , ["""flax"""] ) @classmethod def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Tuple: requires_backends(cls , ["""flax"""] ) class UpperCAmelCase__ ( metaclass=UpperCAmelCase_): __SCREAMING_SNAKE_CASE = ['''flax'''] def __init__( self , *lowercase , **lowercase ) -> Optional[int]: requires_backends(self , ["""flax"""] ) @classmethod def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Tuple: requires_backends(cls , ["""flax"""] ) @classmethod def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Union[str, Any]: requires_backends(cls , ["""flax"""] ) class UpperCAmelCase__ ( metaclass=UpperCAmelCase_): __SCREAMING_SNAKE_CASE = ['''flax'''] def __init__( self , *lowercase , **lowercase ) -> Any: requires_backends(self , ["""flax"""] ) @classmethod def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Tuple: requires_backends(cls , ["""flax"""] ) @classmethod def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> List[str]: requires_backends(cls , ["""flax"""] )
349
0
import inspect import tempfile from collections import OrderedDict, UserDict from collections.abc import MutableMapping from contextlib import ExitStack, contextmanager from dataclasses import fields from enum import Enum from typing import Any, ContextManager, List, Tuple import numpy as np from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy if is_flax_available(): import jax.numpy as jnp class _snake_case ( UpperCAmelCase_ ): def __get__( self , _lowerCamelCase , _lowerCamelCase=None ): # See docs.python.org/3/howto/descriptor.html#properties if obj is None: return self if self.fget is None: raise AttributeError('''unreadable attribute''' ) a :Union[str, Any] = '''__cached_''' + self.fget.__name__ a :int = getattr(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) if cached is None: a :Union[str, Any] = self.fget(_lowerCamelCase ) setattr(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) return cached def __lowerCamelCase ( UpperCAmelCase_ : Optional[Any] ): """simple docstring""" a :List[Any] = val.lower() if val in {"y", "yes", "t", "true", "on", "1"}: return 1 if val in {"n", "no", "f", "false", "off", "0"}: return 0 raise ValueError(F'''invalid truth value {val!r}''' ) def __lowerCamelCase ( UpperCAmelCase_ : str ): """simple docstring""" if is_torch_fx_proxy(__A ): return True if is_torch_available(): import torch if isinstance(__A , torch.Tensor ): return True if is_tf_available(): import tensorflow as tf if isinstance(__A , tf.Tensor ): return True if is_flax_available(): import jax.numpy as jnp from jax.core import Tracer if isinstance(__A , (jnp.ndarray, Tracer) ): return True return isinstance(__A , np.ndarray ) def __lowerCamelCase ( UpperCAmelCase_ : Tuple ): """simple docstring""" return isinstance(__A , np.ndarray ) def __lowerCamelCase ( UpperCAmelCase_ : Union[str, Any] ): """simple docstring""" return _is_numpy(__A ) def __lowerCamelCase ( UpperCAmelCase_ : Dict ): """simple docstring""" import torch return isinstance(__A , torch.Tensor ) def __lowerCamelCase ( UpperCAmelCase_ : List[str] ): """simple docstring""" return False if not is_torch_available() else _is_torch(__A ) def __lowerCamelCase ( UpperCAmelCase_ : str ): """simple docstring""" import torch return isinstance(__A , torch.device ) def __lowerCamelCase ( UpperCAmelCase_ : Tuple ): """simple docstring""" return False if not is_torch_available() else _is_torch_device(__A ) def __lowerCamelCase ( UpperCAmelCase_ : Any ): """simple docstring""" import torch if isinstance(__A , __A ): if hasattr(__A , __A ): a :str = getattr(__A , __A ) else: return False return isinstance(__A , torch.dtype ) def __lowerCamelCase ( UpperCAmelCase_ : Union[str, Any] ): """simple docstring""" return False if not is_torch_available() else _is_torch_dtype(__A ) def __lowerCamelCase ( UpperCAmelCase_ : int ): """simple docstring""" import tensorflow as tf return isinstance(__A , tf.Tensor ) def __lowerCamelCase ( UpperCAmelCase_ : List[Any] ): """simple docstring""" return False if not is_tf_available() else _is_tensorflow(__A ) def __lowerCamelCase ( UpperCAmelCase_ : Any ): """simple docstring""" import tensorflow as tf # the `is_symbolic_tensor` predicate is only available starting with TF 2.14 if hasattr(__A , '''is_symbolic_tensor''' ): return tf.is_symbolic_tensor(__A ) return type(__A ) == tf.Tensor def __lowerCamelCase ( UpperCAmelCase_ : str ): """simple docstring""" return False if not is_tf_available() else _is_tf_symbolic_tensor(__A ) def __lowerCamelCase ( UpperCAmelCase_ : Union[str, Any] ): """simple docstring""" import jax.numpy as jnp # noqa: F811 return isinstance(__A , jnp.ndarray ) def __lowerCamelCase ( UpperCAmelCase_ : Optional[int] ): """simple docstring""" return False if not is_flax_available() else _is_jax(__A ) def __lowerCamelCase ( UpperCAmelCase_ : Union[str, Any] ): """simple docstring""" if isinstance(__A , (dict, UserDict) ): return {k: to_py_obj(__A ) for k, v in obj.items()} elif isinstance(__A , (list, tuple) ): return [to_py_obj(__A ) for o in obj] elif is_tf_tensor(__A ): return obj.numpy().tolist() elif is_torch_tensor(__A ): return obj.detach().cpu().tolist() elif is_jax_tensor(__A ): return np.asarray(__A ).tolist() elif isinstance(__A , (np.ndarray, np.number) ): # tolist also works on 0d np arrays return obj.tolist() else: return obj def __lowerCamelCase ( UpperCAmelCase_ : Optional[int] ): """simple docstring""" if isinstance(__A , (dict, UserDict) ): return {k: to_numpy(__A ) for k, v in obj.items()} elif isinstance(__A , (list, tuple) ): return np.array(__A ) elif is_tf_tensor(__A ): return obj.numpy() elif is_torch_tensor(__A ): return obj.detach().cpu().numpy() elif is_jax_tensor(__A ): return np.asarray(__A ) else: return obj class _snake_case ( UpperCAmelCase_ ): def SCREAMING_SNAKE_CASE__ ( self ): a :Any = fields(self ) # Safety and consistency checks if not len(_lowerCamelCase ): raise ValueError(F'''{self.__class__.__name__} has no fields.''' ) if not all(field.default is None for field in class_fields[1:] ): raise ValueError(F'''{self.__class__.__name__} should not have more than one required field.''' ) a :Tuple = getattr(self , class_fields[0].name ) a :Union[str, Any] = all(getattr(self , field.name ) is None for field in class_fields[1:] ) if other_fields_are_none and not is_tensor(_lowerCamelCase ): if isinstance(_lowerCamelCase , _lowerCamelCase ): a :Union[str, Any] = first_field.items() a :Optional[int] = True else: try: a :str = iter(_lowerCamelCase ) a :Any = True except TypeError: a :str = False # if we provided an iterator as first field and the iterator is a (key, value) iterator # set the associated fields if first_field_iterator: for idx, element in enumerate(_lowerCamelCase ): if ( not isinstance(_lowerCamelCase , (list, tuple) ) or not len(_lowerCamelCase ) == 2 or not isinstance(element[0] , _lowerCamelCase ) ): if idx == 0: # If we do not have an iterator of key/values, set it as attribute a :List[Any] = first_field else: # If we have a mixed iterator, raise an error raise ValueError( F'''Cannot set key/value for {element}. It needs to be a tuple (key, value).''' ) break setattr(self , element[0] , element[1] ) if element[1] is not None: a :str = element[1] elif first_field is not None: a :Optional[Any] = first_field else: for field in class_fields: a :str = getattr(self , field.name ) if v is not None: a :Any = v def __delitem__( self , *_lowerCamelCase , **_lowerCamelCase ): raise Exception(F'''You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.''' ) def SCREAMING_SNAKE_CASE__ ( self , *_lowerCamelCase , **_lowerCamelCase ): raise Exception(F'''You cannot use ``setdefault`` on a {self.__class__.__name__} instance.''' ) def SCREAMING_SNAKE_CASE__ ( self , *_lowerCamelCase , **_lowerCamelCase ): raise Exception(F'''You cannot use ``pop`` on a {self.__class__.__name__} instance.''' ) def SCREAMING_SNAKE_CASE__ ( self , *_lowerCamelCase , **_lowerCamelCase ): raise Exception(F'''You cannot use ``update`` on a {self.__class__.__name__} instance.''' ) def __getitem__( self , _lowerCamelCase ): if isinstance(_lowerCamelCase , _lowerCamelCase ): a :Tuple = dict(self.items() ) return inner_dict[k] else: return self.to_tuple()[k] def __setattr__( self , _lowerCamelCase , _lowerCamelCase ): if name in self.keys() and value is not None: # Don't call self.__setitem__ to avoid recursion errors super().__setitem__(_lowerCamelCase , _lowerCamelCase ) super().__setattr__(_lowerCamelCase , _lowerCamelCase ) def __setitem__( self , _lowerCamelCase , _lowerCamelCase ): # Will raise a KeyException if needed super().__setitem__(_lowerCamelCase , _lowerCamelCase ) # Don't call self.__setattr__ to avoid recursion errors super().__setattr__(_lowerCamelCase , _lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self ): return tuple(self[k] for k in self.keys() ) class _snake_case ( UpperCAmelCase_ , UpperCAmelCase_ ): @classmethod def SCREAMING_SNAKE_CASE__ ( cls , _lowerCamelCase ): raise ValueError( F'''{value} is not a valid {cls.__name__}, please select one of {list(cls._valueamember_map_.keys() )}''' ) class _snake_case ( UpperCAmelCase_ ): SCREAMING_SNAKE_CASE__ = 'longest' SCREAMING_SNAKE_CASE__ = 'max_length' SCREAMING_SNAKE_CASE__ = 'do_not_pad' class _snake_case ( UpperCAmelCase_ ): SCREAMING_SNAKE_CASE__ = 'pt' SCREAMING_SNAKE_CASE__ = 'tf' SCREAMING_SNAKE_CASE__ = 'np' SCREAMING_SNAKE_CASE__ = 'jax' class _snake_case : def __init__( self , _lowerCamelCase ): a :Optional[Any] = context_managers a :str = ExitStack() def __enter__( self ): for context_manager in self.context_managers: self.stack.enter_context(_lowerCamelCase ) def __exit__( self , *_lowerCamelCase , **_lowerCamelCase ): self.stack.__exit__(*_lowerCamelCase , **_lowerCamelCase ) def __lowerCamelCase ( UpperCAmelCase_ : int ): """simple docstring""" a :Tuple = infer_framework(__A ) if framework == "tf": a :Optional[Any] = inspect.signature(model_class.call ) # TensorFlow models elif framework == "pt": a :Optional[int] = inspect.signature(model_class.forward ) # PyTorch models else: a :Tuple = inspect.signature(model_class.__call__ ) # Flax models for p in signature.parameters: if p == "return_loss" and signature.parameters[p].default is True: return True return False def __lowerCamelCase ( UpperCAmelCase_ : Optional[Any] ): """simple docstring""" a :int = model_class.__name__ a :Tuple = infer_framework(__A ) if framework == "tf": a :List[Any] = inspect.signature(model_class.call ) # TensorFlow models elif framework == "pt": a :Optional[int] = inspect.signature(model_class.forward ) # PyTorch models else: a :Tuple = inspect.signature(model_class.__call__ ) # Flax models if "QuestionAnswering" in model_name: return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")] else: return [p for p in signature.parameters if "label" in p] def __lowerCamelCase ( UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[int] = "" , UpperCAmelCase_ : str = "." ): """simple docstring""" def _flatten_dict(UpperCAmelCase_ : str , UpperCAmelCase_ : Dict="" , UpperCAmelCase_ : Any="." ): for k, v in d.items(): a :Any = str(__A ) + delimiter + str(__A ) if parent_key else k if v and isinstance(__A , __A ): yield from flatten_dict(__A , __A , delimiter=__A ).items() else: yield key, v return dict(_flatten_dict(__A , __A , __A ) ) @contextmanager def __lowerCamelCase ( UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[Any] = False ): """simple docstring""" if use_temp_dir: with tempfile.TemporaryDirectory() as tmp_dir: yield tmp_dir else: yield working_dir def __lowerCamelCase ( UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Dict=None ): """simple docstring""" if is_numpy_array(__A ): return np.transpose(__A , axes=__A ) elif is_torch_tensor(__A ): return array.T if axes is None else array.permute(*__A ) elif is_tf_tensor(__A ): import tensorflow as tf return tf.transpose(__A , perm=__A ) elif is_jax_tensor(__A ): return jnp.transpose(__A , axes=__A ) else: raise ValueError(F'''Type not supported for transpose: {type(__A )}.''' ) def __lowerCamelCase ( UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Dict ): """simple docstring""" if is_numpy_array(__A ): return np.reshape(__A , __A ) elif is_torch_tensor(__A ): return array.reshape(*__A ) elif is_tf_tensor(__A ): import tensorflow as tf return tf.reshape(__A , __A ) elif is_jax_tensor(__A ): return jnp.reshape(__A , __A ) else: raise ValueError(F'''Type not supported for reshape: {type(__A )}.''' ) def __lowerCamelCase ( UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Tuple=None ): """simple docstring""" if is_numpy_array(__A ): return np.squeeze(__A , axis=__A ) elif is_torch_tensor(__A ): return array.squeeze() if axis is None else array.squeeze(dim=__A ) elif is_tf_tensor(__A ): import tensorflow as tf return tf.squeeze(__A , axis=__A ) elif is_jax_tensor(__A ): return jnp.squeeze(__A , axis=__A ) else: raise ValueError(F'''Type not supported for squeeze: {type(__A )}.''' ) def __lowerCamelCase ( UpperCAmelCase_ : int , UpperCAmelCase_ : List[Any] ): """simple docstring""" if is_numpy_array(__A ): return np.expand_dims(__A , __A ) elif is_torch_tensor(__A ): return array.unsqueeze(dim=__A ) elif is_tf_tensor(__A ): import tensorflow as tf return tf.expand_dims(__A , axis=__A ) elif is_jax_tensor(__A ): return jnp.expand_dims(__A , axis=__A ) else: raise ValueError(F'''Type not supported for expand_dims: {type(__A )}.''' ) def __lowerCamelCase ( UpperCAmelCase_ : Any ): """simple docstring""" if is_numpy_array(__A ): return np.size(__A ) elif is_torch_tensor(__A ): return array.numel() elif is_tf_tensor(__A ): import tensorflow as tf return tf.size(__A ) elif is_jax_tensor(__A ): return array.size else: raise ValueError(F'''Type not supported for expand_dims: {type(__A )}.''' ) def __lowerCamelCase ( UpperCAmelCase_ : int , UpperCAmelCase_ : Tuple ): """simple docstring""" for key, value in auto_map.items(): if isinstance(__A , (tuple, list) ): a :Any = [F'''{repo_id}--{v}''' if (v is not None and '''--''' not in v) else v for v in value] elif value is not None and "--" not in value: a :Union[str, Any] = F'''{repo_id}--{value}''' return auto_map def __lowerCamelCase ( UpperCAmelCase_ : List[Any] ): """simple docstring""" for base_class in inspect.getmro(__A ): a :Optional[int] = base_class.__module__ a :str = base_class.__name__ if module.startswith('''tensorflow''' ) or module.startswith('''keras''' ) or name == "TFPreTrainedModel": return "tf" elif module.startswith('''torch''' ) or name == "PreTrainedModel": return "pt" elif module.startswith('''flax''' ) or module.startswith('''jax''' ) or name == "FlaxPreTrainedModel": return "flax" else: raise TypeError(F'''Could not infer framework from class {model_class}.''' )
94
'''simple docstring''' import logging import os from .state import PartialState class UpperCAmelCase__ ( logging.LoggerAdapter): @staticmethod def __lowerCamelCase ( lowercase ) -> Dict: __UpperCamelCase = PartialState() return not main_process_only or (main_process_only and state.is_main_process) def __lowerCamelCase ( self , lowercase , lowercase , *lowercase , **lowercase ) -> List[str]: if PartialState._shared_state == {}: raise RuntimeError( """You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.""" ) __UpperCamelCase = kwargs.pop("""main_process_only""" , lowercase ) __UpperCamelCase = kwargs.pop("""in_order""" , lowercase ) if self.isEnabledFor(lowercase ): if self._should_log(lowercase ): __UpperCamelCase , __UpperCamelCase = self.process(lowercase , lowercase ) self.logger.log(lowercase , lowercase , *lowercase , **lowercase ) elif in_order: __UpperCamelCase = PartialState() for i in range(state.num_processes ): if i == state.process_index: __UpperCamelCase , __UpperCamelCase = self.process(lowercase , lowercase ) self.logger.log(lowercase , lowercase , *lowercase , **lowercase ) state.wait_for_everyone() def _lowercase ( __A ,__A = None ): '''simple docstring''' if log_level is None: __UpperCamelCase = os.environ.get("""ACCELERATE_LOG_LEVEL""" ,__A ) __UpperCamelCase = logging.getLogger(__A ) if log_level is not None: logger.setLevel(log_level.upper() ) logger.root.setLevel(log_level.upper() ) return MultiProcessAdapter(__A ,{} )
349
0
'''simple docstring''' import tempfile import unittest import numpy as np import transformers from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax import jax.numpy as jnp from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel if is_torch_available(): import torch class A : def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=14 , SCREAMING_SNAKE_CASE=7 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=99 , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=37 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=512 , SCREAMING_SNAKE_CASE=0.02 , ) -> Optional[int]: """simple docstring""" A : Dict = parent A : int = batch_size A : Dict = seq_length A : Optional[int] = is_training A : Tuple = use_input_mask A : Union[str, Any] = use_token_type_ids A : Optional[int] = use_labels A : Tuple = vocab_size A : Dict = hidden_size A : str = rotary_dim A : str = num_hidden_layers A : Any = num_attention_heads A : Union[str, Any] = intermediate_size A : Dict = hidden_act A : str = hidden_dropout_prob A : List[str] = attention_probs_dropout_prob A : List[str] = max_position_embeddings A : str = initializer_range A : Optional[Any] = None A : List[Any] = vocab_size - 1 A : Tuple = vocab_size - 1 A : Optional[Any] = vocab_size - 1 def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" A : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A : Dict = None if self.use_input_mask: A : List[str] = random_attention_mask([self.batch_size, self.seq_length] ) A : Optional[Any] = GPTJConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=SCREAMING_SNAKE_CASE , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , ) return (config, input_ids, input_mask) def __lowerCAmelCase ( self ) -> Dict: """simple docstring""" A : List[Any] = self.prepare_config_and_inputs() A, A, A : Optional[int] = config_and_inputs A : int = {'''input_ids''': input_ids, '''attention_mask''': attention_mask} return config, inputs_dict def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" A : Tuple = 20 A : Optional[Any] = model_class_name(SCREAMING_SNAKE_CASE ) A : Optional[Any] = model.init_cache(input_ids.shape[0] , SCREAMING_SNAKE_CASE ) A : Optional[int] = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype='''i4''' ) A : Tuple = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) ) A : Optional[int] = model( input_ids[:, :-1] , attention_mask=SCREAMING_SNAKE_CASE , past_key_values=SCREAMING_SNAKE_CASE , position_ids=SCREAMING_SNAKE_CASE , ) A : List[str] = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='''i4''' ) A : List[str] = model( input_ids[:, -1:] , attention_mask=SCREAMING_SNAKE_CASE , past_key_values=outputs_cache.past_key_values , position_ids=SCREAMING_SNAKE_CASE , ) A : int = model(SCREAMING_SNAKE_CASE ) A : Tuple = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=F'Max diff is {diff}' ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[Any]: """simple docstring""" A : Dict = 20 A : int = model_class_name(SCREAMING_SNAKE_CASE ) A : Dict = jnp.concatenate( [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , ) A : Optional[Any] = model.init_cache(input_ids.shape[0] , SCREAMING_SNAKE_CASE ) A : List[str] = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) ) A : Dict = model( input_ids[:, :-1] , attention_mask=SCREAMING_SNAKE_CASE , past_key_values=SCREAMING_SNAKE_CASE , position_ids=SCREAMING_SNAKE_CASE , ) A : Optional[Any] = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='''i4''' ) A : Dict = model( input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=SCREAMING_SNAKE_CASE , position_ids=SCREAMING_SNAKE_CASE , ) A : int = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE ) A : List[Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=F'Max diff is {diff}' ) @require_flax class A ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ): __magic_name__ = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else () __magic_name__ = (FlaxGPTJForCausalLM,) if is_flax_available() else () def __lowerCAmelCase ( self ) -> int: """simple docstring""" A : Any = FlaxGPTJModelTester(self ) def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" for model_class_name in self.all_model_classes: A, A, A : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" for model_class_name in self.all_model_classes: A, A, A : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward_with_attn_mask( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) @tooslow def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" A : str = GPTaTokenizer.from_pretrained('''gpt2''' , pad_token='''<|endoftext|>''' , padding_side='''left''' ) A : Optional[int] = tokenizer(['''Hello this is a long string''', '''Hey'''] , return_tensors='''np''' , padding=SCREAMING_SNAKE_CASE , truncation=SCREAMING_SNAKE_CASE ) A : int = FlaxGPTJForCausalLM.from_pretrained('''EleutherAI/gpt-j-6B''' ) A : str = False A : Tuple = model.config.eos_token_id A : str = jax.jit(model.generate ) A : Union[str, Any] = jit_generate( inputs['''input_ids'''] , attention_mask=inputs['''attention_mask'''] , pad_token_id=tokenizer.pad_token_id ).sequences A : Union[str, Any] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE , skip_special_tokens=SCREAMING_SNAKE_CASE ) A : List[Any] = [ '''Hello this is a long string of text.\n\nI\'m trying to get the text of the''', '''Hey, I\'m a little late to the party. I\'m going to''', ] self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) @is_pt_flax_cross_test def __lowerCAmelCase ( self ) -> Dict: """simple docstring""" A, A : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): # prepare inputs A : int = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) A : List[str] = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class A : Tuple = model_class.__name__[4:] # Skip the "Flax" at the beginning A : Any = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) A, A : Union[str, Any] = pt_inputs['''input_ids'''].shape A : Any = np.random.randint(0 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(SCREAMING_SNAKE_CASE ): A : int = 0 A : Any = 1 A : Optional[int] = 0 A : Any = 1 A : Dict = pt_model_class(SCREAMING_SNAKE_CASE ).eval() A : Any = model_class(SCREAMING_SNAKE_CASE , dtype=jnp.floataa ) A : int = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , SCREAMING_SNAKE_CASE ) A : Dict = fx_state with torch.no_grad(): A : Dict = pt_model(**SCREAMING_SNAKE_CASE ).to_tuple() A : Union[str, Any] = fx_model(**SCREAMING_SNAKE_CASE ).to_tuple() self.assertEqual(len(SCREAMING_SNAKE_CASE ) , len(SCREAMING_SNAKE_CASE ) , '''Output lengths differ between Flax and PyTorch''' ) for fx_output, pt_output in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 ) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(SCREAMING_SNAKE_CASE ) A : List[Any] = model_class.from_pretrained(SCREAMING_SNAKE_CASE , from_pt=SCREAMING_SNAKE_CASE ) A : str = fx_model_loaded(**SCREAMING_SNAKE_CASE ).to_tuple() self.assertEqual( len(SCREAMING_SNAKE_CASE ) , len(SCREAMING_SNAKE_CASE ) , '''Output lengths differ between Flax and PyTorch''' ) for fx_output_loaded, pt_output in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4e-2 ) @is_pt_flax_cross_test def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A, A : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): # prepare inputs A : Dict = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) A : str = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class A : Optional[Any] = model_class.__name__[4:] # Skip the "Flax" at the beginning A : List[Any] = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) A : Optional[int] = pt_model_class(SCREAMING_SNAKE_CASE ).eval() A : Optional[int] = model_class(SCREAMING_SNAKE_CASE , dtype=jnp.floataa ) A : Optional[int] = load_flax_weights_in_pytorch_model(SCREAMING_SNAKE_CASE , fx_model.params ) A, A : List[Any] = pt_inputs['''input_ids'''].shape A : List[str] = np.random.randint(0 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(SCREAMING_SNAKE_CASE ): A : Any = 0 A : int = 1 A : Optional[int] = 0 A : str = 1 # make sure weights are tied in PyTorch pt_model.tie_weights() with torch.no_grad(): A : Tuple = pt_model(**SCREAMING_SNAKE_CASE ).to_tuple() A : Optional[int] = fx_model(**SCREAMING_SNAKE_CASE ).to_tuple() self.assertEqual(len(SCREAMING_SNAKE_CASE ) , len(SCREAMING_SNAKE_CASE ) , '''Output lengths differ between Flax and PyTorch''' ) for fx_output, pt_output in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 ) with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(SCREAMING_SNAKE_CASE ) A : Optional[int] = pt_model_class.from_pretrained(SCREAMING_SNAKE_CASE , from_flax=SCREAMING_SNAKE_CASE ) with torch.no_grad(): A : str = pt_model_loaded(**SCREAMING_SNAKE_CASE ).to_tuple() self.assertEqual( len(SCREAMING_SNAKE_CASE ) , len(SCREAMING_SNAKE_CASE ) , '''Output lengths differ between Flax and PyTorch''' ) for fx_output, pt_output in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 ) @tooslow def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" for model_class_name in self.all_model_classes: A : Optional[Any] = model_class_name.from_pretrained('''EleutherAI/gpt-j-6B''' ) A : Optional[Any] = model(np.ones((1, 1) ) ) self.assertIsNotNone(SCREAMING_SNAKE_CASE )
3
'''simple docstring''' import logging import random import ray from transformers import RagConfig, RagRetriever, RagTokenizer from transformers.models.rag.retrieval_rag import CustomHFIndex a__ : Optional[Any] = logging.getLogger(__name__) class UpperCAmelCase__ : def __init__( self ) -> Any: __UpperCamelCase = False def __lowerCamelCase ( self , lowercase , lowercase , lowercase , lowercase ) -> str: if not self.initialized: __UpperCamelCase = RagRetriever( lowercase , question_encoder_tokenizer=lowercase , generator_tokenizer=lowercase , index=lowercase , init_retrieval=lowercase , ) __UpperCamelCase = True def __lowerCamelCase ( self ) -> Optional[Any]: self.retriever.index.init_index() def __lowerCamelCase ( self , lowercase , lowercase ) -> Dict: __UpperCamelCase , __UpperCamelCase = self.retriever._main_retrieve(lowercase , lowercase ) return doc_ids, retrieved_doc_embeds class UpperCAmelCase__ ( UpperCAmelCase_): def __init__( self , lowercase , lowercase , lowercase , lowercase , lowercase=None ) -> List[Any]: if index is not None and index.is_initialized() and len(lowercase ) > 0: raise ValueError( """When using Ray for distributed fine-tuning, """ """you'll need to provide the paths instead, """ """as the dataset and the index are loaded """ """separately. More info in examples/rag/use_own_knowledge_dataset.py """ ) super().__init__( lowercase , question_encoder_tokenizer=lowercase , generator_tokenizer=lowercase , index=lowercase , init_retrieval=lowercase , ) __UpperCamelCase = retrieval_workers if len(self.retrieval_workers ) > 0: ray.get( [ worker.create_rag_retriever.remote(lowercase , lowercase , lowercase , lowercase ) for worker in self.retrieval_workers ] ) def __lowerCamelCase ( self ) -> Dict: logger.info("""initializing retrieval""" ) if len(self.retrieval_workers ) > 0: ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers] ) else: # Non-distributed training. Load index into this same process. self.index.init_index() def __lowerCamelCase ( self , lowercase , lowercase ) -> List[str]: if len(self.retrieval_workers ) > 0: # Select a random retrieval actor. __UpperCamelCase = self.retrieval_workers[random.randint(0 , len(self.retrieval_workers ) - 1 )] __UpperCamelCase , __UpperCamelCase = ray.get(random_worker.retrieve.remote(lowercase , lowercase ) ) else: __UpperCamelCase , __UpperCamelCase = self._main_retrieve(lowercase , lowercase ) return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(lowercase ) @classmethod def __lowerCamelCase ( cls , lowercase , lowercase=None , **lowercase ) -> Any: return super(lowercase , cls ).get_tokenizers(lowercase , lowercase , **lowercase ) @classmethod def __lowerCamelCase ( cls , lowercase , lowercase , lowercase=None , **lowercase ) -> int: __UpperCamelCase = kwargs.pop("""config""" , lowercase ) or RagConfig.from_pretrained(lowercase , **lowercase ) __UpperCamelCase = RagTokenizer.from_pretrained(lowercase , config=lowercase ) __UpperCamelCase = rag_tokenizer.question_encoder __UpperCamelCase = rag_tokenizer.generator if indexed_dataset is not None: __UpperCamelCase = """custom""" __UpperCamelCase = CustomHFIndex(config.retrieval_vector_size , lowercase ) else: __UpperCamelCase = cls._build_index(lowercase ) return cls( lowercase , question_encoder_tokenizer=lowercase , generator_tokenizer=lowercase , retrieval_workers=lowercase , index=lowercase , )
349
0
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ ): """simple docstring""" return int((input_a, input_a).count(0 ) != 0 ) def snake_case__ ( ): """simple docstring""" assert nand_gate(0, 0 ) == 1 assert nand_gate(0, 1 ) == 1 assert nand_gate(1, 0 ) == 1 assert nand_gate(1, 1 ) == 0 if __name__ == "__main__": print(nand_gate(0, 0)) print(nand_gate(0, 1)) print(nand_gate(1, 0)) print(nand_gate(1, 1))
334
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_squeezebert import SqueezeBertTokenizer a__ : Optional[Any] = logging.get_logger(__name__) a__ : Union[str, Any] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} a__ : Any = { 'vocab_file': { 'squeezebert/squeezebert-uncased': ( 'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt' ), 'squeezebert/squeezebert-mnli': 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt', 'squeezebert/squeezebert-mnli-headless': ( 'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'squeezebert/squeezebert-uncased': ( 'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json' ), 'squeezebert/squeezebert-mnli': ( 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json' ), 'squeezebert/squeezebert-mnli-headless': ( 'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json' ), }, } a__ : Optional[Any] = { 'squeezebert/squeezebert-uncased': 5_1_2, 'squeezebert/squeezebert-mnli': 5_1_2, 'squeezebert/squeezebert-mnli-headless': 5_1_2, } a__ : Optional[Any] = { 'squeezebert/squeezebert-uncased': {'do_lower_case': True}, 'squeezebert/squeezebert-mnli': {'do_lower_case': True}, 'squeezebert/squeezebert-mnli-headless': {'do_lower_case': True}, } class UpperCAmelCase__ ( UpperCAmelCase_): __SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES __SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP __SCREAMING_SNAKE_CASE = PRETRAINED_INIT_CONFIGURATION __SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __SCREAMING_SNAKE_CASE = SqueezeBertTokenizer def __init__( self , lowercase=None , lowercase=None , lowercase=True , lowercase="[UNK]" , lowercase="[SEP]" , lowercase="[PAD]" , lowercase="[CLS]" , lowercase="[MASK]" , lowercase=True , lowercase=None , **lowercase , ) -> Tuple: super().__init__( lowercase , tokenizer_file=lowercase , do_lower_case=lowercase , unk_token=lowercase , sep_token=lowercase , pad_token=lowercase , cls_token=lowercase , mask_token=lowercase , tokenize_chinese_chars=lowercase , strip_accents=lowercase , **lowercase , ) __UpperCamelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get("""lowercase""" , lowercase ) != do_lower_case or normalizer_state.get("""strip_accents""" , lowercase ) != strip_accents or normalizer_state.get("""handle_chinese_chars""" , lowercase ) != tokenize_chinese_chars ): __UpperCamelCase = getattr(lowercase , normalizer_state.pop("""type""" ) ) __UpperCamelCase = do_lower_case __UpperCamelCase = strip_accents __UpperCamelCase = tokenize_chinese_chars __UpperCamelCase = normalizer_class(**lowercase ) __UpperCamelCase = do_lower_case def __lowerCamelCase ( self , lowercase , lowercase=None ) -> Tuple: __UpperCamelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def __lowerCamelCase ( self , lowercase , lowercase = None ) -> List[int]: __UpperCamelCase = [self.sep_token_id] __UpperCamelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __lowerCamelCase ( self , lowercase , lowercase = None ) -> Tuple[str]: __UpperCamelCase = self._tokenizer.model.save(lowercase , name=lowercase ) return tuple(lowercase )
349
0
import logging import os from typing import List, TextIO, Union from conllu import parse_incr from utils_ner import InputExample, Split, TokenClassificationTask __A : List[str] = logging.getLogger(__name__) class A_ (UpperCAmelCase_ ): def __init__( self , _A=-1 ): '''simple docstring''' UpperCAmelCase = label_idx def _lowercase ( self , _A , _A ): '''simple docstring''' if isinstance(_A , _A ): UpperCAmelCase = mode.value UpperCAmelCase = os.path.join(_A , F"""{mode}.txt""" ) UpperCAmelCase = 1 UpperCAmelCase = [] with open(_A , encoding='''utf-8''' ) as f: UpperCAmelCase = [] UpperCAmelCase = [] for line in f: if line.startswith('''-DOCSTART-''' ) or line == "" or line == "\n": if words: examples.append(InputExample(guid=F"""{mode}-{guid_index}""" , words=_A , labels=_A ) ) guid_index += 1 UpperCAmelCase = [] UpperCAmelCase = [] else: UpperCAmelCase = line.split(''' ''' ) words.append(splits[0] ) if len(_A ) > 1: labels.append(splits[self.label_idx].replace('''\n''' , '''''' ) ) else: # Examples could have no label for mode = "test" labels.append('''O''' ) if words: examples.append(InputExample(guid=F"""{mode}-{guid_index}""" , words=_A , labels=_A ) ) return examples def _lowercase ( self , _A , _A , _A ): '''simple docstring''' UpperCAmelCase = 0 for line in test_input_reader: if line.startswith('''-DOCSTART-''' ) or line == "" or line == "\n": writer.write(_A ) if not preds_list[example_id]: example_id += 1 elif preds_list[example_id]: UpperCAmelCase = line.split()[0] + ''' ''' + preds_list[example_id].pop(0 ) + '''\n''' writer.write(_A ) else: logger.warning('''Maximum sequence length exceeded: No prediction for \'%s\'.''' , line.split()[0] ) def _lowercase ( self , _A ): '''simple docstring''' if path: with open(_A , '''r''' ) as f: UpperCAmelCase = f.read().splitlines() if "O" not in labels: UpperCAmelCase = ['''O'''] + labels return labels else: return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"] class A_ (UpperCAmelCase_ ): def __init__( self ): '''simple docstring''' super().__init__(label_idx=-2 ) def _lowercase ( self , _A ): '''simple docstring''' if path: with open(_A , '''r''' ) as f: UpperCAmelCase = f.read().splitlines() if "O" not in labels: UpperCAmelCase = ['''O'''] + labels return labels else: return [ "O", "B-ADVP", "B-INTJ", "B-LST", "B-PRT", "B-NP", "B-SBAR", "B-VP", "B-ADJP", "B-CONJP", "B-PP", "I-ADVP", "I-INTJ", "I-LST", "I-PRT", "I-NP", "I-SBAR", "I-VP", "I-ADJP", "I-CONJP", "I-PP", ] class A_ (UpperCAmelCase_ ): def _lowercase ( self , _A , _A ): '''simple docstring''' if isinstance(_A , _A ): UpperCAmelCase = mode.value UpperCAmelCase = os.path.join(_A , F"""{mode}.txt""" ) UpperCAmelCase = 1 UpperCAmelCase = [] with open(_A , encoding='''utf-8''' ) as f: for sentence in parse_incr(_A ): UpperCAmelCase = [] UpperCAmelCase = [] for token in sentence: words.append(token['''form'''] ) labels.append(token['''upos'''] ) assert len(_A ) == len(_A ) if words: examples.append(InputExample(guid=F"""{mode}-{guid_index}""" , words=_A , labels=_A ) ) guid_index += 1 return examples def _lowercase ( self , _A , _A , _A ): '''simple docstring''' UpperCAmelCase = 0 for sentence in parse_incr(_A ): UpperCAmelCase = preds_list[example_id] UpperCAmelCase = '''''' for token in sentence: out += F"""{token["form"]} ({token["upos"]}|{s_p.pop(0 )}) """ out += "\n" writer.write(_A ) example_id += 1 def _lowercase ( self , _A ): '''simple docstring''' if path: with open(_A , '''r''' ) as f: return f.read().splitlines() else: return [ "ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X", ]
273
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_torch_available, ) a__ : str = { 'configuration_trocr': ['TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TrOCRConfig'], 'processing_trocr': ['TrOCRProcessor'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : int = [ 'TROCR_PRETRAINED_MODEL_ARCHIVE_LIST', 'TrOCRForCausalLM', 'TrOCRPreTrainedModel', ] if TYPE_CHECKING: from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig from .processing_trocr import TrOCRProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel else: import sys a__ : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
349
0
import logging import os from dataclasses import dataclass from enum import Enum from typing import List, Optional, Union from filelock import FileLock from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available lowerCamelCase__ = logging.getLogger(__name__) @dataclass class SCREAMING_SNAKE_CASE : __lowerCamelCase : Optional[int] =42 __lowerCamelCase : Union[str, Any] =42 __lowerCamelCase : Optional[Any] =42 @dataclass class SCREAMING_SNAKE_CASE : __lowerCamelCase : str =42 __lowerCamelCase : Optional[int] =42 __lowerCamelCase : Union[str, Any] =None __lowerCamelCase : str =None class SCREAMING_SNAKE_CASE ( UpperCAmelCase_ ): __lowerCamelCase : Union[str, Any] ='train' __lowerCamelCase : str ='dev' __lowerCamelCase : List[str] ='test' class SCREAMING_SNAKE_CASE : @staticmethod def UpperCamelCase_ ( __lowercase : Tuple , __lowercase : str ): '''simple docstring''' raise NotImplementedError @staticmethod def UpperCamelCase_ ( __lowercase : int ): '''simple docstring''' raise NotImplementedError @staticmethod def UpperCamelCase_ ( __lowercase : str , __lowercase : Union[str, Any] , __lowercase : List[str] , __lowercase : Optional[Any] , __lowercase : List[Any]=False , __lowercase : Optional[Any]="[CLS]" , __lowercase : List[Any]=1 , __lowercase : List[str]="[SEP]" , __lowercase : Optional[Any]=False , __lowercase : Any=False , __lowercase : Optional[Any]=0 , __lowercase : Dict=0 , __lowercase : str=-100 , __lowercase : Dict=0 , __lowercase : List[str]=True , ): '''simple docstring''' __a = {label: i for i, label in enumerate(__lowercase )} __a = [] for ex_index, example in enumerate(__lowercase ): if ex_index % 10000 == 0: logger.info("""Writing example %d of %d""" , __lowercase , len(__lowercase ) ) __a = [] __a = [] for word, label in zip(example.words , example.labels ): __a = tokenizer.tokenize(__lowercase ) # bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space. if len(__lowercase ) > 0: tokens.extend(__lowercase ) # Use the real label id for the first token of the word, and padding ids for the remaining tokens label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(__lowercase ) - 1) ) # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa. __a = tokenizer.num_special_tokens_to_add() if len(__lowercase ) > max_seq_length - special_tokens_count: __a = tokens[: (max_seq_length - special_tokens_count)] __a = label_ids[: (max_seq_length - special_tokens_count)] # The convention in BERT is: # (a) For sequence pairs: # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP] # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1 # (b) For single sequences: # tokens: [CLS] the dog is hairy . [SEP] # type_ids: 0 0 0 0 0 0 0 # # Where "type_ids" are used to indicate whether this is the first # sequence or the second sequence. The embedding vectors for `type=0` and # `type=1` were learned during pre-training and are added to the wordpiece # embedding vector (and position vector). This is not *strictly* necessary # since the [SEP] token unambiguously separates the sequences, but it makes # it easier for the model to learn the concept of sequences. # # For classification tasks, the first vector (corresponding to [CLS]) is # used as the "sentence vector". Note that this only makes sense because # the entire model is fine-tuned. tokens += [sep_token] label_ids += [pad_token_label_id] if sep_token_extra: # roberta uses an extra separator b/w pairs of sentences tokens += [sep_token] label_ids += [pad_token_label_id] __a = [sequence_a_segment_id] * len(__lowercase ) if cls_token_at_end: tokens += [cls_token] label_ids += [pad_token_label_id] segment_ids += [cls_token_segment_id] else: __a = [cls_token] + tokens __a = [pad_token_label_id] + label_ids __a = [cls_token_segment_id] + segment_ids __a = tokenizer.convert_tokens_to_ids(__lowercase ) # The mask has 1 for real tokens and 0 for padding tokens. Only real # tokens are attended to. __a = [1 if mask_padding_with_zero else 0] * len(__lowercase ) # Zero-pad up to the sequence length. __a = max_seq_length - len(__lowercase ) if pad_on_left: __a = ([pad_token] * padding_length) + input_ids __a = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask __a = ([pad_token_segment_id] * padding_length) + segment_ids __a = ([pad_token_label_id] * padding_length) + label_ids else: input_ids += [pad_token] * padding_length input_mask += [0 if mask_padding_with_zero else 1] * padding_length segment_ids += [pad_token_segment_id] * padding_length label_ids += [pad_token_label_id] * padding_length assert len(__lowercase ) == max_seq_length assert len(__lowercase ) == max_seq_length assert len(__lowercase ) == max_seq_length assert len(__lowercase ) == max_seq_length if ex_index < 5: logger.info("""*** Example ***""" ) logger.info("""guid: %s""" , example.guid ) logger.info("""tokens: %s""" , """ """.join([str(__lowercase ) for x in tokens] ) ) logger.info("""input_ids: %s""" , """ """.join([str(__lowercase ) for x in input_ids] ) ) logger.info("""input_mask: %s""" , """ """.join([str(__lowercase ) for x in input_mask] ) ) logger.info("""segment_ids: %s""" , """ """.join([str(__lowercase ) for x in segment_ids] ) ) logger.info("""label_ids: %s""" , """ """.join([str(__lowercase ) for x in label_ids] ) ) if "token_type_ids" not in tokenizer.model_input_names: __a = None features.append( InputFeatures( input_ids=__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , label_ids=__lowercase ) ) return features if is_torch_available(): import torch from torch import nn from torch.utils.data import Dataset class SCREAMING_SNAKE_CASE ( UpperCAmelCase_ ): __lowerCamelCase : Union[str, Any] =42 __lowerCamelCase : str =nn.CrossEntropyLoss().ignore_index def __init__( self : List[Any] , __lowercase : List[Any] , __lowercase : Optional[Any] , __lowercase : str , __lowercase : str , __lowercase : Optional[int] , __lowercase : Optional[Any] = None , __lowercase : Optional[Any]=False , __lowercase : List[Any] = Split.train , ): '''simple docstring''' # Load data features from cache or dataset file __a = os.path.join( __lowercase , """cached_{}_{}_{}""".format(mode.value , tokenizer.__class__.__name__ , str(__lowercase ) ) , ) # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. __a = cached_features_file + """.lock""" with FileLock(__lowercase ): if os.path.exists(__lowercase ) and not overwrite_cache: logger.info(F"Loading features from cached file {cached_features_file}" ) __a = torch.load(__lowercase ) else: logger.info(F"Creating features from dataset file at {data_dir}" ) __a = token_classification_task.read_examples_from_file(__lowercase , __lowercase ) # TODO clean up all this to leverage built-in features of tokenizers __a = token_classification_task.convert_examples_to_features( __lowercase , __lowercase , __lowercase , __lowercase , cls_token_at_end=bool(model_type in ["""xlnet"""] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ["""xlnet"""] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=__lowercase , pad_on_left=bool(tokenizer.padding_side == """left""" ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , ) logger.info(F"Saving features into cached file {cached_features_file}" ) torch.save(self.features , __lowercase ) def __len__( self : Dict ): '''simple docstring''' return len(self.features ) def __getitem__( self : Tuple , __lowercase : int ): '''simple docstring''' return self.features[i] if is_tf_available(): import tensorflow as tf class SCREAMING_SNAKE_CASE : __lowerCamelCase : Dict =42 __lowerCamelCase : int =-100 def __init__( self : Optional[Any] , __lowercase : Any , __lowercase : str , __lowercase : Tuple , __lowercase : Any , __lowercase : str , __lowercase : Dict = None , __lowercase : List[Any]=False , __lowercase : Any = Split.train , ): '''simple docstring''' __a = token_classification_task.read_examples_from_file(__lowercase , __lowercase ) # TODO clean up all this to leverage built-in features of tokenizers __a = token_classification_task.convert_examples_to_features( __lowercase , __lowercase , __lowercase , __lowercase , cls_token_at_end=bool(model_type in ["""xlnet"""] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ["""xlnet"""] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=__lowercase , pad_on_left=bool(tokenizer.padding_side == """left""" ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , ) def gen(): for ex in self.features: if ex.token_type_ids is None: yield ( {"input_ids": ex.input_ids, "attention_mask": ex.attention_mask}, ex.label_ids, ) else: yield ( { "input_ids": ex.input_ids, "attention_mask": ex.attention_mask, "token_type_ids": ex.token_type_ids, }, ex.label_ids, ) if "token_type_ids" not in tokenizer.model_input_names: __a = tf.data.Dataset.from_generator( __lowercase , ({"""input_ids""": tf.intaa, """attention_mask""": tf.intaa}, tf.intaa) , ( {"""input_ids""": tf.TensorShape([None] ), """attention_mask""": tf.TensorShape([None] )}, tf.TensorShape([None] ), ) , ) else: __a = tf.data.Dataset.from_generator( __lowercase , ({"""input_ids""": tf.intaa, """attention_mask""": tf.intaa, """token_type_ids""": tf.intaa}, tf.intaa) , ( { """input_ids""": tf.TensorShape([None] ), """attention_mask""": tf.TensorShape([None] ), """token_type_ids""": tf.TensorShape([None] ), }, tf.TensorShape([None] ), ) , ) def UpperCamelCase_ ( self : List[str] ): '''simple docstring''' __a = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features ) ) ) return self.dataset def __len__( self : Optional[int] ): '''simple docstring''' return len(self.features ) def __getitem__( self : Optional[int] , __lowercase : Any ): '''simple docstring''' return self.features[i]
302
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig a__ : Union[str, Any] = { 'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/config.json', 'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/config.json', 'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/config.json', 'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json', 'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/config.json', 'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/config.json', 'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/config.json', 'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json', } class UpperCAmelCase__ ( UpperCAmelCase_): __SCREAMING_SNAKE_CASE = '''albert''' def __init__( self , lowercase=3_0_0_0_0 , lowercase=1_2_8 , lowercase=4_0_9_6 , lowercase=1_2 , lowercase=1 , lowercase=6_4 , lowercase=1_6_3_8_4 , lowercase=1 , lowercase="gelu_new" , lowercase=0 , lowercase=0 , lowercase=5_1_2 , lowercase=2 , lowercase=0.02 , lowercase=1E-12 , lowercase=0.1 , lowercase="absolute" , lowercase=0 , lowercase=2 , lowercase=3 , **lowercase , ) -> Any: super().__init__(pad_token_id=lowercase , bos_token_id=lowercase , eos_token_id=lowercase , **lowercase ) __UpperCamelCase = vocab_size __UpperCamelCase = embedding_size __UpperCamelCase = hidden_size __UpperCamelCase = num_hidden_layers __UpperCamelCase = num_hidden_groups __UpperCamelCase = num_attention_heads __UpperCamelCase = inner_group_num __UpperCamelCase = hidden_act __UpperCamelCase = intermediate_size __UpperCamelCase = hidden_dropout_prob __UpperCamelCase = attention_probs_dropout_prob __UpperCamelCase = max_position_embeddings __UpperCamelCase = type_vocab_size __UpperCamelCase = initializer_range __UpperCamelCase = layer_norm_eps __UpperCamelCase = classifier_dropout_prob __UpperCamelCase = position_embedding_type class UpperCAmelCase__ ( UpperCAmelCase_): @property def __lowerCamelCase ( self ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": __UpperCamelCase = {0: """batch""", 1: """choice""", 2: """sequence"""} else: __UpperCamelCase = {0: """batch""", 1: """sequence"""} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis), ] )
349
0
"""simple docstring""" import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast @require_vision class __lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def lowerCamelCase ( self : Any ): lowerCAmelCase_ : List[str] = tempfile.mkdtemp() lowerCAmelCase_ : Union[str, Any] = BlipImageProcessor() lowerCAmelCase_ : str = GPTaTokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model" ) lowerCAmelCase_ : str = BlipaProcessor(a_ , a_ ) processor.save_pretrained(self.tmpdirname ) def lowerCamelCase ( self : List[str] , **a_ : Tuple ): return AutoProcessor.from_pretrained(self.tmpdirname , **a_ ).tokenizer def lowerCamelCase ( self : List[Any] , **a_ : int ): return AutoProcessor.from_pretrained(self.tmpdirname , **a_ ).image_processor def lowerCamelCase ( self : str ): shutil.rmtree(self.tmpdirname ) def lowerCamelCase ( self : Optional[Any] ): lowerCAmelCase_ : Dict = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )] lowerCAmelCase_ : Optional[Any] = [Image.fromarray(np.moveaxis(a_ , 0 , -1 ) ) for x in image_inputs] return image_inputs def lowerCamelCase ( self : Optional[int] ): lowerCAmelCase_ : Dict = BlipaProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) lowerCAmelCase_ : int = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" ) lowerCAmelCase_ : Any = self.get_image_processor(do_normalize=a_ , padding_value=1.0 ) lowerCAmelCase_ : List[Any] = BlipaProcessor.from_pretrained( self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=a_ , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , a_ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , a_ ) def lowerCamelCase ( self : List[str] ): lowerCAmelCase_ : Tuple = self.get_image_processor() lowerCAmelCase_ : List[str] = self.get_tokenizer() lowerCAmelCase_ : Optional[int] = BlipaProcessor(tokenizer=a_ , image_processor=a_ ) lowerCAmelCase_ : List[str] = self.prepare_image_inputs() lowerCAmelCase_ : Tuple = image_processor(a_ , return_tensors="np" ) lowerCAmelCase_ : Dict = processor(images=a_ , return_tensors="np" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def lowerCamelCase ( self : Optional[Any] ): lowerCAmelCase_ : Optional[Any] = self.get_image_processor() lowerCAmelCase_ : Tuple = self.get_tokenizer() lowerCAmelCase_ : Any = BlipaProcessor(tokenizer=a_ , image_processor=a_ ) lowerCAmelCase_ : Optional[Any] = "lower newer" lowerCAmelCase_ : str = processor(text=a_ ) lowerCAmelCase_ : Optional[Any] = tokenizer(a_ , return_token_type_ids=a_ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def lowerCamelCase ( self : int ): lowerCAmelCase_ : str = self.get_image_processor() lowerCAmelCase_ : List[Any] = self.get_tokenizer() lowerCAmelCase_ : str = BlipaProcessor(tokenizer=a_ , image_processor=a_ ) lowerCAmelCase_ : Tuple = "lower newer" lowerCAmelCase_ : Any = self.prepare_image_inputs() lowerCAmelCase_ : List[str] = processor(text=a_ , images=a_ ) self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] ) # test if it raises when no input is passed with pytest.raises(a_ ): processor() def lowerCamelCase ( self : str ): lowerCAmelCase_ : int = self.get_image_processor() lowerCAmelCase_ : List[str] = self.get_tokenizer() lowerCAmelCase_ : Union[str, Any] = BlipaProcessor(tokenizer=a_ , image_processor=a_ ) lowerCAmelCase_ : Optional[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] lowerCAmelCase_ : Optional[int] = processor.batch_decode(a_ ) lowerCAmelCase_ : Dict = tokenizer.batch_decode(a_ ) self.assertListEqual(a_ , a_ ) def lowerCamelCase ( self : Optional[Any] ): lowerCAmelCase_ : Tuple = self.get_image_processor() lowerCAmelCase_ : Any = self.get_tokenizer() lowerCAmelCase_ : Any = BlipaProcessor(tokenizer=a_ , image_processor=a_ ) lowerCAmelCase_ : Optional[int] = "lower newer" lowerCAmelCase_ : str = self.prepare_image_inputs() lowerCAmelCase_ : Optional[Any] = processor(text=a_ , images=a_ ) # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask'] self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
241
'''simple docstring''' import numpy as np from sklearn.datasets import fetch_california_housing from sklearn.metrics import mean_absolute_error, mean_squared_error from sklearn.model_selection import train_test_split from xgboost import XGBRegressor def _lowercase ( __A ): '''simple docstring''' return (data["data"], data["target"]) def _lowercase ( __A ,__A ,__A ): '''simple docstring''' __UpperCamelCase = XGBRegressor(verbosity=0 ,random_state=42 ) xgb.fit(__A ,__A ) # Predict target for test data __UpperCamelCase = xgb.predict(__A ) __UpperCamelCase = predictions.reshape(len(__A ) ,1 ) return predictions def _lowercase ( ): '''simple docstring''' __UpperCamelCase = fetch_california_housing() __UpperCamelCase , __UpperCamelCase = data_handling(__A ) __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = train_test_split( __A ,__A ,test_size=0.25 ,random_state=1 ) __UpperCamelCase = xgboost(__A ,__A ,__A ) # Error printing print(f"Mean Absolute Error : {mean_absolute_error(__A ,__A )}" ) print(f"Mean Square Error : {mean_squared_error(__A ,__A )}" ) if __name__ == "__main__": import doctest doctest.testmod(verbose=True) main()
349
0
import pytest import requests from datasets.utils.file_utils import http_head from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline @pytest.mark.integration def lowerCAmelCase__( ) -> Union[str, Any]: with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ): with pytest.raises(__A ): requests.request("GET" , "https://huggingface.co" ) with pytest.raises(requests.exceptions.ConnectTimeout ): requests.request("GET" , "https://huggingface.co" , timeout=1.0 ) @pytest.mark.integration def lowerCAmelCase__( ) -> Optional[int]: with offline(OfflineSimulationMode.CONNECTION_FAILS ): with pytest.raises(requests.exceptions.ConnectionError ): requests.request("GET" , "https://huggingface.co" ) def lowerCAmelCase__( ) -> Dict: with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ): with pytest.raises(__A ): http_head("https://huggingface.co" )
326
'''simple docstring''' from __future__ import annotations import unittest from transformers import AutoTokenizer, PegasusConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel @require_tf class UpperCAmelCase__ : __SCREAMING_SNAKE_CASE = PegasusConfig __SCREAMING_SNAKE_CASE = {} __SCREAMING_SNAKE_CASE = '''gelu''' def __init__( self , lowercase , lowercase=1_3 , lowercase=7 , lowercase=True , lowercase=False , lowercase=9_9 , lowercase=3_2 , lowercase=2 , lowercase=4 , lowercase=3_7 , lowercase=0.1 , lowercase=0.1 , lowercase=4_0 , lowercase=2 , lowercase=1 , lowercase=0 , ) -> Any: __UpperCamelCase = parent __UpperCamelCase = batch_size __UpperCamelCase = seq_length __UpperCamelCase = is_training __UpperCamelCase = use_labels __UpperCamelCase = vocab_size __UpperCamelCase = hidden_size __UpperCamelCase = num_hidden_layers __UpperCamelCase = num_attention_heads __UpperCamelCase = intermediate_size __UpperCamelCase = hidden_dropout_prob __UpperCamelCase = attention_probs_dropout_prob __UpperCamelCase = max_position_embeddings __UpperCamelCase = eos_token_id __UpperCamelCase = pad_token_id __UpperCamelCase = bos_token_id def __lowerCamelCase ( self ) -> Dict: __UpperCamelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) __UpperCamelCase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) __UpperCamelCase = tf.concat([input_ids, eos_tensor] , axis=1 ) __UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCamelCase = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) __UpperCamelCase = prepare_pegasus_inputs_dict(lowercase , lowercase , lowercase ) return config, inputs_dict def __lowerCamelCase ( self , lowercase , lowercase ) -> Union[str, Any]: __UpperCamelCase = TFPegasusModel(config=lowercase ).get_decoder() __UpperCamelCase = inputs_dict["""input_ids"""] __UpperCamelCase = input_ids[:1, :] __UpperCamelCase = inputs_dict["""attention_mask"""][:1, :] __UpperCamelCase = inputs_dict["""head_mask"""] __UpperCamelCase = 1 # first forward pass __UpperCamelCase = model(lowercase , attention_mask=lowercase , head_mask=lowercase , use_cache=lowercase ) __UpperCamelCase , __UpperCamelCase = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids __UpperCamelCase = ids_tensor((self.batch_size, 3) , config.vocab_size ) __UpperCamelCase = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and __UpperCamelCase = tf.concat([input_ids, next_tokens] , axis=-1 ) __UpperCamelCase = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) __UpperCamelCase = model(lowercase , attention_mask=lowercase )[0] __UpperCamelCase = model(lowercase , attention_mask=lowercase , past_key_values=lowercase )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice __UpperCamelCase = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) __UpperCamelCase = output_from_no_past[:, -3:, random_slice_idx] __UpperCamelCase = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(lowercase , lowercase , rtol=1E-3 ) def _lowercase ( __A ,__A ,__A ,__A=None ,__A=None ,__A=None ,__A=None ,__A=None ,): '''simple docstring''' if attention_mask is None: __UpperCamelCase = tf.cast(tf.math.not_equal(__A ,config.pad_token_id ) ,tf.inta ) if decoder_attention_mask is None: __UpperCamelCase = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape ,dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] ,config.pad_token_id ) ,tf.inta ), ] ,axis=-1 ,) if head_mask is None: __UpperCamelCase = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: __UpperCamelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: __UpperCamelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class UpperCAmelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase): __SCREAMING_SNAKE_CASE = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else () __SCREAMING_SNAKE_CASE = (TFPegasusForConditionalGeneration,) if is_tf_available() else () __SCREAMING_SNAKE_CASE = ( { '''conversational''': TFPegasusForConditionalGeneration, '''feature-extraction''': TFPegasusModel, '''summarization''': TFPegasusForConditionalGeneration, '''text2text-generation''': TFPegasusForConditionalGeneration, '''translation''': TFPegasusForConditionalGeneration, } if is_tf_available() else {} ) __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = False def __lowerCamelCase ( self ) -> str: __UpperCamelCase = TFPegasusModelTester(self ) __UpperCamelCase = ConfigTester(self , config_class=lowercase ) def __lowerCamelCase ( self ) -> str: self.config_tester.run_common_tests() def __lowerCamelCase ( self ) -> Tuple: __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*lowercase ) @require_sentencepiece @require_tokenizers @require_tf class UpperCAmelCase__ ( unittest.TestCase): __SCREAMING_SNAKE_CASE = [ ''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''', ''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''', ] __SCREAMING_SNAKE_CASE = [ '''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to''' ''' reduce the risk of wildfires.''', '''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''', ] # differs slightly from pytorch, likely due to numerical differences in linear layers __SCREAMING_SNAKE_CASE = '''google/pegasus-xsum''' @cached_property def __lowerCamelCase ( self ) -> int: return AutoTokenizer.from_pretrained(self.model_name ) @cached_property def __lowerCamelCase ( self ) -> str: __UpperCamelCase = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model def __lowerCamelCase ( self , **lowercase ) -> Optional[int]: __UpperCamelCase = self.translate_src_text(**lowercase ) assert self.expected_text == generated_words def __lowerCamelCase ( self , **lowercase ) -> Optional[Any]: __UpperCamelCase = self.tokenizer(self.src_text , **lowercase , padding=lowercase , return_tensors="""tf""" ) __UpperCamelCase = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=lowercase , ) __UpperCamelCase = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=lowercase ) return generated_words @slow def __lowerCamelCase ( self ) -> Dict: self._assert_generated_batch_equal_expected()
349
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available a = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a = ['MLukeTokenizer'] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mluke import MLukeTokenizer else: import sys a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
155
'''simple docstring''' import string def _lowercase ( __A ): '''simple docstring''' for key in range(len(string.ascii_uppercase ) ): __UpperCamelCase = """""" for symbol in message: if symbol in string.ascii_uppercase: __UpperCamelCase = string.ascii_uppercase.find(__A ) __UpperCamelCase = num - key if num < 0: __UpperCamelCase = num + len(string.ascii_uppercase ) __UpperCamelCase = translated + string.ascii_uppercase[num] else: __UpperCamelCase = translated + symbol print(f"Decryption using Key #{key}: {translated}" ) def _lowercase ( ): '''simple docstring''' __UpperCamelCase = input("""Encrypted message: """ ) __UpperCamelCase = message.upper() decrypt(__A ) if __name__ == "__main__": import doctest doctest.testmod() main()
349
0
"""simple docstring""" from typing import Any, Callable, Dict, List, Optional, Union import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker lowerCamelCase_ = 'CompVis/stable-diffusion-v1-1' lowerCamelCase_ = 'CompVis/stable-diffusion-v1-2' lowerCamelCase_ = 'CompVis/stable-diffusion-v1-3' lowerCamelCase_ = 'CompVis/stable-diffusion-v1-4' class UpperCamelCase_ (UpperCAmelCase_ ): def __init__( self : Tuple , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : Any , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : int = True , ) -> Tuple: super()._init_() UpperCAmelCase_ : Tuple = StableDiffusionPipeline.from_pretrained(lowerCAmelCase_ ) UpperCAmelCase_ : Tuple = StableDiffusionPipeline.from_pretrained(lowerCAmelCase_ ) UpperCAmelCase_ : Optional[Any] = StableDiffusionPipeline.from_pretrained(lowerCAmelCase_ ) UpperCAmelCase_ : List[Any] = StableDiffusionPipeline( vae=lowerCAmelCase_ , text_encoder=lowerCAmelCase_ , tokenizer=lowerCAmelCase_ , unet=lowerCAmelCase_ , scheduler=lowerCAmelCase_ , safety_checker=lowerCAmelCase_ , feature_extractor=lowerCAmelCase_ , requires_safety_checker=lowerCAmelCase_ , ) self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea ) @property def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict[str, Any]: return {k: getattr(self , lowerCAmelCase_ ) for k in self.config.keys() if not k.startswith("_" )} def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase_ : int = "auto" ) -> Optional[Any]: if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory UpperCAmelCase_ : Union[str, Any] = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]: self.enable_attention_slicing(lowerCAmelCase_ ) @torch.no_grad() def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Union[str, Any] = 512 , lowerCAmelCase_ : Union[str, Any] = 512 , lowerCAmelCase_ : int = 50 , lowerCAmelCase_ : Optional[Any] = 7.5 , lowerCAmelCase_ : List[Any] = None , lowerCAmelCase_ : List[str] = 1 , lowerCAmelCase_ : List[str] = 0.0 , lowerCAmelCase_ : Optional[Any] = None , lowerCAmelCase_ : List[Any] = None , lowerCAmelCase_ : int = "pil" , lowerCAmelCase_ : str = True , lowerCAmelCase_ : Tuple = None , lowerCAmelCase_ : List[Any] = 1 , **lowerCAmelCase_ : Any , ) -> int: return self.pipea( prompt=lowerCAmelCase_ , height=lowerCAmelCase_ , width=lowerCAmelCase_ , num_inference_steps=lowerCAmelCase_ , guidance_scale=lowerCAmelCase_ , negative_prompt=lowerCAmelCase_ , num_images_per_prompt=lowerCAmelCase_ , eta=lowerCAmelCase_ , generator=lowerCAmelCase_ , latents=lowerCAmelCase_ , output_type=lowerCAmelCase_ , return_dict=lowerCAmelCase_ , callback=lowerCAmelCase_ , callback_steps=lowerCAmelCase_ , **lowerCAmelCase_ , ) @torch.no_grad() def _SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase_ : Dict , lowerCAmelCase_ : str = 512 , lowerCAmelCase_ : int = 512 , lowerCAmelCase_ : Optional[int] = 50 , lowerCAmelCase_ : List[Any] = 7.5 , lowerCAmelCase_ : List[Any] = None , lowerCAmelCase_ : List[str] = 1 , lowerCAmelCase_ : List[Any] = 0.0 , lowerCAmelCase_ : str = None , lowerCAmelCase_ : Optional[int] = None , lowerCAmelCase_ : Any = "pil" , lowerCAmelCase_ : Union[str, Any] = True , lowerCAmelCase_ : List[Any] = None , lowerCAmelCase_ : Dict = 1 , **lowerCAmelCase_ : Tuple , ) -> Dict: return self.pipea( prompt=lowerCAmelCase_ , height=lowerCAmelCase_ , width=lowerCAmelCase_ , num_inference_steps=lowerCAmelCase_ , guidance_scale=lowerCAmelCase_ , negative_prompt=lowerCAmelCase_ , num_images_per_prompt=lowerCAmelCase_ , eta=lowerCAmelCase_ , generator=lowerCAmelCase_ , latents=lowerCAmelCase_ , output_type=lowerCAmelCase_ , return_dict=lowerCAmelCase_ , callback=lowerCAmelCase_ , callback_steps=lowerCAmelCase_ , **lowerCAmelCase_ , ) @torch.no_grad() def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[int] = 512 , lowerCAmelCase_ : Tuple = 512 , lowerCAmelCase_ : Optional[Any] = 50 , lowerCAmelCase_ : Dict = 7.5 , lowerCAmelCase_ : Any = None , lowerCAmelCase_ : Any = 1 , lowerCAmelCase_ : Optional[int] = 0.0 , lowerCAmelCase_ : List[Any] = None , lowerCAmelCase_ : List[str] = None , lowerCAmelCase_ : int = "pil" , lowerCAmelCase_ : Optional[Any] = True , lowerCAmelCase_ : List[str] = None , lowerCAmelCase_ : List[str] = 1 , **lowerCAmelCase_ : int , ) -> Union[str, Any]: return self.pipea( prompt=lowerCAmelCase_ , height=lowerCAmelCase_ , width=lowerCAmelCase_ , num_inference_steps=lowerCAmelCase_ , guidance_scale=lowerCAmelCase_ , negative_prompt=lowerCAmelCase_ , num_images_per_prompt=lowerCAmelCase_ , eta=lowerCAmelCase_ , generator=lowerCAmelCase_ , latents=lowerCAmelCase_ , output_type=lowerCAmelCase_ , return_dict=lowerCAmelCase_ , callback=lowerCAmelCase_ , callback_steps=lowerCAmelCase_ , **lowerCAmelCase_ , ) @torch.no_grad() def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Dict = 512 , lowerCAmelCase_ : Union[str, Any] = 512 , lowerCAmelCase_ : Tuple = 50 , lowerCAmelCase_ : int = 7.5 , lowerCAmelCase_ : Any = None , lowerCAmelCase_ : Dict = 1 , lowerCAmelCase_ : List[str] = 0.0 , lowerCAmelCase_ : List[Any] = None , lowerCAmelCase_ : Tuple = None , lowerCAmelCase_ : Union[str, Any] = "pil" , lowerCAmelCase_ : Any = True , lowerCAmelCase_ : Tuple = None , lowerCAmelCase_ : Optional[Any] = 1 , **lowerCAmelCase_ : Optional[int] , ) -> Optional[int]: return self.pipea( prompt=lowerCAmelCase_ , height=lowerCAmelCase_ , width=lowerCAmelCase_ , num_inference_steps=lowerCAmelCase_ , guidance_scale=lowerCAmelCase_ , negative_prompt=lowerCAmelCase_ , num_images_per_prompt=lowerCAmelCase_ , eta=lowerCAmelCase_ , generator=lowerCAmelCase_ , latents=lowerCAmelCase_ , output_type=lowerCAmelCase_ , return_dict=lowerCAmelCase_ , callback=lowerCAmelCase_ , callback_steps=lowerCAmelCase_ , **lowerCAmelCase_ , ) @torch.no_grad() def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase_ : str , lowerCAmelCase_ : List[str] = 512 , lowerCAmelCase_ : Tuple = 512 , lowerCAmelCase_ : Any = 50 , lowerCAmelCase_ : Tuple = 7.5 , lowerCAmelCase_ : List[Any] = None , lowerCAmelCase_ : Optional[int] = 1 , lowerCAmelCase_ : List[Any] = 0.0 , lowerCAmelCase_ : str = None , lowerCAmelCase_ : List[Any] = None , lowerCAmelCase_ : Optional[Any] = "pil" , lowerCAmelCase_ : List[str] = True , lowerCAmelCase_ : int = None , lowerCAmelCase_ : str = 1 , **lowerCAmelCase_ : Tuple , ) -> Union[str, Any]: UpperCAmelCase_ : List[str] = "cuda" if torch.cuda.is_available() else "cpu" self.to(lowerCAmelCase_ ) # Checks if the height and width are divisible by 8 or not if height % 8 != 0 or width % 8 != 0: raise ValueError(f"""`height` and `width` must be divisible by 8 but are {height} and {width}.""" ) # Get first result from Stable Diffusion Checkpoint v1.1 UpperCAmelCase_ : List[Any] = self.textaimg_sda_a( prompt=lowerCAmelCase_ , height=lowerCAmelCase_ , width=lowerCAmelCase_ , num_inference_steps=lowerCAmelCase_ , guidance_scale=lowerCAmelCase_ , negative_prompt=lowerCAmelCase_ , num_images_per_prompt=lowerCAmelCase_ , eta=lowerCAmelCase_ , generator=lowerCAmelCase_ , latents=lowerCAmelCase_ , output_type=lowerCAmelCase_ , return_dict=lowerCAmelCase_ , callback=lowerCAmelCase_ , callback_steps=lowerCAmelCase_ , **lowerCAmelCase_ , ) # Get first result from Stable Diffusion Checkpoint v1.2 UpperCAmelCase_ : int = self.textaimg_sda_a( prompt=lowerCAmelCase_ , height=lowerCAmelCase_ , width=lowerCAmelCase_ , num_inference_steps=lowerCAmelCase_ , guidance_scale=lowerCAmelCase_ , negative_prompt=lowerCAmelCase_ , num_images_per_prompt=lowerCAmelCase_ , eta=lowerCAmelCase_ , generator=lowerCAmelCase_ , latents=lowerCAmelCase_ , output_type=lowerCAmelCase_ , return_dict=lowerCAmelCase_ , callback=lowerCAmelCase_ , callback_steps=lowerCAmelCase_ , **lowerCAmelCase_ , ) # Get first result from Stable Diffusion Checkpoint v1.3 UpperCAmelCase_ : Tuple = self.textaimg_sda_a( prompt=lowerCAmelCase_ , height=lowerCAmelCase_ , width=lowerCAmelCase_ , num_inference_steps=lowerCAmelCase_ , guidance_scale=lowerCAmelCase_ , negative_prompt=lowerCAmelCase_ , num_images_per_prompt=lowerCAmelCase_ , eta=lowerCAmelCase_ , generator=lowerCAmelCase_ , latents=lowerCAmelCase_ , output_type=lowerCAmelCase_ , return_dict=lowerCAmelCase_ , callback=lowerCAmelCase_ , callback_steps=lowerCAmelCase_ , **lowerCAmelCase_ , ) # Get first result from Stable Diffusion Checkpoint v1.4 UpperCAmelCase_ : Any = self.textaimg_sda_a( prompt=lowerCAmelCase_ , height=lowerCAmelCase_ , width=lowerCAmelCase_ , num_inference_steps=lowerCAmelCase_ , guidance_scale=lowerCAmelCase_ , negative_prompt=lowerCAmelCase_ , num_images_per_prompt=lowerCAmelCase_ , eta=lowerCAmelCase_ , generator=lowerCAmelCase_ , latents=lowerCAmelCase_ , output_type=lowerCAmelCase_ , return_dict=lowerCAmelCase_ , callback=lowerCAmelCase_ , callback_steps=lowerCAmelCase_ , **lowerCAmelCase_ , ) # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
268
'''simple docstring''' from collections import OrderedDict from typing import Any, List, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import logging a__ : Optional[Any] = logging.get_logger(__name__) a__ : Dict = { 'EleutherAI/gpt-j-6B': 'https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json', # See all GPT-J models at https://huggingface.co/models?filter=gpt_j } class UpperCAmelCase__ ( UpperCAmelCase_): __SCREAMING_SNAKE_CASE = '''gptj''' __SCREAMING_SNAKE_CASE = { '''max_position_embeddings''': '''n_positions''', '''hidden_size''': '''n_embd''', '''num_attention_heads''': '''n_head''', '''num_hidden_layers''': '''n_layer''', } def __init__( self , lowercase=5_0_4_0_0 , lowercase=2_0_4_8 , lowercase=4_0_9_6 , lowercase=2_8 , lowercase=1_6 , lowercase=6_4 , lowercase=None , lowercase="gelu_new" , lowercase=0.0 , lowercase=0.0 , lowercase=0.0 , lowercase=1E-5 , lowercase=0.02 , lowercase=True , lowercase=5_0_2_5_6 , lowercase=5_0_2_5_6 , lowercase=False , **lowercase , ) -> Tuple: __UpperCamelCase = vocab_size __UpperCamelCase = n_positions __UpperCamelCase = n_embd __UpperCamelCase = n_layer __UpperCamelCase = n_head __UpperCamelCase = n_inner __UpperCamelCase = rotary_dim __UpperCamelCase = activation_function __UpperCamelCase = resid_pdrop __UpperCamelCase = embd_pdrop __UpperCamelCase = attn_pdrop __UpperCamelCase = layer_norm_epsilon __UpperCamelCase = initializer_range __UpperCamelCase = use_cache __UpperCamelCase = bos_token_id __UpperCamelCase = eos_token_id super().__init__( bos_token_id=lowercase , eos_token_id=lowercase , tie_word_embeddings=lowercase , **lowercase ) class UpperCAmelCase__ ( UpperCAmelCase_): def __init__( self , lowercase , lowercase = "default" , lowercase = None , lowercase = False , ) -> List[str]: super().__init__(lowercase , task=lowercase , patching_specs=lowercase , use_past=lowercase ) if not getattr(self._config , """pad_token_id""" , lowercase ): # TODO: how to do that better? __UpperCamelCase = 0 @property def __lowerCamelCase ( self ) -> Mapping[str, Mapping[int, str]]: __UpperCamelCase = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} ) if self.use_past: self.fill_with_past_key_values_(lowercase , direction="""inputs""" ) __UpperCamelCase = {0: """batch""", 1: """past_sequence + sequence"""} else: __UpperCamelCase = {0: """batch""", 1: """sequence"""} return common_inputs @property def __lowerCamelCase ( self ) -> int: return self._config.n_layer @property def __lowerCamelCase ( self ) -> int: return self._config.n_head def __lowerCamelCase ( self , lowercase , lowercase = -1 , lowercase = -1 , lowercase = False , lowercase = None , ) -> Mapping[str, Any]: __UpperCamelCase = super(lowercase , self ).generate_dummy_inputs( lowercase , batch_size=lowercase , seq_length=lowercase , is_pair=lowercase , framework=lowercase ) # We need to order the input in the way they appears in the forward() __UpperCamelCase = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" ) else: import torch __UpperCamelCase , __UpperCamelCase = common_inputs["""input_ids"""].shape # Not using the same length for past_key_values __UpperCamelCase = seqlen + 2 __UpperCamelCase = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) __UpperCamelCase = [ (torch.zeros(lowercase ), torch.zeros(lowercase )) for _ in range(self.num_layers ) ] __UpperCamelCase = common_inputs["""attention_mask"""] if self.use_past: __UpperCamelCase = ordered_inputs["""attention_mask"""].dtype __UpperCamelCase = torch.cat( [ordered_inputs["""attention_mask"""], torch.ones(lowercase , lowercase , dtype=lowercase )] , dim=1 ) return ordered_inputs @property def __lowerCamelCase ( self ) -> int: return 1_3
349
0
import numpy as np from sklearn.datasets import fetch_california_housing from sklearn.metrics import mean_absolute_error, mean_squared_error from sklearn.model_selection import train_test_split from xgboost import XGBRegressor def __UpperCamelCase ( _lowerCAmelCase ) -> Tuple: """simple docstring""" return (data["data"], data["target"]) def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[int]: """simple docstring""" A : int = XGBRegressor(verbosity=0 , random_state=42 ) xgb.fit(__A , __A ) # Predict target for test data A : Optional[int] = xgb.predict(__A ) A : str = predictions.reshape(len(__A ) , 1 ) return predictions def __UpperCamelCase ( ) -> Any: """simple docstring""" A : Optional[Any] = fetch_california_housing() A , A : List[str] = data_handling(__A ) A , A , A , A : Dict = train_test_split( __A , __A , test_size=0.25 , random_state=1 ) A : Tuple = xgboost(__A , __A , __A ) # Error printing print(f'''Mean Absolute Error : {mean_absolute_error(__A , __A )}''' ) print(f'''Mean Square Error : {mean_squared_error(__A , __A )}''' ) if __name__ == "__main__": import doctest doctest.testmod(verbose=True) main()
116
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, is_vision_available, ) a__ : int = { 'configuration_layoutlmv3': [ 'LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LayoutLMv3Config', 'LayoutLMv3OnnxConfig', ], 'processing_layoutlmv3': ['LayoutLMv3Processor'], 'tokenization_layoutlmv3': ['LayoutLMv3Tokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : Dict = ['LayoutLMv3TokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : Any = [ 'LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST', 'LayoutLMv3ForQuestionAnswering', 'LayoutLMv3ForSequenceClassification', 'LayoutLMv3ForTokenClassification', 'LayoutLMv3Model', 'LayoutLMv3PreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : str = [ 'TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFLayoutLMv3ForQuestionAnswering', 'TFLayoutLMv3ForSequenceClassification', 'TFLayoutLMv3ForTokenClassification', 'TFLayoutLMv3Model', 'TFLayoutLMv3PreTrainedModel', ] try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : List[Any] = ['LayoutLMv3FeatureExtractor'] a__ : str = ['LayoutLMv3ImageProcessor'] if TYPE_CHECKING: from .configuration_layoutlmva import ( LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig, LayoutLMvaOnnxConfig, ) from .processing_layoutlmva import LayoutLMvaProcessor from .tokenization_layoutlmva import LayoutLMvaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_layoutlmva import ( LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST, LayoutLMvaForQuestionAnswering, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaModel, LayoutLMvaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_layoutlmva import ( TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, TFLayoutLMvaModel, TFLayoutLMvaPreTrainedModel, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor from .image_processing_layoutlmva import LayoutLMvaImageProcessor else: import sys a__ : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
349
0
import logging import os import sys from dataclasses import dataclass, field from itertools import chain from typing import Optional, Union import datasets import numpy as np import torch from datasets import load_dataset import transformers from transformers import ( AutoConfig, AutoModelForMultipleChoice, AutoTokenizer, HfArgumentParser, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.tokenization_utils_base import PreTrainedTokenizerBase from transformers.trainer_utils import get_last_checkpoint from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('''4.31.0''') lowerCAmelCase__ : Tuple = logging.getLogger(__name__) @dataclass class __snake_case : __lowerCamelCase = field( metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} ) __lowerCamelCase = field( default=UpperCAmelCase_ ,metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} ) __lowerCamelCase = field( default=UpperCAmelCase_ ,metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} ) __lowerCamelCase = field( default=UpperCAmelCase_ ,metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} ,) __lowerCamelCase = field( default=UpperCAmelCase_ ,metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} ,) __lowerCamelCase = field( default="""main""" ,metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} ,) __lowerCamelCase = field( default=UpperCAmelCase_ ,metadata={ """help""": ( """Will use the token generated when running `huggingface-cli login` (necessary to use this script """ """with private models).""" ) } ,) @dataclass class __snake_case : __lowerCamelCase = field(default=UpperCAmelCase_ ,metadata={"""help""": """The input training data file (a text file)."""} ) __lowerCamelCase = field( default=UpperCAmelCase_ ,metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""} ,) __lowerCamelCase = field( default=UpperCAmelCase_ ,metadata={"""help""": """Overwrite the cached training and evaluation sets"""} ) __lowerCamelCase = field( default=UpperCAmelCase_ ,metadata={"""help""": """The number of processes to use for the preprocessing."""} ,) __lowerCamelCase = field( default=UpperCAmelCase_ ,metadata={ """help""": ( """The maximum total input sequence length after tokenization. If passed, sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } ,) __lowerCamelCase = field( default=UpperCAmelCase_ ,metadata={ """help""": ( """Whether to pad all samples to the maximum sentence length. """ """If False, will pad the samples dynamically when batching to the maximum length in the batch. More """ """efficient on GPU but very bad for TPU.""" ) } ,) __lowerCamelCase = field( default=UpperCAmelCase_ ,metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of training examples to this """ """value if set.""" ) } ,) __lowerCamelCase = field( default=UpperCAmelCase_ ,metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of evaluation examples to this """ """value if set.""" ) } ,) def __a ( self ) -> Union[str, Any]: '''simple docstring''' if self.train_file is not None: snake_case__ : List[Any] = self.train_file.split('.' )[-1] assert extension in ["csv", "json"], "`train_file` should be a csv or a json file." if self.validation_file is not None: snake_case__ : str = self.validation_file.split('.' )[-1] assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file." @dataclass class __snake_case : __lowerCamelCase = 42 __lowerCamelCase = True __lowerCamelCase = None __lowerCamelCase = None def __call__( self , __UpperCamelCase ) -> str: '''simple docstring''' snake_case__ : List[Any] = 'label' if 'label' in features[0].keys() else 'labels' snake_case__ : Dict = [feature.pop(__UpperCamelCase ) for feature in features] snake_case__ : List[Any] = len(__UpperCamelCase ) snake_case__ : Dict = len(features[0]['input_ids'] ) snake_case__ : List[Any] = [ [{k: v[i] for k, v in feature.items()} for i in range(__UpperCamelCase )] for feature in features ] snake_case__ : int = list(chain(*__UpperCamelCase ) ) snake_case__ : Optional[int] = self.tokenizer.pad( __UpperCamelCase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='pt' , ) # Un-flatten snake_case__ : Union[str, Any] = {k: v.view(__UpperCamelCase , __UpperCamelCase , -1 ) for k, v in batch.items()} # Add back labels snake_case__ : Union[str, Any] = torch.tensor(__UpperCamelCase , dtype=torch.intaa ) return batch def UpperCamelCase__ ( ) -> List[str]: snake_case__ : Any = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. snake_case__ , snake_case__ , snake_case__ : Optional[int] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: snake_case__ , snake_case__ , snake_case__ : Union[str, Any] = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry('run_swag' , __A , __A ) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() snake_case__ : int = training_args.get_process_log_level() logger.setLevel(__A ) datasets.utils.logging.set_verbosity(__A ) transformers.utils.logging.set_verbosity(__A ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}""" + F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" ) logger.info(F"""Training/evaluation parameters {training_args}""" ) # Detecting last checkpoint. snake_case__ : Dict = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: snake_case__ : str = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F"""Output directory ({training_args.output_dir}) already exists and is not empty. """ 'Use --overwrite_output_dir to overcome.' ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """ 'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' ) # Set seed before initializing model. set_seed(training_args.seed ) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.train_file is not None or data_args.validation_file is not None: snake_case__ : Dict = {} if data_args.train_file is not None: snake_case__ : Union[str, Any] = data_args.train_file if data_args.validation_file is not None: snake_case__ : List[Any] = data_args.validation_file snake_case__ : Optional[Any] = data_args.train_file.split('.' )[-1] snake_case__ : Tuple = load_dataset( __A , data_files=__A , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) else: # Downloading and loading the swag dataset from the hub. snake_case__ : Optional[Any] = load_dataset( 'swag' , 'regular' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Load pretrained model and tokenizer # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. snake_case__ : List[Any] = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) snake_case__ : Optional[int] = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) snake_case__ : int = AutoModelForMultipleChoice.from_pretrained( model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=__A , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # When using your own dataset or a different dataset from swag, you will probably need to change this. snake_case__ : List[Any] = [F"""ending{i}""" for i in range(4 )] snake_case__ : Tuple = 'sent1' snake_case__ : Any = 'sent2' if data_args.max_seq_length is None: snake_case__ : Dict = tokenizer.model_max_length if max_seq_length > 1024: logger.warning( 'The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value' ' of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can' ' override this default with `--block_size xxx`.' ) snake_case__ : str = 1024 else: if data_args.max_seq_length > tokenizer.model_max_length: logger.warning( F"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the""" F"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" ) snake_case__ : List[str] = min(data_args.max_seq_length , tokenizer.model_max_length ) # Preprocessing the datasets. def preprocess_function(A__ ): snake_case__ : Optional[int] = [[context] * 4 for context in examples[context_name]] snake_case__ : Optional[Any] = examples[question_header_name] snake_case__ : str = [ [F"""{header} {examples[end][i]}""" for end in ending_names] for i, header in enumerate(__A ) ] # Flatten out snake_case__ : Optional[Any] = list(chain(*__A ) ) snake_case__ : List[str] = list(chain(*__A ) ) # Tokenize snake_case__ : List[Any] = tokenizer( __A , __A , truncation=__A , max_length=__A , padding='max_length' if data_args.pad_to_max_length else False , ) # Un-flatten return {k: [v[i : i + 4] for i in range(0 , len(__A ) , 4 )] for k, v in tokenized_examples.items()} if training_args.do_train: if "train" not in raw_datasets: raise ValueError('--do_train requires a train dataset' ) snake_case__ : Optional[Any] = raw_datasets['train'] if data_args.max_train_samples is not None: snake_case__ : Optional[int] = min(len(__A ) , data_args.max_train_samples ) snake_case__ : Tuple = train_dataset.select(range(__A ) ) with training_args.main_process_first(desc='train dataset map pre-processing' ): snake_case__ : int = train_dataset.map( __A , batched=__A , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , ) if training_args.do_eval: if "validation" not in raw_datasets: raise ValueError('--do_eval requires a validation dataset' ) snake_case__ : List[str] = raw_datasets['validation'] if data_args.max_eval_samples is not None: snake_case__ : List[Any] = min(len(__A ) , data_args.max_eval_samples ) snake_case__ : Dict = eval_dataset.select(range(__A ) ) with training_args.main_process_first(desc='validation dataset map pre-processing' ): snake_case__ : Any = eval_dataset.map( __A , batched=__A , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , ) # Data collator snake_case__ : Union[str, Any] = ( default_data_collator if data_args.pad_to_max_length else DataCollatorForMultipleChoice(tokenizer=__A , pad_to_multiple_of=8 if training_args.fpaa else None ) ) # Metric def compute_metrics(A__ ): snake_case__ , snake_case__ : List[str] = eval_predictions snake_case__ : Dict = np.argmax(__A , axis=1 ) return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()} # Initialize our Trainer snake_case__ : Union[str, Any] = Trainer( model=__A , args=__A , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=__A , data_collator=__A , compute_metrics=__A , ) # Training if training_args.do_train: snake_case__ : Dict = None if training_args.resume_from_checkpoint is not None: snake_case__ : str = training_args.resume_from_checkpoint elif last_checkpoint is not None: snake_case__ : List[Any] = last_checkpoint snake_case__ : Tuple = trainer.train(resume_from_checkpoint=__A ) trainer.save_model() # Saves the tokenizer too for easy upload snake_case__ : Optional[Any] = train_result.metrics snake_case__ : List[Any] = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(__A ) ) snake_case__ : str = min(__A , len(__A ) ) trainer.log_metrics('train' , __A ) trainer.save_metrics('train' , __A ) trainer.save_state() # Evaluation if training_args.do_eval: logger.info('*** Evaluate ***' ) snake_case__ : List[Any] = trainer.evaluate() snake_case__ : Optional[int] = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(__A ) snake_case__ : Union[str, Any] = min(__A , len(__A ) ) trainer.log_metrics('eval' , __A ) trainer.save_metrics('eval' , __A ) snake_case__ : Union[str, Any] = { 'finetuned_from': model_args.model_name_or_path, 'tasks': 'multiple-choice', 'dataset_tags': 'swag', 'dataset_args': 'regular', 'dataset': 'SWAG', 'language': 'en', } if training_args.push_to_hub: trainer.push_to_hub(**__A ) else: trainer.create_model_card(**__A ) def UpperCamelCase__ ( A__ ) -> Optional[Any]: main() if __name__ == "__main__": main()
143
'''simple docstring''' def _lowercase ( __A ,__A ): '''simple docstring''' __UpperCamelCase = len(__A ) __UpperCamelCase = [[False] * (required_sum + 1) for _ in range(arr_len + 1 )] # for each arr value, a sum of zero(0) can be formed by not taking any element # hence True/1 for i in range(arr_len + 1 ): __UpperCamelCase = True # sum is not zero and set is empty then false for i in range(1 ,required_sum + 1 ): __UpperCamelCase = False for i in range(1 ,arr_len + 1 ): for j in range(1 ,required_sum + 1 ): if arr[i - 1] > j: __UpperCamelCase = subset[i - 1][j] if arr[i - 1] <= j: __UpperCamelCase = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]] return subset[arr_len][required_sum] if __name__ == "__main__": import doctest doctest.testmod()
349
0
import pytest snake_case : List[str] = '__dummy_dataset1__' snake_case : Optional[int] = '\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"\nURLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n "tokens": datasets.Sequence(datasets.Value("string")),\n "ner_tags": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n "O",\n "B-PER",\n "I-PER",\n "B-ORG",\n "I-ORG",\n "B-LOC",\n "I-LOC",\n ]\n )\n ),\n "langs": datasets.Sequence(datasets.Value("string")),\n "spans": datasets.Sequence(datasets.Value("string")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, "r", encoding="utf-8") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n' @pytest.fixture def __lowerCamelCase ( ): """simple docstring""" return DATASET_LOADING_SCRIPT_NAME @pytest.fixture def __lowerCamelCase ( ): """simple docstring""" return DATASET_LOADING_SCRIPT_CODE @pytest.fixture def __lowerCamelCase ( UpperCAmelCase_ : str , UpperCAmelCase_ : str , UpperCAmelCase_ : List[str] ): """simple docstring""" a :List[Any] = dataset_loading_script_name a :Tuple = tmp_path / '''datasets''' / script_name script_dir.mkdir(parents=__A ) a :List[str] = script_dir / F'''{script_name}.py''' with open(__A , '''w''' ) as f: f.write(__A ) return str(__A )
94
'''simple docstring''' import bza import gzip import lzma import os import shutil import struct import tarfile import warnings import zipfile from abc import ABC, abstractmethod from pathlib import Path from typing import Dict, List, Optional, Type, Union from .. import config from .filelock import FileLock from .logging import get_logger a__ : Any = get_logger(__name__) class UpperCAmelCase__ : def __init__( self , lowercase = None ) -> List[str]: __UpperCamelCase = ( os.path.join(lowercase , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH ) __UpperCamelCase = Extractor def __lowerCamelCase ( self , lowercase ) -> str: from .file_utils import hash_url_to_filename # Path where we extract compressed archives # We extract in the cache dir, and get the extracted path name by hashing the original path" __UpperCamelCase = os.path.abspath(lowercase ) return os.path.join(self.extract_dir , hash_url_to_filename(lowercase ) ) def __lowerCamelCase ( self , lowercase , lowercase ) -> bool: return force_extract or ( not os.path.isfile(lowercase ) and not (os.path.isdir(lowercase ) and os.listdir(lowercase )) ) def __lowerCamelCase ( self , lowercase , lowercase = False ) -> str: __UpperCamelCase = self.extractor.infer_extractor_format(lowercase ) if not extractor_format: return input_path __UpperCamelCase = self._get_output_path(lowercase ) if self._do_extract(lowercase , lowercase ): self.extractor.extract(lowercase , lowercase , lowercase ) return output_path class UpperCAmelCase__ ( UpperCAmelCase_): @classmethod @abstractmethod def __lowerCamelCase ( cls , lowercase , **lowercase ) -> bool: ... @staticmethod @abstractmethod def __lowerCamelCase ( lowercase , lowercase ) -> None: ... class UpperCAmelCase__ ( UpperCAmelCase_ , UpperCAmelCase_): __SCREAMING_SNAKE_CASE = [] @staticmethod def __lowerCamelCase ( lowercase , lowercase ) -> int: with open(lowercase , """rb""" ) as f: return f.read(lowercase ) @classmethod def __lowerCamelCase ( cls , lowercase , lowercase = b"" ) -> bool: if not magic_number: __UpperCamelCase = max(len(lowercase ) for cls_magic_number in cls.magic_numbers ) try: __UpperCamelCase = cls.read_magic_number(lowercase , lowercase ) except OSError: return False return any(magic_number.startswith(lowercase ) for cls_magic_number in cls.magic_numbers ) class UpperCAmelCase__ ( UpperCAmelCase_): @classmethod def __lowerCamelCase ( cls , lowercase , **lowercase ) -> bool: return tarfile.is_tarfile(lowercase ) @staticmethod def __lowerCamelCase ( lowercase , lowercase ) -> str: def resolved(lowercase ) -> str: return os.path.realpath(os.path.abspath(lowercase ) ) def badpath(lowercase , lowercase ) -> bool: # joinpath will ignore base if path is absolute return not resolved(os.path.join(lowercase , lowercase ) ).startswith(lowercase ) def badlink(lowercase , lowercase ) -> bool: # Links are interpreted relative to the directory containing the link __UpperCamelCase = resolved(os.path.join(lowercase , os.path.dirname(info.name ) ) ) return badpath(info.linkname , base=lowercase ) __UpperCamelCase = resolved(lowercase ) for finfo in members: if badpath(finfo.name , lowercase ): logger.error(f"Extraction of {finfo.name} is blocked (illegal path)" ) elif finfo.issym() and badlink(lowercase , lowercase ): logger.error(f"Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}" ) elif finfo.islnk() and badlink(lowercase , lowercase ): logger.error(f"Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}" ) else: yield finfo @staticmethod def __lowerCamelCase ( lowercase , lowercase ) -> None: os.makedirs(lowercase , exist_ok=lowercase ) __UpperCamelCase = tarfile.open(lowercase ) tar_file.extractall(lowercase , members=TarExtractor.safemembers(lowercase , lowercase ) ) tar_file.close() class UpperCAmelCase__ ( UpperCAmelCase_): __SCREAMING_SNAKE_CASE = [B'''\x1F\x8B'''] @staticmethod def __lowerCamelCase ( lowercase , lowercase ) -> None: with gzip.open(lowercase , """rb""" ) as gzip_file: with open(lowercase , """wb""" ) as extracted_file: shutil.copyfileobj(lowercase , lowercase ) class UpperCAmelCase__ ( UpperCAmelCase_): __SCREAMING_SNAKE_CASE = [ B'''PK\x03\x04''', B'''PK\x05\x06''', # empty archive B'''PK\x07\x08''', # spanned archive ] @classmethod def __lowerCamelCase ( cls , lowercase , lowercase = b"" ) -> bool: if super().is_extractable(lowercase , magic_number=lowercase ): return True try: # Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives. # From: https://github.com/python/cpython/pull/5053 from zipfile import ( _CD_SIGNATURE, _ECD_DISK_NUMBER, _ECD_DISK_START, _ECD_ENTRIES_TOTAL, _ECD_OFFSET, _ECD_SIZE, _EndRecData, sizeCentralDir, stringCentralDir, structCentralDir, ) with open(lowercase , """rb""" ) as fp: __UpperCamelCase = _EndRecData(lowercase ) if endrec: if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0: return True # Empty zipfiles are still zipfiles elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]: fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir: __UpperCamelCase = fp.read(lowercase ) # CD is where we expect it to be if len(lowercase ) == sizeCentralDir: __UpperCamelCase = struct.unpack(lowercase , lowercase ) # CD is the right size if centdir[_CD_SIGNATURE] == stringCentralDir: return True # First central directory entry has correct magic number return False except Exception: # catch all errors in case future python versions change the zipfile internals return False @staticmethod def __lowerCamelCase ( lowercase , lowercase ) -> None: os.makedirs(lowercase , exist_ok=lowercase ) with zipfile.ZipFile(lowercase , """r""" ) as zip_file: zip_file.extractall(lowercase ) zip_file.close() class UpperCAmelCase__ ( UpperCAmelCase_): __SCREAMING_SNAKE_CASE = [B'''\xFD\x37\x7A\x58\x5A\x00'''] @staticmethod def __lowerCamelCase ( lowercase , lowercase ) -> None: with lzma.open(lowercase ) as compressed_file: with open(lowercase , """wb""" ) as extracted_file: shutil.copyfileobj(lowercase , lowercase ) class UpperCAmelCase__ ( UpperCAmelCase_): __SCREAMING_SNAKE_CASE = [B'''Rar!\x1a\x07\x00''', B'''Rar!\x1a\x07\x01\x00'''] # RAR_ID # RAR5_ID @staticmethod def __lowerCamelCase ( lowercase , lowercase ) -> None: if not config.RARFILE_AVAILABLE: raise ImportError("""Please pip install rarfile""" ) import rarfile os.makedirs(lowercase , exist_ok=lowercase ) __UpperCamelCase = rarfile.RarFile(lowercase ) rf.extractall(lowercase ) rf.close() class UpperCAmelCase__ ( UpperCAmelCase_): __SCREAMING_SNAKE_CASE = [B'''\x28\xb5\x2F\xFD'''] @staticmethod def __lowerCamelCase ( lowercase , lowercase ) -> None: if not config.ZSTANDARD_AVAILABLE: raise ImportError("""Please pip install zstandard""" ) import zstandard as zstd __UpperCamelCase = zstd.ZstdDecompressor() with open(lowercase , """rb""" ) as ifh, open(lowercase , """wb""" ) as ofh: dctx.copy_stream(lowercase , lowercase ) class UpperCAmelCase__ ( UpperCAmelCase_): __SCREAMING_SNAKE_CASE = [B'''\x42\x5A\x68'''] @staticmethod def __lowerCamelCase ( lowercase , lowercase ) -> None: with bza.open(lowercase , """rb""" ) as compressed_file: with open(lowercase , """wb""" ) as extracted_file: shutil.copyfileobj(lowercase , lowercase ) class UpperCAmelCase__ ( UpperCAmelCase_): __SCREAMING_SNAKE_CASE = [B'''\x37\x7A\xBC\xAF\x27\x1C'''] @staticmethod def __lowerCamelCase ( lowercase , lowercase ) -> None: if not config.PY7ZR_AVAILABLE: raise ImportError("""Please pip install py7zr""" ) import pyazr os.makedirs(lowercase , exist_ok=lowercase ) with pyazr.SevenZipFile(lowercase , """r""" ) as archive: archive.extractall(lowercase ) class UpperCAmelCase__ ( UpperCAmelCase_): __SCREAMING_SNAKE_CASE = [B'''\x04\x22\x4D\x18'''] @staticmethod def __lowerCamelCase ( lowercase , lowercase ) -> None: if not config.LZ4_AVAILABLE: raise ImportError("""Please pip install lz4""" ) import lza.frame with lza.frame.open(lowercase , """rb""" ) as compressed_file: with open(lowercase , """wb""" ) as extracted_file: shutil.copyfileobj(lowercase , lowercase ) class UpperCAmelCase__ : # Put zip file to the last, b/c it is possible wrongly detected as zip (I guess it means: as tar or gzip) __SCREAMING_SNAKE_CASE = { "tar": TarExtractor, "gzip": GzipExtractor, "zip": ZipExtractor, "xz": XzExtractor, "rar": RarExtractor, "zstd": ZstdExtractor, "bz2": BzipaExtractor, "7z": SevenZipExtractor, # <Added version="2.4.0"/> "lz4": LzaExtractor, # <Added version="2.4.0"/> } @classmethod def __lowerCamelCase ( cls ) -> Union[str, Any]: return max( len(lowercase ) for extractor in cls.extractors.values() if issubclass(lowercase , lowercase ) for extractor_magic_number in extractor.magic_numbers ) @staticmethod def __lowerCamelCase ( lowercase , lowercase ) -> str: try: return MagicNumberBaseExtractor.read_magic_number(lowercase , magic_number_length=lowercase ) except OSError: return b"" @classmethod def __lowerCamelCase ( cls , lowercase , lowercase = False ) -> bool: warnings.warn( """Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. """ """Use 'infer_extractor_format' instead.""" , category=lowercase , ) __UpperCamelCase = cls.infer_extractor_format(lowercase ) if extractor_format: return True if not return_extractor else (True, cls.extractors[extractor_format]) return False if not return_extractor else (False, None) @classmethod def __lowerCamelCase ( cls , lowercase ) -> str: # <Added version="2.4.0"/> __UpperCamelCase = cls._get_magic_number_max_length() __UpperCamelCase = cls._read_magic_number(lowercase , lowercase ) for extractor_format, extractor in cls.extractors.items(): if extractor.is_extractable(lowercase , magic_number=lowercase ): return extractor_format @classmethod def __lowerCamelCase ( cls , lowercase , lowercase , lowercase = None , lowercase = "deprecated" , ) -> None: os.makedirs(os.path.dirname(lowercase ) , exist_ok=lowercase ) # Prevent parallel extractions __UpperCamelCase = str(Path(lowercase ).with_suffix(""".lock""" ) ) with FileLock(lowercase ): shutil.rmtree(lowercase , ignore_errors=lowercase ) if extractor_format or extractor != "deprecated": if extractor != "deprecated" or not isinstance(lowercase , lowercase ): # passed as positional arg warnings.warn( """Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. """ """Use 'extractor_format' instead.""" , category=lowercase , ) __UpperCamelCase = extractor if extractor != """deprecated""" else extractor_format else: __UpperCamelCase = cls.extractors[extractor_format] return extractor.extract(lowercase , lowercase ) else: warnings.warn( """Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an """ """exception in 3.0.0.""" , category=lowercase , ) for extractor in cls.extractors.values(): if extractor.is_extractable(lowercase ): return extractor.extract(lowercase , lowercase )
349
0
'''simple docstring''' from typing import Dict, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_torch_available, is_torch_tensor, logging if is_torch_available(): import torch lowercase : Tuple = logging.get_logger(__name__) class A ( UpperCAmelCase_ ): __magic_name__ = ['''pixel_values'''] def __init__( self , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = 1 / 255 , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> None: """simple docstring""" super().__init__(**SCREAMING_SNAKE_CASE ) A : Any = size if size is not None else {'''shortest_edge''': 256} A : Dict = get_size_dict(SCREAMING_SNAKE_CASE , default_to_square=SCREAMING_SNAKE_CASE ) A : Optional[int] = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224} A : int = get_size_dict(SCREAMING_SNAKE_CASE , param_name='''crop_size''' ) A : Optional[Any] = do_resize A : List[str] = size A : str = resample A : Dict = do_center_crop A : List[str] = crop_size A : List[Any] = do_rescale A : List[Any] = rescale_factor A : Optional[Any] = do_normalize A : str = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN A : int = image_std if image_std is not None else IMAGENET_STANDARD_STD def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> np.ndarray: """simple docstring""" A : Tuple = get_size_dict(SCREAMING_SNAKE_CASE , default_to_square=SCREAMING_SNAKE_CASE ) if "shortest_edge" not in size: raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' ) A : List[str] = get_resize_output_image_size(SCREAMING_SNAKE_CASE , size=size['''shortest_edge'''] , default_to_square=SCREAMING_SNAKE_CASE ) return resize(SCREAMING_SNAKE_CASE , size=SCREAMING_SNAKE_CASE , resample=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> np.ndarray: """simple docstring""" A : str = get_size_dict(SCREAMING_SNAKE_CASE ) if "height" not in size or "width" not in size: raise ValueError(F'The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}' ) return center_crop(SCREAMING_SNAKE_CASE , size=(size['''height'''], size['''width''']) , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE ) -> np.ndarray: """simple docstring""" return rescale(SCREAMING_SNAKE_CASE , scale=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> np.ndarray: """simple docstring""" return normalize(SCREAMING_SNAKE_CASE , mean=SCREAMING_SNAKE_CASE , std=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE , ) -> Optional[int]: """simple docstring""" A : Dict = do_resize if do_resize is not None else self.do_resize A : Any = size if size is not None else self.size A : Dict = get_size_dict(SCREAMING_SNAKE_CASE , default_to_square=SCREAMING_SNAKE_CASE ) A : str = resample if resample is not None else self.resample A : int = do_center_crop if do_center_crop is not None else self.do_center_crop A : Optional[int] = crop_size if crop_size is not None else self.crop_size A : int = get_size_dict(SCREAMING_SNAKE_CASE , param_name='''crop_size''' ) A : Dict = do_rescale if do_rescale is not None else self.do_rescale A : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor A : str = do_normalize if do_normalize is not None else self.do_normalize A : Optional[int] = image_mean if image_mean is not None else self.image_mean A : int = image_std if image_std is not None else self.image_std A : Any = make_list_of_images(SCREAMING_SNAKE_CASE ) if not valid_images(SCREAMING_SNAKE_CASE ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # All transformations expect numpy arrays. A : int = [to_numpy_array(SCREAMING_SNAKE_CASE ) for image in images] if do_resize: A : Any = [self.resize(image=SCREAMING_SNAKE_CASE , size=SCREAMING_SNAKE_CASE , resample=SCREAMING_SNAKE_CASE ) for image in images] if do_center_crop: A : str = [self.center_crop(image=SCREAMING_SNAKE_CASE , size=SCREAMING_SNAKE_CASE ) for image in images] if do_rescale: A : Optional[int] = [self.rescale(image=SCREAMING_SNAKE_CASE , scale=SCREAMING_SNAKE_CASE ) for image in images] if do_normalize: A : List[Any] = [self.normalize(image=SCREAMING_SNAKE_CASE , mean=SCREAMING_SNAKE_CASE , std=SCREAMING_SNAKE_CASE ) for image in images] A : str = [to_channel_dimension_format(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for image in images] A : Union[str, Any] = {'''pixel_values''': images} return BatchFeature(data=SCREAMING_SNAKE_CASE , tensor_type=SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> Optional[int]: """simple docstring""" A : List[Any] = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(SCREAMING_SNAKE_CASE ) != len(SCREAMING_SNAKE_CASE ): raise ValueError( '''Make sure that you pass in as many target sizes as the batch dimension of the logits''' ) if is_torch_tensor(SCREAMING_SNAKE_CASE ): A : Any = target_sizes.numpy() A : Optional[int] = [] for idx in range(len(SCREAMING_SNAKE_CASE ) ): A : Dict = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=SCREAMING_SNAKE_CASE ) A : List[Any] = resized_logits[0].argmax(dim=0 ) semantic_segmentation.append(SCREAMING_SNAKE_CASE ) else: A : Optional[Any] = logits.argmax(dim=1 ) A : Optional[int] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )] return semantic_segmentation
3
'''simple docstring''' import unittest from transformers import PegasusConfig, PegasusTokenizer, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html a__ : List[str] = 'platform' import jax import jax.numpy as jnp import numpy as np from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel @require_flax class UpperCAmelCase__ : __SCREAMING_SNAKE_CASE = PegasusConfig __SCREAMING_SNAKE_CASE = {} __SCREAMING_SNAKE_CASE = '''gelu''' def __init__( self , lowercase , lowercase=1_3 , lowercase=7 , lowercase=True , lowercase=False , lowercase=9_9 , lowercase=3_2 , lowercase=5 , lowercase=4 , lowercase=3_7 , lowercase=0.1 , lowercase=0.1 , lowercase=2_0 , lowercase=2 , lowercase=1 , lowercase=0 , ) -> Optional[Any]: __UpperCamelCase = parent __UpperCamelCase = batch_size __UpperCamelCase = seq_length __UpperCamelCase = is_training __UpperCamelCase = use_labels __UpperCamelCase = vocab_size __UpperCamelCase = hidden_size __UpperCamelCase = num_hidden_layers __UpperCamelCase = num_attention_heads __UpperCamelCase = intermediate_size __UpperCamelCase = hidden_dropout_prob __UpperCamelCase = attention_probs_dropout_prob __UpperCamelCase = max_position_embeddings __UpperCamelCase = eos_token_id __UpperCamelCase = pad_token_id __UpperCamelCase = bos_token_id def __lowerCamelCase ( self ) -> str: __UpperCamelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size ) __UpperCamelCase = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 ) __UpperCamelCase = np.concatenate([input_ids, eos_tensor] , axis=1 ) __UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCamelCase = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) __UpperCamelCase = prepare_pegasus_inputs_dict(lowercase , lowercase , lowercase ) return config, inputs_dict def __lowerCamelCase ( self , lowercase , lowercase , lowercase ) -> Dict: __UpperCamelCase = 2_0 __UpperCamelCase = model_class_name(lowercase ) __UpperCamelCase = model.encode(inputs_dict["""input_ids"""] ) __UpperCamelCase , __UpperCamelCase = ( inputs_dict["""decoder_input_ids"""], inputs_dict["""decoder_attention_mask"""], ) __UpperCamelCase = model.init_cache(decoder_input_ids.shape[0] , lowercase , lowercase ) __UpperCamelCase = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""" ) __UpperCamelCase = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) __UpperCamelCase = model.decode( decoder_input_ids[:, :-1] , lowercase , decoder_attention_mask=lowercase , past_key_values=lowercase , decoder_position_ids=lowercase , ) __UpperCamelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" ) __UpperCamelCase = model.decode( decoder_input_ids[:, -1:] , lowercase , decoder_attention_mask=lowercase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=lowercase , ) __UpperCamelCase = model.decode(lowercase , lowercase ) __UpperCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=f"Max diff is {diff}" ) def __lowerCamelCase ( self , lowercase , lowercase , lowercase ) -> Any: __UpperCamelCase = 2_0 __UpperCamelCase = model_class_name(lowercase ) __UpperCamelCase = model.encode(inputs_dict["""input_ids"""] ) __UpperCamelCase , __UpperCamelCase = ( inputs_dict["""decoder_input_ids"""], inputs_dict["""decoder_attention_mask"""], ) __UpperCamelCase = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ), ] , axis=-1 , ) __UpperCamelCase = model.init_cache(decoder_input_ids.shape[0] , lowercase , lowercase ) __UpperCamelCase = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) __UpperCamelCase = model.decode( decoder_input_ids[:, :-1] , lowercase , decoder_attention_mask=lowercase , past_key_values=lowercase , decoder_position_ids=lowercase , ) __UpperCamelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" ) __UpperCamelCase = model.decode( decoder_input_ids[:, -1:] , lowercase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=lowercase , decoder_position_ids=lowercase , ) __UpperCamelCase = model.decode(lowercase , lowercase , decoder_attention_mask=lowercase ) __UpperCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=f"Max diff is {diff}" ) def _lowercase ( __A ,__A ,__A ,__A=None ,__A=None ,): '''simple docstring''' if attention_mask is None: __UpperCamelCase = np.not_equal(__A ,config.pad_token_id ).astype(np.inta ) if decoder_attention_mask is None: __UpperCamelCase = np.concatenate( [ np.ones(decoder_input_ids[:, :1].shape ,dtype=np.inta ), np.not_equal(decoder_input_ids[:, 1:] ,config.pad_token_id ).astype(np.inta ), ] ,axis=-1 ,) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, } @require_flax class UpperCAmelCase__ ( UpperCAmelCase_ , unittest.TestCase): __SCREAMING_SNAKE_CASE = ( ( FlaxPegasusForConditionalGeneration, FlaxPegasusModel, ) if is_flax_available() else () ) __SCREAMING_SNAKE_CASE = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else () __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = False def __lowerCamelCase ( self ) -> Optional[Any]: __UpperCamelCase = FlaxPegasusModelTester(self ) __UpperCamelCase = ConfigTester(self , config_class=lowercase ) def __lowerCamelCase ( self ) -> List[Any]: self.config_tester.run_common_tests() def __lowerCamelCase ( self ) -> List[Any]: __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(lowercase , lowercase , lowercase ) def __lowerCamelCase ( self ) -> List[Any]: __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(lowercase , lowercase , lowercase ) def __lowerCamelCase ( self ) -> List[str]: __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): __UpperCamelCase = self._prepare_for_class(lowercase , lowercase ) __UpperCamelCase = model_class(lowercase ) @jax.jit def encode_jitted(lowercase , lowercase=None , **lowercase ): return model.encode(input_ids=lowercase , attention_mask=lowercase ) with self.subTest("""JIT Enabled""" ): __UpperCamelCase = encode_jitted(**lowercase ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): __UpperCamelCase = encode_jitted(**lowercase ).to_tuple() self.assertEqual(len(lowercase ) , len(lowercase ) ) for jitted_output, output in zip(lowercase , lowercase ): self.assertEqual(jitted_output.shape , output.shape ) def __lowerCamelCase ( self ) -> List[Any]: __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): __UpperCamelCase = model_class(lowercase ) __UpperCamelCase = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""] ) __UpperCamelCase = { """decoder_input_ids""": inputs_dict["""decoder_input_ids"""], """decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""], """encoder_outputs""": encoder_outputs, } @jax.jit def decode_jitted(lowercase , lowercase , lowercase ): return model.decode( decoder_input_ids=lowercase , decoder_attention_mask=lowercase , encoder_outputs=lowercase , ) with self.subTest("""JIT Enabled""" ): __UpperCamelCase = decode_jitted(**lowercase ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): __UpperCamelCase = decode_jitted(**lowercase ).to_tuple() self.assertEqual(len(lowercase ) , len(lowercase ) ) for jitted_output, output in zip(lowercase , lowercase ): self.assertEqual(jitted_output.shape , output.shape ) @slow def __lowerCamelCase ( self ) -> Dict: for model_class_name in self.all_model_classes: __UpperCamelCase = model_class_name.from_pretrained("""google/pegasus-large""" , from_pt=lowercase ) __UpperCamelCase = np.ones((1, 1) ) __UpperCamelCase = model(lowercase ) self.assertIsNotNone(lowercase ) @slow def __lowerCamelCase ( self ) -> str: __UpperCamelCase = FlaxPegasusForConditionalGeneration.from_pretrained("""google/pegasus-xsum""" ) __UpperCamelCase = PegasusTokenizer.from_pretrained("""google/pegasus-xsum""" ) __UpperCamelCase = [ """ PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""", """ The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """, ] __UpperCamelCase = [ """California's largest electricity provider has turned off power to hundreds of thousands of customers.""", """Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""", ] __UpperCamelCase = tokenizer(lowercase , return_tensors="""np""" , truncation=lowercase , max_length=5_1_2 , padding=lowercase ) __UpperCamelCase = model.generate(**lowercase , num_beams=2 ).sequences __UpperCamelCase = tokenizer.batch_decode(lowercase , skip_special_tokens=lowercase ) assert tgt_text == decoded
349
0
from __future__ import annotations from collections import deque from collections.abc import Iterator from dataclasses import dataclass @dataclass class a_ : """simple docstring""" __UpperCAmelCase = 42 __UpperCAmelCase = 42 class a_ : """simple docstring""" def __init__( self : List[str] ,snake_case : int ): SCREAMING_SNAKE_CASE =[[] for _ in range(snake_case )] SCREAMING_SNAKE_CASE =size def __getitem__( self : Dict ,snake_case : Any ): return iter(self._graph[vertex] ) @property def _lowerCAmelCase ( self : Optional[Any] ): return self._size def _lowerCAmelCase ( self : Union[str, Any] ,snake_case : List[Any] ,snake_case : str ,snake_case : Optional[int] ): if weight not in (0, 1): raise ValueError('Edge weight must be either 0 or 1.' ) if to_vertex < 0 or to_vertex >= self.size: raise ValueError('Vertex indexes must be in [0; size).' ) self._graph[from_vertex].append(Edge(snake_case ,snake_case ) ) def _lowerCAmelCase ( self : str ,snake_case : List[Any] ,snake_case : int ): SCREAMING_SNAKE_CASE =deque([start_vertex] ) SCREAMING_SNAKE_CASE =[None] * self.size SCREAMING_SNAKE_CASE =0 while queue: SCREAMING_SNAKE_CASE =queue.popleft() SCREAMING_SNAKE_CASE =distances[current_vertex] if current_distance is None: continue for edge in self[current_vertex]: SCREAMING_SNAKE_CASE =current_distance + edge.weight SCREAMING_SNAKE_CASE =distances[edge.destination_vertex] if ( isinstance(snake_case ,snake_case ) and new_distance >= dest_vertex_distance ): continue SCREAMING_SNAKE_CASE =new_distance if edge.weight == 0: queue.appendleft(edge.destination_vertex ) else: queue.append(edge.destination_vertex ) if distances[finish_vertex] is None: raise ValueError('No path from start_vertex to finish_vertex.' ) return distances[finish_vertex] if __name__ == "__main__": import doctest doctest.testmod()
334
'''simple docstring''' import pytest a__ : List[str] = '__dummy_dataset1__' a__ : Optional[int] = '\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"\nURLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n "tokens": datasets.Sequence(datasets.Value("string")),\n "ner_tags": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n "O",\n "B-PER",\n "I-PER",\n "B-ORG",\n "I-ORG",\n "B-LOC",\n "I-LOC",\n ]\n )\n ),\n "langs": datasets.Sequence(datasets.Value("string")),\n "spans": datasets.Sequence(datasets.Value("string")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, "r", encoding="utf-8") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n' @pytest.fixture def _lowercase ( ): '''simple docstring''' return DATASET_LOADING_SCRIPT_NAME @pytest.fixture def _lowercase ( ): '''simple docstring''' return DATASET_LOADING_SCRIPT_CODE @pytest.fixture def _lowercase ( __A ,__A ,__A ): '''simple docstring''' __UpperCamelCase = dataset_loading_script_name __UpperCamelCase = tmp_path / """datasets""" / script_name script_dir.mkdir(parents=__A ) __UpperCamelCase = script_dir / f"{script_name}.py" with open(__A ,"""w""" ) as f: f.write(__A ) return str(__A )
349
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) __A : Optional[int] = { 'configuration_efficientformer': [ 'EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'EfficientFormerConfig', ] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Union[str, Any] = ['EfficientFormerImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : List[str] = [ 'EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'EfficientFormerForImageClassification', 'EfficientFormerForImageClassificationWithTeacher', 'EfficientFormerModel', 'EfficientFormerPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Dict = [ 'TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFEfficientFormerForImageClassification', 'TFEfficientFormerForImageClassificationWithTeacher', 'TFEfficientFormerModel', 'TFEfficientFormerPreTrainedModel', ] if TYPE_CHECKING: from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_efficientformer import EfficientFormerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_efficientformer import ( EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, EfficientFormerForImageClassification, EfficientFormerForImageClassificationWithTeacher, EfficientFormerModel, EfficientFormerPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_efficientformer import ( TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerModel, TFEfficientFormerPreTrainedModel, ) else: import sys __A : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
273
'''simple docstring''' import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConfig, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaForCTC, WavaVecaForPreTraining, WavaVecaProcessor, logging, ) from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification logging.set_verbosity_info() a__ : Any = logging.get_logger(__name__) a__ : Optional[int] = { 'post_extract_proj': 'feature_projection.projection', 'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv', 'self_attn.k_proj': 'encoder.layers.*.attention.k_proj', 'self_attn.v_proj': 'encoder.layers.*.attention.v_proj', 'self_attn.q_proj': 'encoder.layers.*.attention.q_proj', 'self_attn.out_proj': 'encoder.layers.*.attention.out_proj', 'self_attn_layer_norm': 'encoder.layers.*.layer_norm', 'fc1': 'encoder.layers.*.feed_forward.intermediate_dense', 'fc2': 'encoder.layers.*.feed_forward.output_dense', 'final_layer_norm': 'encoder.layers.*.final_layer_norm', 'encoder.layer_norm': 'encoder.layer_norm', 'adapter_layer': 'encoder.layers.*.adapter_layer', 'w2v_model.layer_norm': 'feature_projection.layer_norm', 'quantizer.weight_proj': 'quantizer.weight_proj', 'quantizer.vars': 'quantizer.codevectors', 'project_q': 'project_q', 'final_proj': 'project_hid', 'w2v_encoder.proj': 'lm_head', 'mask_emb': 'masked_spec_embed', 'pooling_layer.linear': 'projector', 'pooling_layer.projection': 'classifier', } a__ : List[str] = [ 'lm_head', 'quantizer.weight_proj', 'quantizer.codevectors', 'project_q', 'project_hid', 'projector', 'classifier', ] def _lowercase ( __A ): '''simple docstring''' __UpperCamelCase = {} with open(__A ,"""r""" ) as file: for line_number, line in enumerate(__A ): __UpperCamelCase = line.strip() if line: __UpperCamelCase = line.split() __UpperCamelCase = line_number __UpperCamelCase = words[0] __UpperCamelCase = value return result def _lowercase ( __A ,__A ,__A ,__A ,__A ): '''simple docstring''' for attribute in key.split(""".""" ): __UpperCamelCase = getattr(__A ,__A ) __UpperCamelCase = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(__A ): __UpperCamelCase = PARAM_MAPPING[full_name.split(""".""" )[-1]] __UpperCamelCase = """param""" if weight_type is not None and weight_type != "param": __UpperCamelCase = getattr(__A ,__A ).shape elif weight_type is not None and weight_type == "param": __UpperCamelCase = hf_pointer for attribute in hf_param_name.split(""".""" ): __UpperCamelCase = getattr(__A ,__A ) __UpperCamelCase = shape_pointer.shape # let's reduce dimension __UpperCamelCase = value[0] else: __UpperCamelCase = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be" f" {value.shape} for {full_name}" ) if weight_type == "weight": __UpperCamelCase = value elif weight_type == "weight_g": __UpperCamelCase = value elif weight_type == "weight_v": __UpperCamelCase = value elif weight_type == "bias": __UpperCamelCase = value elif weight_type == "param": for attribute in hf_param_name.split(""".""" ): __UpperCamelCase = getattr(__A ,__A ) __UpperCamelCase = value else: __UpperCamelCase = value logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." ) def _lowercase ( __A ,__A ,__A ,__A ,__A ): '''simple docstring''' __UpperCamelCase = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(__A ): __UpperCamelCase = PARAM_MAPPING[full_name.split(""".""" )[-1]] __UpperCamelCase = """param""" if weight_type is not None and weight_type != "param": __UpperCamelCase = """.""".join([key, weight_type] ) elif weight_type is not None and weight_type == "param": __UpperCamelCase = """.""".join([key, hf_param_name] ) else: __UpperCamelCase = key __UpperCamelCase = value if """lm_head""" in full_key else value[0] a__ : Dict = { 'W_a': 'linear_1.weight', 'W_b': 'linear_2.weight', 'b_a': 'linear_1.bias', 'b_b': 'linear_2.bias', 'ln_W': 'norm.weight', 'ln_b': 'norm.bias', } def _lowercase ( __A ,__A ,__A=None ,__A=None ): '''simple docstring''' __UpperCamelCase = False for key, mapped_key in MAPPING.items(): __UpperCamelCase = """wav2vec2.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]: __UpperCamelCase = True if "*" in mapped_key: __UpperCamelCase = name.split(__A )[0].split(""".""" )[-2] __UpperCamelCase = mapped_key.replace("""*""" ,__A ) if "weight_g" in name: __UpperCamelCase = """weight_g""" elif "weight_v" in name: __UpperCamelCase = """weight_v""" elif "bias" in name: __UpperCamelCase = """bias""" elif "weight" in name: # TODO: don't match quantizer.weight_proj __UpperCamelCase = """weight""" else: __UpperCamelCase = None if hf_dict is not None: rename_dict(__A ,__A ,__A ,__A ,__A ) else: set_recursively(__A ,__A ,__A ,__A ,__A ) return is_used return is_used def _lowercase ( __A ,__A ,__A ): '''simple docstring''' __UpperCamelCase = [] __UpperCamelCase = fairseq_model.state_dict() __UpperCamelCase = hf_model.wavaveca.feature_extractor for name, value in fairseq_dict.items(): __UpperCamelCase = False if "conv_layers" in name: load_conv_layer( __A ,__A ,__A ,__A ,hf_model.config.feat_extract_norm == """group""" ,) __UpperCamelCase = True else: __UpperCamelCase = load_wavaveca_layer(__A ,__A ,__A ) if not is_used: unused_weights.append(__A ) logger.warning(f"Unused weights: {unused_weights}" ) def _lowercase ( __A ,__A ,__A ,__A ,__A ): '''simple docstring''' __UpperCamelCase = full_name.split("""conv_layers.""" )[-1] __UpperCamelCase = name.split(""".""" ) __UpperCamelCase = int(items[0] ) __UpperCamelCase = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f"{full_name} has size {value.shape}, but" f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." ) __UpperCamelCase = value logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f"{full_name} has size {value.shape}, but" f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." ) __UpperCamelCase = value logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f"{full_name} has size {value.shape}, but" f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found." ) __UpperCamelCase = value logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f"{full_name} has size {value.shape}, but" f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found." ) __UpperCamelCase = value logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." ) else: unused_weights.append(__A ) @torch.no_grad() def _lowercase ( __A ,__A ,__A=None ,__A=None ,__A=True ,__A=False ): '''simple docstring''' if config_path is not None: __UpperCamelCase = WavaVecaConfig.from_pretrained(__A ) else: __UpperCamelCase = WavaVecaConfig() if is_seq_class: __UpperCamelCase = read_txt_into_dict(__A ) __UpperCamelCase = idalabel __UpperCamelCase = WavaVecaForSequenceClassification(__A ) __UpperCamelCase = WavaVecaFeatureExtractor( feature_size=1 ,sampling_rate=16_000 ,padding_value=0 ,do_normalize=__A ,return_attention_mask=__A ,) feature_extractor.save_pretrained(__A ) elif is_finetuned: if dict_path: __UpperCamelCase = Dictionary.load(__A ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq __UpperCamelCase = target_dict.pad_index __UpperCamelCase = target_dict.bos_index __UpperCamelCase = target_dict.eos_index __UpperCamelCase = len(target_dict.symbols ) __UpperCamelCase = os.path.join(__A ,"""vocab.json""" ) if not os.path.isdir(__A ): logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(__A ) ) return os.makedirs(__A ,exist_ok=__A ) __UpperCamelCase = target_dict.indices # fairseq has the <pad> and <s> switched __UpperCamelCase = 0 __UpperCamelCase = 1 with open(__A ,"""w""" ,encoding="""utf-8""" ) as vocab_handle: json.dump(__A ,__A ) __UpperCamelCase = WavaVecaCTCTokenizer( __A ,unk_token=target_dict.unk_word ,pad_token=target_dict.pad_word ,bos_token=target_dict.bos_word ,eos_token=target_dict.eos_word ,word_delimiter_token="""|""" ,do_lower_case=__A ,) __UpperCamelCase = True if config.feat_extract_norm == """layer""" else False __UpperCamelCase = WavaVecaFeatureExtractor( feature_size=1 ,sampling_rate=16_000 ,padding_value=0 ,do_normalize=__A ,return_attention_mask=__A ,) __UpperCamelCase = WavaVecaProcessor(feature_extractor=__A ,tokenizer=__A ) processor.save_pretrained(__A ) __UpperCamelCase = WavaVecaForCTC(__A ) else: __UpperCamelCase = WavaVecaForPreTraining(__A ) if is_finetuned or is_seq_class: __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] ,arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} ) else: __UpperCamelCase = argparse.Namespace(task="""audio_pretraining""" ) __UpperCamelCase = fairseq.tasks.setup_task(__A ) __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ,task=__A ) __UpperCamelCase = model[0].eval() recursively_load_weights(__A ,__A ,not is_finetuned ) hf_wavavec.save_pretrained(__A ) if __name__ == "__main__": a__ : int = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') parser.add_argument( '--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not' ) parser.add_argument( '--is_seq_class', action='store_true', help='Whether the model to convert is a fine-tuned sequence classification model or not', ) a__ : Optional[int] = parser.parse_args() a__ : str = not args.not_finetuned and not args.is_seq_class convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, is_finetuned, args.is_seq_class, )
349
0