Dataset schema (one row per sample):

  column                    type     range
  ------------------------  -------  --------------------------
  code                      string   lengths 82 to 54.1k
  code_codestyle            int64    0 to 699
  style_context             string   lengths 111 to 35.6k
  style_context_codestyle   int64    0 to 699
  label                     int64    0 to 1
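For orientation, a minimal sketch of reading one row with this schema via the `datasets` library; the repo id below is a placeholder, not taken from this page:

from datasets import load_dataset

# "username/code-style-pairs" is a hypothetical Hub id -- substitute the real one.
ds = load_dataset("username/code-style-pairs", split="train")
row = ds[0]
print(row["code"][:120])        # flattened Python source string
print(row["code_codestyle"])    # style id in [0, 699]
print(row["label"])             # 0 or 1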
# limitations under the License.
from typing import Optional, Tuple, Union

import torch

from diffusers import DiffusionPipeline, ImagePipelineOutput


class UnconditionalImagePipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler) -> None:
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[torch.Generator] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        image = torch.randn(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        image = image.to(self.device)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(model_output, t, image).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,), "This is a local test"

        return ImagePipelineOutput(images=image), "This is a local test"
315
import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class ChineseCLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ChineseCLIPImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class
0
0
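As a rough driver for the unconditional pipeline in the `code` field above; the checkpoint id and scheduler choice are assumptions, not part of the sample:

import torch
from diffusers import DDPMScheduler, UNet2DModel

# "google/ddpm-cat-256" is an assumed checkpoint id, used only for illustration.
unet = UNet2DModel.from_pretrained("google/ddpm-cat-256")
scheduler = DDPMScheduler.from_pretrained("google/ddpm-cat-256")
pipe = UnconditionalImagePipeline(unet=unet, scheduler=scheduler)

# The sample's __call__ returns (ImagePipelineOutput, "This is a local test").
output, marker = pipe(batch_size=1, num_inference_steps=50, generator=torch.manual_seed(0))
output.images[0].save("sample.png")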
'''simple docstring'''
import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SEW_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "asapp/sew-tiny-100k": "https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json",
    # See all SEW models at https://huggingface.co/models?filter=sew
}


class SEWConfig(PretrainedConfig):
    model_type = "sew"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. "
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, "
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) "
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
209
from sklearn.metrics import matthews_corrcoef

import datasets


_DESCRIPTION = """
Compute the Matthews correlation coefficient (MCC)

The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary and multiclass classifications. It takes
into account true and false positives and negatives and is generally
regarded as a balanced measure which can be used even if the classes are of
very different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
"""

_KWARGS_DESCRIPTION = """
Args:
    predictions (list of int): Predicted labels, as returned by a model.
    references (list of int): Ground truth labels.
    sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.
Returns:
    matthews_correlation (dict containing float): Matthews correlation.
Examples:
    Example 1, a basic example with only predictions and references as inputs:
        >>> matthews_metric = datasets.load_metric("matthews_correlation")
        >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
        ...                                   predictions=[1, 2, 2, 0, 3, 3])
        >>> print(round(results['matthews_correlation'], 2))
        0.54

    Example 2, the same example as above, but also including sample weights:
        >>> matthews_metric = datasets.load_metric("matthews_correlation")
        >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
        ...                                   predictions=[1, 2, 2, 0, 3, 3],
        ...                                   sample_weight=[0.5, 3, 1, 1, 1, 2])
        >>> print(round(results['matthews_correlation'], 2))
        0.1

    Example 3, the same example as above, but with sample weights that cause a negative correlation:
        >>> matthews_metric = datasets.load_metric("matthews_correlation")
        >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
        ...                                   predictions=[1, 2, 2, 0, 3, 3],
        ...                                   sample_weight=[0.5, 1, 0, 0, 0, 1])
        >>> print(round(results['matthews_correlation'], 2))
        -0.25
"""

_CITATION = """\
@article{scikit-learn,
  title={Scikit-learn: Machine Learning in {P}ython},
  author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
          and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
          and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
          Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
  journal={Journal of Machine Learning Research},
  volume={12},
  pages={2825--2830},
  year={2011}
}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class MatthewsCorrelation(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"
            ],
        )

    def _compute(self, predictions, references, sample_weight=None):
        return {
            "matthews_correlation": float(matthews_corrcoef(references, predictions, sample_weight=sample_weight)),
        }
0
0
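The convolution-length check in the SEW config above can be exercised directly; a minimal sketch with values chosen only to trigger the error:

from transformers import SEWConfig

# Mismatched lengths: 2 conv dims vs. 1 stride -> the config raises ValueError.
try:
    SEWConfig(conv_dim=(64, 128), conv_stride=(5,), conv_kernel=(10, 3))
except ValueError as err:
    print(err)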
import math
import random
from typing import Any

from .hill_climbing import SearchProblem


def simulated_annealing(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperate: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None

    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score

            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds

            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (change / current_temp)  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)

        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state

    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations), scores)
        plt.xlabel("Iterations")
        plt.ylabel("Function values")
        plt.show()
    return best_state


if __name__ == "__main__":

    def test_f1(x, y):
        return (x**2) + (y**2)

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via simulated annealing: {local_min.score()}"
    )

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_max = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via simulated annealing: {local_max.score()}"
    )

    def test_f2(x, y):
        return (3 * x**2) - (6 * y)

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        "The minimum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: "
        f"{local_min.score()}"
    )

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_max = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        "The maximum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: "
        f"{local_max.score()}"
    )
699
from __future__ import annotations


def print_distance(distance: list[float], src):
    print(f"Vertex\tShortest Distance from vertex {src}")
    for i, d in enumerate(distance):
        print(f"{i}\t\t{d}")


def check_negative_cycle(graph: list[dict[str, int]], distance: list[float], edge_count: int):
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
        if distance[u] != float("inf") and distance[u] + w < distance[v]:
            return True
    return False


def bellman_ford(graph: list[dict[str, int]], vertex_count: int, edge_count: int, src: int) -> list[float]:
    distance = [float("inf")] * vertex_count
    distance[src] = 0.0

    for _ in range(vertex_count - 1):
        for j in range(edge_count):
            u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
            if distance[u] != float("inf") and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w

    negative_cycle_exists = check_negative_cycle(graph, distance, edge_count)
    if negative_cycle_exists:
        raise Exception("Negative cycle found")

    return distance


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    V = int(input("Enter number of vertices: ").strip())
    E = int(input("Enter number of edges: ").strip())

    graph: list[dict[str, int]] = [{} for _ in range(E)]

    for i in range(E):
        print("Edge ", i + 1)
        src, dest, weight = (
            int(x) for x in input("Enter source, destination, weight: ").strip().split(" ")
        )
        graph[i] = {"src": src, "dst": dest, "weight": weight}

    source = int(input("\nEnter shortest path source:").strip())
    shortest_distance = bellman_ford(graph, V, E, source)
    print_distance(shortest_distance, 0)
0
0
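Since the `__main__` block of the Bellman-Ford sample above reads its graph interactively, here is a small non-interactive call; the edge weights are illustrative:

# Edges 0->1 (w=4), 0->2 (w=7), 1->2 (w=1); shortest distances from vertex 0.
edges = [
    {"src": 0, "dst": 1, "weight": 4},
    {"src": 0, "dst": 2, "weight": 7},
    {"src": 1, "dst": 2, "weight": 1},
]
print(bellman_ford(edges, vertex_count=3, edge_count=3, src=0))  # [0.0, 4.0, 5.0]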
from collections import namedtuple

import requests
from lxml import html  # type: ignore

covid_data = namedtuple("covid_data", "cases deaths recovered")


def covid_stats(url: str = "https://www.worldometers.info/coronavirus/") -> covid_data:
    xpath_str = '//div[@class = "maincounter-number"]/span/text()'
    return covid_data(*html.fromstring(requests.get(url).content).xpath(xpath_str))


fmt = """Total COVID-19 cases in the world: {}
Total deaths due to COVID-19 in the world: {}
Total COVID-19 patients recovered in the world: {}"""
print(fmt.format(*covid_stats()))
592
from __future__ import annotations

import unittest

from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TFRoFormerForCausalLM,
        TFRoFormerForMaskedLM,
        TFRoFormerForMultipleChoice,
        TFRoFormerForQuestionAnswering,
        TFRoFormerForSequenceClassification,
        TFRoFormerForTokenClassification,
        TFRoFormerModel,
    )
    from transformers.models.roformer.modeling_tf_roformer import (
        TFRoFormerSelfAttention,
        TFRoFormerSinusoidalPositionalEmbedding,
    )


class TFRoFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = RoFormerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFRoFormerModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.is_decoder = True
        model = TFRoFormerForCausalLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        prediction_scores = model(inputs)["logits"]
        self.parent.assertListEqual(
            list(prediction_scores.numpy().shape), [self.batch_size, self.seq_length, self.vocab_size]
        )

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFRoFormerForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFRoFormerForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFRoFormerForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFRoFormerForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFRoFormerForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_tf
class TFRoFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFRoFormerModel,
            TFRoFormerForCausalLM,
            TFRoFormerForMaskedLM,
            TFRoFormerForQuestionAnswering,
            TFRoFormerForSequenceClassification,
            TFRoFormerForTokenClassification,
            TFRoFormerForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFRoFormerModel,
            "fill-mask": TFRoFormerForMaskedLM,
            "question-answering": TFRoFormerForQuestionAnswering,
            "text-classification": TFRoFormerForSequenceClassification,
            "text-generation": TFRoFormerForCausalLM,
            "token-classification": TFRoFormerForTokenClassification,
            "zero-shot": TFRoFormerForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )

    test_head_masking = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            return True
        return False

    def setUp(self):
        self.model_tester = TFRoFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RoFormerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFRoFormerModel.from_pretrained("junnyu/roformer_chinese_base")
        self.assertIsNotNone(model)


@require_tf
class TFRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        # TODO Replace vocab size
        vocab_size = 50000

        expected_shape = [1, 6, vocab_size]
        self.assertEqual(output.shape, expected_shape)

        print(output[:, :3, :3])

        # TODO Replace values below with what was printed above.
        expected_slice = tf.constant(
            [
                [
                    [-0.12053341, -1.0264901, 0.29221946],
                    [-1.5133783, 0.197433, 0.15190607],
                    [-5.0135403, -3.900256, -0.84038764],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)


@require_tf
class TFRoFormerSinusoidalPositionalEmbeddingTest(unittest.TestCase):
    tolerance = 1e-4

    def test_basic(self):
        input_ids = tf.constant([[4, 10]])
        emba = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6, embedding_dim=6)
        emba_weights = emba(input_ids.shape)
        desired_weights = tf.constant(
            [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]]
        )
        tf.debugging.assert_near(emba_weights, desired_weights, atol=self.tolerance)

    def test_positional_emb_weights_against_roformer(self):
        desired_weights = tf.constant(
            [
                [0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
                [0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
                [0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
            ]
        )
        emba = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512, embedding_dim=512)
        emba([2, 16, 512])
        weights = emba.weight[:3, :5]
        tf.debugging.assert_near(weights, desired_weights, atol=self.tolerance)


@require_tf
class TFRoFormerSelfAttentionRotaryPositionEmbeddingTest(unittest.TestCase):
    tolerance = 1e-4

    def test_apply_rotary_position_embeddings(self):
        # 2,12,16,64
        query_layer = tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100
        key_layer = -tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100

        embed_positions = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32, embedding_dim=64)
        sinusoidal_pos = embed_positions([2, 16, 768])[None, None, :, :]

        query_layer, key_layer = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
            sinusoidal_pos, query_layer, key_layer
        )

        desired_query_layer = tf.constant(
            [
                [0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
                [-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
                [-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
                [-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
                [0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
                [3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
            ]
        )
        desired_key_layer = tf.constant(
            [
                [0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
                [0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
                [1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
                [2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
                [-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
                [-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
            ]
        )

        tf.debugging.assert_near(query_layer[0, 0, :6, :8], desired_query_layer, atol=self.tolerance)
        tf.debugging.assert_near(key_layer[0, 0, :6, :8], desired_key_layer, atol=self.tolerance)
0
0
import argparse
import re
from pathlib import Path

import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor

from transformers import (
    EfficientFormerConfig,
    EfficientFormerForImageClassificationWithTeacher,
    EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling


def rename_key(old_name, num_meta4d_last_stage):
    new_name = old_name

    if "patch_embed" in old_name:
        _, layer, param = old_name.split(".")

        if layer == "0":
            new_name = old_name.replace("0", "convolution1")
        elif layer == "1":
            new_name = old_name.replace("1", "batchnorm_before")
        elif layer == "3":
            new_name = old_name.replace("3", "convolution2")
        else:
            new_name = old_name.replace("4", "batchnorm_after")

    if "network" in old_name and re.search(r"\d\.\d", old_name):
        two_digit_num = r"\b\d{2}\b"
        if bool(re.search(two_digit_num, old_name)):
            match = re.search(r"\d\.\d\d.", old_name).group()
        else:
            match = re.search(r"\d\.\d.", old_name).group()
        if int(match[0]) < 6:
            trimmed_name = old_name.replace(match, "")
            trimmed_name = trimmed_name.replace("network", match[0] + ".meta4D_layers.blocks." + match[2:-1])
            new_name = "intermediate_stages." + trimmed_name
        else:
            trimmed_name = old_name.replace(match, "")
            if int(match[2]) < num_meta4d_last_stage:
                trimmed_name = trimmed_name.replace("network", "meta4D_layers.blocks." + match[2])
            else:
                layer_index = str(int(match[2]) - num_meta4d_last_stage)
                trimmed_name = trimmed_name.replace("network", "meta3D_layers.blocks." + layer_index)
            if "norm1" in old_name:
                trimmed_name = trimmed_name.replace("norm1", "layernorm1")
            elif "norm2" in old_name:
                trimmed_name = trimmed_name.replace("norm2", "layernorm2")
            elif "fc1" in old_name:
                trimmed_name = trimmed_name.replace("fc1", "linear_in")
            elif "fc2" in old_name:
                trimmed_name = trimmed_name.replace("fc2", "linear_out")

            new_name = "last_stage." + trimmed_name

    elif "network" in old_name and re.search(r".\d.", old_name):
        new_name = old_name.replace("network", "intermediate_stages")

    if "fc" in new_name:
        new_name = new_name.replace("fc", "convolution")
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        new_name = new_name.replace("norm1", "batchnorm_before")
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        new_name = new_name.replace("norm2", "batchnorm_after")
    if "proj" in new_name:
        new_name = new_name.replace("proj", "projection")
    if "dist_head" in new_name:
        new_name = new_name.replace("dist_head", "distillation_classifier")
    elif "head" in new_name:
        new_name = new_name.replace("head", "classifier")
    elif "patch_embed" in new_name:
        new_name = "efficientformer." + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        new_name = new_name.replace("norm", "layernorm")
        new_name = "efficientformer." + new_name
    else:
        new_name = "efficientformer.encoder." + new_name

    return new_name


def convert_torch_checkpoint(checkpoint, num_meta4d_last_stage):
    for key in checkpoint.copy().keys():
        val = checkpoint.pop(key)
        checkpoint[rename_key(key, num_meta4d_last_stage)] = val
    return checkpoint


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image


def convert_efficientformer_checkpoint(checkpoint_path, efficientformer_config_file, pytorch_dump_path, push_to_hub):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file)
    model = EfficientFormerForImageClassificationWithTeacher(config)
    model_name = "_".join(checkpoint_path.split("/")[-1].split(".")[0].split("_")[:-1])

    num_meta4d_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict, num_meta4d_last_stage)

    model.load_state_dict(new_state_dict)
    model.eval()

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    # prepare image
    image = prepare_img()
    image_size = 256
    crop_size = 224
    processor = EfficientFormerImageProcessor(
        size={"shortest_edge": image_size},
        crop_size={"height": crop_size, "width": crop_size},
        resample=pillow_resamplings["bicubic"],
    )
    pixel_values = processor(images=image, return_tensors="pt").pixel_values

    # original processing pipeline
    image_transforms = Compose(
        [
            Resize(image_size, interpolation=pillow_resamplings["bicubic"]),
            CenterCrop(crop_size),
            ToTensor(),
            Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)

    assert torch.allclose(original_pixel_values, pixel_values)

    outputs = model(pixel_values)
    logits = outputs.logits

    expected_shape = (1, 1000)

    if "l1" in model_name:
        expected_logits = torch.Tensor(
            [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        expected_logits = torch.Tensor(
            [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        expected_logits = torch.Tensor(
            [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878]
        )
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            f"Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7"
        )

    # Save Checkpoints
    Path(pytorch_dump_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_path)
    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
    processor.save_pretrained(pytorch_dump_path)
    print(f"Processor successfully saved at {pytorch_dump_path}")

    if push_to_hub:
        print("Pushing model to the hub...")

        model.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}",
            commit_message="Add model",
            use_temp_dir=True,
        )
        processor.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}",
            commit_message="Add image processor",
            use_temp_dir=True,
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--pytorch_model_path",
        default=None,
        type=str,
        required=True,
        help="Path to EfficientFormer pytorch checkpoint.",
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The json file for EfficientFormer model config.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
    parser.add_argument(
        "--no-push_to_hub",
        dest="push_to_hub",
        action="store_false",
        help="Do not push model and image processor to the hub",
    )
    parser.set_defaults(push_to_hub=True)

    args = parser.parse_args()
    convert_efficientformer_checkpoint(
        checkpoint_path=args.pytorch_model_path,
        efficientformer_config_file=args.config_file,
        pytorch_dump_path=args.pytorch_dump_path,
        push_to_hub=args.push_to_hub,
    )
398
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available


_import_structure = {"tokenization_herbert": ["HerbertTokenizer"]}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_herbert_fast"] = ["HerbertTokenizerFast"]


if TYPE_CHECKING:
    from .tokenization_herbert import HerbertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_herbert_fast import HerbertTokenizerFast

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
0
0
import warnings
from typing import List, Optional, Union

from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class FlavaProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "FlavaImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: Optional[ImageInput] = None,
        text: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = False,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_image_mask: Optional[bool] = None,
        return_codebook_pixels: Optional[bool] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_token_type_ids=return_token_type_ids,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
        if images is not None:
            image_features = self.image_processor(
                images,
                return_image_mask=return_image_mask,
                return_codebook_pixels=return_codebook_pixels,
                return_tensors=return_tensors,
                **kwargs,
            )

        if text is not None and images is not None:
            encoding.update(image_features)
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
197
import argparse import gdown import numpy as np import torch from huggingface_hub import hf_hub_download from transformers import ( CLIPTokenizer, CLIPTokenizerFast, VideoMAEImageProcessor, XCLIPConfig, XCLIPModel, XCLIPProcessor, XCLIPTextConfig, XCLIPVisionConfig, ) def __lowercase ( snake_case, snake_case ): """simple docstring""" __magic_name__ :str = XCLIPTextConfig() # derive patch size from model name __magic_name__ :Union[str, Any] = model_name.find('''patch''' ) __magic_name__ :Optional[Any] = int(model_name[start_idx + len('''patch''' ) : start_idx + len('''patch''' ) + 2] ) __magic_name__ :int = XCLIPVisionConfig(patch_size=snake_case, num_frames=snake_case ) if "large" in model_name: __magic_name__ :Dict = 7_6_8 __magic_name__ :int = 3_0_7_2 __magic_name__ :List[Any] = 1_2 __magic_name__ :str = 1_0_2_4 __magic_name__ :Any = 4_0_9_6 __magic_name__ :Optional[Any] = 1_6 __magic_name__ :Union[str, Any] = 2_4 __magic_name__ :Union[str, Any] = 7_6_8 __magic_name__ :Tuple = 3_0_7_2 if model_name == "xclip-large-patch14-16-frames": __magic_name__ :List[str] = 3_3_6 __magic_name__ :Any = XCLIPConfig.from_text_vision_configs(snake_case, snake_case ) if "large" in model_name: __magic_name__ :str = 7_6_8 return config def __lowercase ( snake_case ): """simple docstring""" if name == "token_embedding.weight": __magic_name__ :Any = name.replace('''token_embedding.weight''', '''text_model.embeddings.token_embedding.weight''' ) if name == "positional_embedding": __magic_name__ :Any = name.replace('''positional_embedding''', '''text_model.embeddings.position_embedding.weight''' ) if "ln_1" in name: __magic_name__ :List[str] = name.replace('''ln_1''', '''layer_norm1''' ) if "ln_2" in name: __magic_name__ :str = name.replace('''ln_2''', '''layer_norm2''' ) if "c_fc" in name: __magic_name__ :List[Any] = name.replace('''c_fc''', '''fc1''' ) if "c_proj" in name: __magic_name__ :Any = name.replace('''c_proj''', '''fc2''' ) if name.startswith('''transformer.resblocks''' ): __magic_name__ :Any = name.replace('''transformer.resblocks''', '''text_model.encoder.layers''' ) if "attn.out_proj" in name and "message" not in name: __magic_name__ :Union[str, Any] = name.replace('''attn.out_proj''', '''self_attn.out_proj''' ) if "ln_final" in name: __magic_name__ :Tuple = name.replace('''ln_final''', '''text_model.final_layer_norm''' ) # visual encoder if name == "visual.class_embedding": __magic_name__ :List[Any] = name.replace('''visual.class_embedding''', '''vision_model.embeddings.class_embedding''' ) if name == "visual.positional_embedding": __magic_name__ :Any = name.replace('''visual.positional_embedding''', '''vision_model.embeddings.position_embedding.weight''' ) if name.startswith('''visual.transformer.resblocks''' ): __magic_name__ :Union[str, Any] = name.replace('''visual.transformer.resblocks''', '''vision_model.encoder.layers''' ) if "visual.conv1" in name: __magic_name__ :Tuple = name.replace('''visual.conv1''', '''vision_model.embeddings.patch_embedding''' ) if "visual.ln_pre" in name: __magic_name__ :Tuple = name.replace('''visual.ln_pre''', '''vision_model.pre_layernorm''' ) if "visual.ln_post" in name: __magic_name__ :Optional[Any] = name.replace('''visual.ln_post''', '''vision_model.post_layernorm''' ) if "visual.proj" in name: __magic_name__ :Tuple = name.replace('''visual.proj''', '''visual_projection.weight''' ) if "text_projection" in name: __magic_name__ :int = name.replace('''text_projection''', '''text_projection.weight''' ) # things on top if "prompts_visual_proj" in name: 
__magic_name__ :int = name.replace('''prompts_visual_proj''', '''prompts_visual_projection''' ) if "prompts_visual_ln" in name: __magic_name__ :Dict = name.replace('''prompts_visual_ln''', '''prompts_visual_layernorm''' ) # mit if name == "mit.positional_embedding": __magic_name__ :List[Any] = name.replace('''positional''', '''position''' ) if name.startswith('''mit.resblocks''' ): __magic_name__ :Union[str, Any] = name.replace('''mit.resblocks''', '''mit.encoder.layers''' ) # prompts generator if name.startswith('''prompts_generator.norm''' ): __magic_name__ :str = name.replace('''prompts_generator.norm''', '''prompts_generator.layernorm''' ) return name def __lowercase ( snake_case, snake_case ): """simple docstring""" for key in orig_state_dict.copy().keys(): __magic_name__ :Any = orig_state_dict.pop(snake_case ) if "attn.in_proj" in key: __magic_name__ :str = key.split('''.''' ) if key.startswith('''visual''' ): __magic_name__ :List[Any] = key_split[3] __magic_name__ :List[Any] = config.vision_config.hidden_size if "message_attn" in key: if "weight" in key: __magic_name__ :List[Any] = val[ :dim, : ] __magic_name__ :List[str] = val[ dim : dim * 2, : ] __magic_name__ :List[str] = val[ -dim:, : ] else: __magic_name__ :str = val[ :dim ] __magic_name__ :Optional[int] = val[ dim : dim * 2 ] __magic_name__ :Any = val[ -dim: ] else: if "weight" in key: __magic_name__ :int = val[ :dim, : ] __magic_name__ :Union[str, Any] = val[ dim : dim * 2, : ] __magic_name__ :List[Any] = val[ -dim:, : ] else: __magic_name__ :Union[str, Any] = val[:dim] __magic_name__ :str = val[ dim : dim * 2 ] __magic_name__ :Dict = val[-dim:] elif key.startswith('''mit''' ): __magic_name__ :List[Any] = key_split[2] __magic_name__ :Any = config.vision_config.mit_hidden_size if "weight" in key: __magic_name__ :Union[str, Any] = val[:dim, :] __magic_name__ :Optional[int] = val[dim : dim * 2, :] __magic_name__ :int = val[-dim:, :] else: __magic_name__ :Tuple = val[:dim] __magic_name__ :Optional[int] = val[dim : dim * 2] __magic_name__ :Optional[int] = val[-dim:] else: __magic_name__ :Any = key_split[2] __magic_name__ :List[Any] = config.text_config.hidden_size if "weight" in key: __magic_name__ :Union[str, Any] = val[:dim, :] __magic_name__ :Tuple = val[ dim : dim * 2, : ] __magic_name__ :str = val[-dim:, :] else: __magic_name__ :int = val[:dim] __magic_name__ :Any = val[ dim : dim * 2 ] __magic_name__ :str = val[-dim:] else: __magic_name__ :Tuple = rename_key(snake_case ) if new_key_name in ["visual_projection.weight", "text_projection.weight"]: __magic_name__ :List[Any] = val.T __magic_name__ :Optional[Any] = val return orig_state_dict def __lowercase ( snake_case ): """simple docstring""" if num_frames == 8: __magic_name__ :Any = '''eating_spaghetti_8_frames.npy''' elif num_frames == 1_6: __magic_name__ :List[Any] = '''eating_spaghetti.npy''' elif num_frames == 3_2: __magic_name__ :Tuple = '''eating_spaghetti_32_frames.npy''' __magic_name__ :str = hf_hub_download( repo_id='''hf-internal-testing/spaghetti-video''', filename=snake_case, repo_type='''dataset''', ) __magic_name__ :List[Any] = np.load(snake_case ) return list(snake_case ) def __lowercase ( snake_case, snake_case=None, snake_case=False ): """simple docstring""" __magic_name__ :Union[str, Any] = { # fully supervised kinetics-400 checkpoints '''xclip-base-patch32''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth''', '''xclip-base-patch32-16-frames''': ( 
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth''' ), '''xclip-base-patch16''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth''', '''xclip-base-patch16-16-frames''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth''' ), '''xclip-large-patch14''': '''https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&amp;export=download&amp;confirm=t&amp;uuid=b26caedc-88e2-473e-830a-9d158b653cdb''', '''xclip-large-patch14-16-frames''': '''https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&amp;export=download&amp;confirm=t&amp;uuid=538fa810-e671-4050-b385-9a623f89804f''', # fully supervised kinetics-600 checkpoints '''xclip-base-patch16-kinetics-600''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth''' ), '''xclip-base-patch16-kinetics-600-16-frames''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth''' ), '''xclip-large-patch14-kinetics-600''': '''https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&amp;export=download&amp;confirm=t&amp;uuid=141d4977-4a65-44ae-864f-4b0c19f838be''', # few shot '''xclip-base-patch16-hmdb-2-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth''' ), '''xclip-base-patch16-hmdb-4-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth''' ), '''xclip-base-patch16-hmdb-8-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth''' ), '''xclip-base-patch16-hmdb-16-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth''' ), '''xclip-base-patch16-ucf-2-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth''' ), '''xclip-base-patch16-ucf-4-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth''' ), '''xclip-base-patch16-ucf-8-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth''' ), '''xclip-base-patch16-ucf-16-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth''' ), # zero shot '''xclip-base-patch16-zero-shot''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth''', } __magic_name__ :Optional[int] = model_to_url[model_name] __magic_name__ :List[str] = 8 if "16-frames" in model_name: __magic_name__ :List[Any] = 1_6 elif "shot" in model_name: __magic_name__ :Dict = 3_2 __magic_name__ :str = get_xclip_config(snake_case, snake_case ) __magic_name__ :List[Any] = XCLIPModel(snake_case ) model.eval() if "drive" in checkpoint_url: __magic_name__ :Any = '''pytorch_model.bin''' gdown.cached_download(snake_case, snake_case, quiet=snake_case ) __magic_name__ :Optional[Any] = torch.load(snake_case, map_location='''cpu''' )['''model'''] else: __magic_name__ :Optional[int] = torch.hub.load_state_dict_from_url(snake_case )['''model'''] __magic_name__ :List[str] = convert_state_dict(snake_case, snake_case ) __magic_name__ :List[Any] = XCLIPModel(snake_case ) __magic_name__ , __magic_name__ :Optional[Any] = model.load_state_dict(snake_case, strict=snake_case ) assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"] model.eval() __magic_name__ :str = 3_3_6 if model_name == '''xclip-large-patch14-16-frames''' else 2_2_4 __magic_name__ :Optional[int] = VideoMAEImageProcessor(size=snake_case ) 
__magic_name__ :Optional[int] = CLIPTokenizer.from_pretrained('''openai/clip-vit-base-patch32''' ) __magic_name__ :Tuple = CLIPTokenizerFast.from_pretrained('''openai/clip-vit-base-patch32''' ) __magic_name__ :Optional[int] = XCLIPProcessor(image_processor=snake_case, tokenizer=snake_case ) __magic_name__ :List[Any] = prepare_video(snake_case ) __magic_name__ :str = processor( text=['''playing sports''', '''eating spaghetti''', '''go shopping'''], videos=snake_case, return_tensors='''pt''', padding=snake_case ) print('''Shape of pixel values:''', inputs.pixel_values.shape ) with torch.no_grad(): __magic_name__ :Tuple = model(**snake_case ) # Verify outputs __magic_name__ :Any = outputs.logits_per_video __magic_name__ :str = logits_per_video.softmax(dim=1 ) print('''Probs:''', snake_case ) # kinetics-400 if model_name == "xclip-base-patch32": __magic_name__ :Dict = torch.tensor([[0.0019, 0.9951, 0.0030]] ) elif model_name == "xclip-base-patch32-16-frames": __magic_name__ :str = torch.tensor([[7.0_9_9_9E-0_4, 9.9_8_8_3E-0_1, 4.5_5_8_0E-0_4]] ) elif model_name == "xclip-base-patch16": __magic_name__ :Tuple = torch.tensor([[0.0083, 0.9681, 0.0236]] ) elif model_name == "xclip-base-patch16-16-frames": __magic_name__ :Tuple = torch.tensor([[7.6_9_3_7E-0_4, 9.9_7_2_8E-0_1, 1.9_4_7_3E-0_3]] ) elif model_name == "xclip-large-patch14": __magic_name__ :str = torch.tensor([[0.0062, 0.9864, 0.0075]] ) elif model_name == "xclip-large-patch14-16-frames": __magic_name__ :Optional[int] = torch.tensor([[3.3_8_7_7E-0_4, 9.9_9_3_7E-0_1, 2.8_8_8_8E-0_4]] ) # kinetics-600 elif model_name == "xclip-base-patch16-kinetics-600": __magic_name__ :Optional[int] = torch.tensor([[0.0555, 0.8914, 0.0531]] ) elif model_name == "xclip-base-patch16-kinetics-600-16-frames": __magic_name__ :List[str] = torch.tensor([[3.8_5_5_4E-0_4, 9.9_9_2_9E-0_1, 3.2_7_5_4E-0_4]] ) elif model_name == "xclip-large-patch14-kinetics-600": __magic_name__ :List[str] = torch.tensor([[0.0036, 0.9920, 0.0045]] ) # few shot elif model_name == "xclip-base-patch16-hmdb-2-shot": __magic_name__ :Tuple = torch.tensor([[7.1_8_9_0E-0_6, 9.9_9_9_4E-0_1, 5.6_5_5_9E-0_5]] ) elif model_name == "xclip-base-patch16-hmdb-4-shot": __magic_name__ :List[str] = torch.tensor([[1.0_3_2_0E-0_5, 9.9_9_9_3E-0_1, 6.2_4_3_5E-0_5]] ) elif model_name == "xclip-base-patch16-hmdb-8-shot": __magic_name__ :Optional[int] = torch.tensor([[4.1_3_7_7E-0_6, 9.9_9_9_0E-0_1, 9.8_3_8_6E-0_5]] ) elif model_name == "xclip-base-patch16-hmdb-16-shot": __magic_name__ :Optional[int] = torch.tensor([[4.1_3_4_7E-0_5, 9.9_9_6_2E-0_1, 3.3_4_1_1E-0_4]] ) elif model_name == "xclip-base-patch16-ucf-2-shot": __magic_name__ :Union[str, Any] = torch.tensor([[8.5_8_5_7E-0_5, 9.9_9_2_8E-0_1, 6.3_2_9_1E-0_4]] ) elif model_name == "xclip-base-patch16-ucf-4-shot": __magic_name__ :Union[str, Any] = torch.tensor([[8.5_8_5_7E-0_5, 9.9_9_2_8E-0_1, 6.3_2_9_1E-0_4]] ) elif model_name == "xclip-base-patch16-ucf-8-shot": __magic_name__ :Optional[int] = torch.tensor([[0.0027, 0.9904, 0.0070]] ) elif model_name == "xclip-base-patch16-ucf-16-shot": __magic_name__ :Any = torch.tensor([[9.8_2_1_9E-0_4, 9.9_5_9_3E-0_1, 3.0_8_6_3E-0_3]] ) # zero shot elif model_name == "xclip-base-patch16-zero-shot": __magic_name__ :Optional[int] = torch.tensor([[3.5_0_8_2E-0_4, 9.9_7_8_5E-0_1, 1.7_9_6_6E-0_3]] ) else: raise ValueError(f'''Model name {model_name} not supported''' ) assert torch.allclose(snake_case, snake_case, atol=1E-3 ) print('''Looks ok!''' ) if pytorch_dump_folder_path is not None: print(f'''Saving model {model_name} 
to {pytorch_dump_folder_path}''' ) model.save_pretrained(snake_case ) if push_to_hub: print('''Pushing model, processor and slow tokenizer files to the hub...''' ) model.push_to_hub(snake_case, organization='''nielsr''' ) processor.push_to_hub(snake_case, organization='''nielsr''' ) slow_tokenizer.push_to_hub(snake_case, organization='''nielsr''' ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""xclip-base-patch32""", type=str, help="""Name of the model.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) SCREAMING_SNAKE_CASE__ : List[Any] = parser.parse_args() convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
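# A minimal invocation sketch for the conversion script above; the script filename is an
# assumption, while the flags mirror the argparse definitions in the __main__ block:
#
#   python convert_x_clip_original_pytorch_to_hf.py \
#       --model_name xclip-base-patch32 \
#       --pytorch_dump_folder_path ./xclip-base-patch32 \
#       --push_to_hub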
0
0
from typing import List, Optional, Tuple, Union import torch from ...models import UNetaDModel from ...schedulers import KarrasVeScheduler from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class A__ ( __SCREAMING_SNAKE_CASE): _UpperCAmelCase : str = 42 _UpperCAmelCase : int = 42 def __init__( self , __magic_name__ , __magic_name__ ): super().__init__() self.register_modules(unet=__lowerCAmelCase , scheduler=__lowerCAmelCase ) @torch.no_grad() def __call__( self , __magic_name__ = 1 , __magic_name__ = 5_0 , __magic_name__ = None , __magic_name__ = "pil" , __magic_name__ = True , **__magic_name__ , ): lowerCamelCase : List[Any] = self.unet.config.sample_size lowerCamelCase : Tuple = (batch_size, 3, img_size, img_size) lowerCamelCase : str = self.unet # sample x_0 ~ N(0, sigma_0^2 * I) lowerCamelCase : Optional[int] = randn_tensor(__lowerCAmelCase , generator=__lowerCAmelCase , device=self.device ) * self.scheduler.init_noise_sigma self.scheduler.set_timesteps(__lowerCAmelCase ) for t in self.progress_bar(self.scheduler.timesteps ): # here sigma_t == t_i from the paper lowerCamelCase : Tuple = self.scheduler.schedule[t] lowerCamelCase : int = self.scheduler.schedule[t - 1] if t > 0 else 0 # 1. Select temporarily increased noise level sigma_hat # 2. Add new noise to move from sample_i to sample_hat lowerCamelCase : Dict = self.scheduler.add_noise_to_input(__lowerCAmelCase , __lowerCAmelCase , generator=__lowerCAmelCase ) # 3. Predict the noise residual given the noise magnitude `sigma_hat` # The model inputs and output are adjusted by following eq. (213) in [1]. lowerCamelCase : int = (sigma_hat / 2) * model((sample_hat + 1) / 2 , sigma_hat / 2 ).sample # 4. Evaluate dx/dt at sigma_hat # 5. Take Euler step from sigma to sigma_prev lowerCamelCase : List[str] = self.scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) if sigma_prev != 0: # 6. Apply 2nd order correction # The model inputs and output are adjusted by following eq. (213) in [1]. lowerCamelCase : List[Any] = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2 , sigma_prev / 2 ).sample lowerCamelCase : str = self.scheduler.step_correct( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , step_output.prev_sample , step_output["""derivative"""] , ) lowerCamelCase : Dict = step_output.prev_sample lowerCamelCase : Dict = (sample / 2 + 0.5).clamp(0 , 1 ) lowerCamelCase : str = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": lowerCamelCase : List[Any] = self.numpy_to_pil(__lowerCAmelCase ) if not return_dict: return (image,) return ImagePipelineOutput(images=__lowerCAmelCase )
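# A minimal usage sketch for the Karras-VE pipeline above (named `A__` in this listing; it
# mirrors KarrasVePipeline in diffusers). The UNet checkpoint id and the keyword names are
# assumptions taken from the upstream pipeline:
#
#   import torch
#   from diffusers import UNet2DModel, KarrasVeScheduler
#
#   unet = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256")
#   pipe = KarrasVePipeline(unet=unet, scheduler=KarrasVeScheduler())
#   image = pipe(num_inference_steps=50, generator=torch.manual_seed(0)).images[0]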
681
import numpy as np import torch from torch.utils.data import Dataset from utils import logger class lowerCamelCase_ ( lowerCamelCase ): def __init__( self , __lowerCAmelCase , __lowerCAmelCase ): """simple docstring""" __magic_name__ :Optional[int] = params __magic_name__ :Any = np.array(__lowerCAmelCase ) __magic_name__ :Optional[Any] = np.array([len(__lowerCAmelCase ) for t in data] ) self.check() self.remove_long_sequences() self.remove_empty_sequences() self.remove_unknown_sequences() self.check() self.print_statistics() def __getitem__( self , __lowerCAmelCase ): """simple docstring""" return (self.token_ids[index], self.lengths[index]) def __len__( self ): """simple docstring""" return len(self.lengths ) def A ( self ): """simple docstring""" assert len(self.token_ids ) == len(self.lengths ) assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) ) def A ( self ): """simple docstring""" __magic_name__ :Any = self.params.max_model_input_size __magic_name__ :int = self.lengths > max_len logger.info(F'''Splitting {sum(__lowerCAmelCase )} too long sequences.''' ) def divide_chunks(__lowerCAmelCase , __lowerCAmelCase ): return [l[i : i + n] for i in range(0 , len(__lowerCAmelCase ) , __lowerCAmelCase )] __magic_name__ :Optional[int] = [] __magic_name__ :List[Any] = [] if self.params.mlm: __magic_name__ , __magic_name__ :Optional[Any] = self.params.special_tok_ids['''cls_token'''], self.params.special_tok_ids['''sep_token'''] else: __magic_name__ , __magic_name__ :Tuple = self.params.special_tok_ids['''bos_token'''], self.params.special_tok_ids['''eos_token'''] for seq_, len_ in zip(self.token_ids , self.lengths ): assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_ if len_ <= max_len: new_tok_ids.append(seq_ ) new_lengths.append(len_ ) else: __magic_name__ :int = [] for sub_s in divide_chunks(seq_ , max_len - 2 ): if sub_s[0] != cls_id: __magic_name__ :List[Any] = np.insert(__lowerCAmelCase , 0 , __lowerCAmelCase ) if sub_s[-1] != sep_id: __magic_name__ :Union[str, Any] = np.insert(__lowerCAmelCase , len(__lowerCAmelCase ) , __lowerCAmelCase ) assert len(__lowerCAmelCase ) <= max_len assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s sub_seqs.append(__lowerCAmelCase ) new_tok_ids.extend(__lowerCAmelCase ) new_lengths.extend([len(__lowerCAmelCase ) for l in sub_seqs] ) __magic_name__ :Tuple = np.array(__lowerCAmelCase ) __magic_name__ :Optional[int] = np.array(__lowerCAmelCase ) def A ( self ): """simple docstring""" __magic_name__ :Optional[Any] = len(self ) __magic_name__ :int = self.lengths > 1_1 __magic_name__ :List[str] = self.token_ids[indices] __magic_name__ :Union[str, Any] = self.lengths[indices] __magic_name__ :List[str] = len(self ) logger.info(F'''Remove {init_size - new_size} too short (<=11 tokens) sequences.''' ) def A ( self ): """simple docstring""" if "unk_token" not in self.params.special_tok_ids: return else: __magic_name__ :Tuple = self.params.special_tok_ids['''unk_token'''] __magic_name__ :Dict = len(self ) __magic_name__ :Tuple = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] ) __magic_name__ :int = (unk_occs / self.lengths) < 0.5 __magic_name__ :str = self.token_ids[indices] __magic_name__ :str = self.lengths[indices] __magic_name__ :Any = len(self ) logger.info(F'''Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).''' ) def A ( self ): """simple docstring""" if not self.params.is_master: return logger.info(F'''{len(self )} sequences''' ) # data_len = 
sum(self.lengths) # nb_unique_tokens = len(Counter(list(chain(*self.token_ids)))) # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)') # unk_idx = self.params.special_tok_ids['unk_token'] # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids]) # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)') def A ( self , __lowerCAmelCase ): """simple docstring""" __magic_name__ :Optional[Any] = [t[0] for t in batch] __magic_name__ :List[Any] = [t[1] for t in batch] assert len(__lowerCAmelCase ) == len(__lowerCAmelCase ) # Max for paddings __magic_name__ :Tuple = max(__lowerCAmelCase ) # Pad token ids if self.params.mlm: __magic_name__ :Any = self.params.special_tok_ids['''pad_token'''] else: __magic_name__ :str = self.params.special_tok_ids['''unk_token'''] __magic_name__ :Any = [list(t.astype(__lowerCAmelCase ) ) + [pad_idx] * (max_seq_len_ - len(__lowerCAmelCase )) for t in token_ids] assert len(tk_ ) == len(__lowerCAmelCase ) assert all(len(__lowerCAmelCase ) == max_seq_len_ for t in tk_ ) __magic_name__ :Optional[int] = torch.tensor(tk_ ) # (bs, max_seq_len_) __magic_name__ :Optional[int] = torch.tensor(__lowerCAmelCase ) # (bs) return tk_t, lg_t
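# A rough pairing sketch for the dataset class above (it mirrors LmSeqsDataset from the
# transformers distillation scripts; the final method named `A` in this listing is
# `batch_sequences` upstream). `params` and `token_id_arrays` are assumptions:
#
#   from torch.utils.data import DataLoader
#
#   dataset = LmSeqsDataset(params=params, data=token_id_arrays)
#   loader = DataLoader(dataset, batch_size=32, shuffle=True, collate_fn=dataset.batch_sequences)
#   for token_ids, lengths in loader:  # (bs, max_seq_len) and (bs,) LongTensors
#       ...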
0
0
'''simple docstring'''
from collections.abc import Sequence


def max_subsequence_sum(nums: Sequence[int] | None = None) -> int:
    """simple docstring"""
    if nums is None or not nums:
        raise ValueError('Input sequence should not be empty')

    ans = nums[0]
    for i in range(1, len(nums)):
        num = nums[i]
        ans = max(ans, ans + num, num)

    return ans


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Try on a sample input from the user
    n = int(input("Enter number of elements : ").strip())
    array = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n]
    print(max_subsequence_sum(array))
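# Quick sanity checks (non-empty subsequences, so an all-negative input returns its largest
# element):
#   max_subsequence_sum([1, 2, 3, 4, -2])     -> 10
#   max_subsequence_sum([2, 8, 9])            -> 19
#   max_subsequence_sum([-2, -3, -1, -4, -6]) -> -1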
284
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging SCREAMING_SNAKE_CASE__ : str = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : Tuple = """▁""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = {"""vocab_file""": """spiece.model"""} SCREAMING_SNAKE_CASE__ : List[Any] = { """vocab_file""": { """google/reformer-crime-and-punishment""": ( """https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model""" ) } } SCREAMING_SNAKE_CASE__ : Optional[int] = { """google/reformer-crime-and-punishment""": 52_42_88, } class lowerCamelCase_ ( lowerCamelCase ): a__ = VOCAB_FILES_NAMES a__ = PRETRAINED_VOCAB_FILES_MAP a__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a__ = ['''input_ids''', '''attention_mask'''] def __init__( self , __lowerCAmelCase , __lowerCAmelCase="</s>" , __lowerCAmelCase="<unk>" , __lowerCAmelCase=[] , __lowerCAmelCase = None , **__lowerCAmelCase , ): """simple docstring""" __magic_name__ :int = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( eos_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , additional_special_tokens=__lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCAmelCase , ) __magic_name__ :Optional[Any] = vocab_file __magic_name__ :int = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(__lowerCAmelCase ) @property def A ( self ): """simple docstring""" return self.sp_model.get_piece_size() def A ( self ): """simple docstring""" __magic_name__ :str = {self.convert_ids_to_tokens(__lowerCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ): """simple docstring""" __magic_name__ :Optional[Any] = self.__dict__.copy() __magic_name__ :Optional[Any] = None return state def __setstate__( self , __lowerCAmelCase ): """simple docstring""" __magic_name__ :Any = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): __magic_name__ :Optional[int] = {} __magic_name__ :Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def A ( self , __lowerCAmelCase ): """simple docstring""" return self.sp_model.encode(__lowerCAmelCase , out_type=__lowerCAmelCase ) def A ( self , __lowerCAmelCase ): """simple docstring""" return self.sp_model.piece_to_id(__lowerCAmelCase ) def A ( self , __lowerCAmelCase ): """simple docstring""" if index < self.sp_model.get_piece_size(): __magic_name__ :int = self.sp_model.IdToPiece(__lowerCAmelCase ) return token def A ( self , __lowerCAmelCase ): """simple docstring""" __magic_name__ :Optional[Any] = [] __magic_name__ :Tuple = '''''' for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(__lowerCAmelCase ) + token __magic_name__ :Optional[Any] = [] else: current_sub_tokens.append(__lowerCAmelCase ) out_string += self.sp_model.decode(__lowerCAmelCase ) return out_string.strip() def A ( self , __lowerCAmelCase , __lowerCAmelCase = None ): """simple docstring""" if not os.path.isdir(__lowerCAmelCase ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return __magic_name__ :Optional[int] = os.path.join( __lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file 
) != os.path.abspath(__lowerCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __lowerCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(__lowerCAmelCase , '''wb''' ) as fi: __magic_name__ :Dict = self.sp_model.serialized_model_proto() fi.write(__lowerCAmelCase ) return (out_vocab_file,)
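# A minimal round-trip sketch; the class above corresponds to ReformerTokenizer in
# transformers, and the checkpoint id comes from its pretrained vocab map:
#
#   tokenizer = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
#   ids = tokenizer.encode("Was he a beast if music could move him so?")
#   print(tokenizer.decode(ids))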
0
0
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """microsoft/markuplm-base""": """https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json""",
    """microsoft/markuplm-large""": """https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json""",
}


class MarkupLMConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "markuplm"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0,
        eos_token_id=2,
        max_xpath_tag_unit_embeddings=256,
        max_xpath_subs_unit_embeddings=1_024,
        tag_pad_id=216,
        subs_pad_id=1_001,
        xpath_unit_hidden_size=32,
        max_depth=50,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
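# A small construction sketch for the config above (this would live in user code, not in the
# module itself):
#
#   config = MarkupLMConfig(max_depth=50, xpath_unit_hidden_size=32)
#   config.model_type                     -> "markuplm"
#   config.max_xpath_tag_unit_embeddings  -> 256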
278
import os import unittest from transformers import MobileBertTokenizer, MobileBertTokenizerFast from transformers.models.bert.tokenization_bert import ( VOCAB_FILES_NAMES, BasicTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english @require_tokenizers class lowerCamelCase_ ( lowerCamelCase , unittest.TestCase ): a__ = MobileBertTokenizer a__ = MobileBertTokenizerFast a__ = True a__ = True a__ = filter_non_english a__ = '''google/mobilebert-uncased''' def A ( self ): """simple docstring""" super().setUp() __magic_name__ :Tuple = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] __magic_name__ :Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) __magic_name__ :List[str] = [ (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped for tokenizer_def in self.tokenizers_list ] def A ( self , __lowerCAmelCase ): """simple docstring""" __magic_name__ :Union[str, Any] = '''UNwant\u00E9d,running''' __magic_name__ :int = '''unwanted, running''' return input_text, output_text def A ( self ): """simple docstring""" __magic_name__ :Optional[int] = self.tokenizer_class(self.vocab_file ) __magic_name__ :List[Any] = tokenizer.tokenize('''UNwant\u00E9d,running''' ) self.assertListEqual(__lowerCAmelCase , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) , [9, 6, 7, 1_2, 1_0, 1_1] ) def A ( self ): """simple docstring""" if not self.test_rust_tokenizer: return __magic_name__ :int = self.get_tokenizer() __magic_name__ :Tuple = self.get_rust_tokenizer() __magic_name__ :List[str] = '''UNwant\u00E9d,running''' __magic_name__ :Optional[Any] = tokenizer.tokenize(__lowerCAmelCase ) __magic_name__ :List[Any] = rust_tokenizer.tokenize(__lowerCAmelCase ) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase ) __magic_name__ :int = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ) __magic_name__ :str = rust_tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase ) __magic_name__ :List[Any] = self.get_rust_tokenizer() __magic_name__ :Any = tokenizer.encode(__lowerCAmelCase ) __magic_name__ :Any = rust_tokenizer.encode(__lowerCAmelCase ) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase ) # With lower casing __magic_name__ :Any = self.get_tokenizer(do_lower_case=__lowerCAmelCase ) __magic_name__ :List[Any] = self.get_rust_tokenizer(do_lower_case=__lowerCAmelCase ) __magic_name__ :Dict = '''UNwant\u00E9d,running''' __magic_name__ :Tuple = tokenizer.tokenize(__lowerCAmelCase ) __magic_name__ :Union[str, Any] = rust_tokenizer.tokenize(__lowerCAmelCase ) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase ) __magic_name__ :Optional[Any] = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ) __magic_name__ :Dict = rust_tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase ) __magic_name__ :Tuple = 
self.get_rust_tokenizer() __magic_name__ :Dict = tokenizer.encode(__lowerCAmelCase ) __magic_name__ :List[Any] = rust_tokenizer.encode(__lowerCAmelCase ) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase ) def A ( self ): """simple docstring""" __magic_name__ :Optional[int] = BasicTokenizer() self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] ) def A ( self ): """simple docstring""" __magic_name__ :List[Any] = BasicTokenizer(do_lower_case=__lowerCAmelCase ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] ) def A ( self ): """simple docstring""" __magic_name__ :Union[str, Any] = BasicTokenizer(do_lower_case=__lowerCAmelCase , strip_accents=__lowerCAmelCase ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] ) def A ( self ): """simple docstring""" __magic_name__ :Dict = BasicTokenizer(do_lower_case=__lowerCAmelCase , strip_accents=__lowerCAmelCase ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] ) def A ( self ): """simple docstring""" __magic_name__ :Optional[int] = BasicTokenizer(do_lower_case=__lowerCAmelCase ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] ) def A ( self ): """simple docstring""" __magic_name__ :List[str] = BasicTokenizer(do_lower_case=__lowerCAmelCase ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def A ( self ): """simple docstring""" __magic_name__ :int = BasicTokenizer(do_lower_case=__lowerCAmelCase , strip_accents=__lowerCAmelCase ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def A ( self ): """simple docstring""" __magic_name__ :Optional[int] = BasicTokenizer(do_lower_case=__lowerCAmelCase , strip_accents=__lowerCAmelCase ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def A ( self ): """simple docstring""" __magic_name__ :Optional[Any] = BasicTokenizer(do_lower_case=__lowerCAmelCase , never_split=['''[UNK]'''] ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? 
[UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] ) def A ( self ): """simple docstring""" __magic_name__ :int = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing'''] __magic_name__ :Union[str, Any] = {} for i, token in enumerate(__lowerCAmelCase ): __magic_name__ :Tuple = i __magic_name__ :List[Any] = WordpieceTokenizer(vocab=__lowerCAmelCase , unk_token='''[UNK]''' ) self.assertListEqual(tokenizer.tokenize('''''' ) , [] ) self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] ) self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] ) def A ( self ): """simple docstring""" self.assertTrue(_is_whitespace(''' ''' ) ) self.assertTrue(_is_whitespace('''\t''' ) ) self.assertTrue(_is_whitespace('''\r''' ) ) self.assertTrue(_is_whitespace('''\n''' ) ) self.assertTrue(_is_whitespace('''\u00A0''' ) ) self.assertFalse(_is_whitespace('''A''' ) ) self.assertFalse(_is_whitespace('''-''' ) ) def A ( self ): """simple docstring""" self.assertTrue(_is_control('''\u0005''' ) ) self.assertFalse(_is_control('''A''' ) ) self.assertFalse(_is_control(''' ''' ) ) self.assertFalse(_is_control('''\t''' ) ) self.assertFalse(_is_control('''\r''' ) ) def A ( self ): """simple docstring""" self.assertTrue(_is_punctuation('''-''' ) ) self.assertTrue(_is_punctuation('''$''' ) ) self.assertTrue(_is_punctuation('''`''' ) ) self.assertTrue(_is_punctuation('''.''' ) ) self.assertFalse(_is_punctuation('''A''' ) ) self.assertFalse(_is_punctuation(''' ''' ) ) def A ( self ): """simple docstring""" __magic_name__ :Any = self.get_tokenizer() __magic_name__ :Any = self.get_rust_tokenizer() # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340 self.assertListEqual([tokenizer.tokenize(__lowerCAmelCase ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] ) self.assertListEqual( [rust_tokenizer.tokenize(__lowerCAmelCase ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] ) @slow def A ( self ): """simple docstring""" __magic_name__ :Optional[int] = self.tokenizer_class.from_pretrained('''google/mobilebert-uncased''' ) __magic_name__ :Optional[int] = tokenizer.encode('''sequence builders''' , add_special_tokens=__lowerCAmelCase ) __magic_name__ :List[Any] = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__lowerCAmelCase ) __magic_name__ :Union[str, Any] = tokenizer.build_inputs_with_special_tokens(__lowerCAmelCase ) __magic_name__ :List[Any] = tokenizer.build_inputs_with_special_tokens(__lowerCAmelCase , __lowerCAmelCase ) assert encoded_sentence == [1_0_1] + text + [1_0_2] assert encoded_pair == [1_0_1] + text + [1_0_2] + text_a + [1_0_2] def A ( self ): """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): __magic_name__ :Optional[Any] = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase ) __magic_name__ :Optional[int] = F'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.''' __magic_name__ :Optional[Any] = tokenizer_r.encode_plus( __lowerCAmelCase , return_attention_mask=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , ) __magic_name__ :Any = 
tokenizer_r.do_lower_case if hasattr(__lowerCAmelCase , '''do_lower_case''' ) else False __magic_name__ :Optional[int] = ( [ ((0, 0), tokenizer_r.cls_token), ((0, 1), '''A'''), ((1, 2), ''','''), ((3, 5), '''na'''), ((5, 6), '''##ï'''), ((6, 8), '''##ve'''), ((9, 1_5), tokenizer_r.mask_token), ((1_6, 2_1), '''Allen'''), ((2_1, 2_3), '''##NL'''), ((2_3, 2_4), '''##P'''), ((2_5, 3_3), '''sentence'''), ((3_3, 3_4), '''.'''), ((0, 0), tokenizer_r.sep_token), ] if not do_lower_case else [ ((0, 0), tokenizer_r.cls_token), ((0, 1), '''a'''), ((1, 2), ''','''), ((3, 8), '''naive'''), ((9, 1_5), tokenizer_r.mask_token), ((1_6, 2_1), '''allen'''), ((2_1, 2_3), '''##nl'''), ((2_3, 2_4), '''##p'''), ((2_5, 3_3), '''sentence'''), ((3_3, 3_4), '''.'''), ((0, 0), tokenizer_r.sep_token), ] ) self.assertEqual( [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) ) self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] ) def A ( self ): """simple docstring""" __magic_name__ :Dict = ['''的''', '''人''', '''有'''] __magic_name__ :Any = ''''''.join(__lowerCAmelCase ) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): __magic_name__ :Optional[Any] = True __magic_name__ :Optional[int] = self.tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase ) __magic_name__ :Tuple = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase ) __magic_name__ :Dict = tokenizer_p.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ) __magic_name__ :List[str] = tokenizer_r.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ) __magic_name__ :Dict = tokenizer_r.convert_ids_to_tokens(__lowerCAmelCase ) __magic_name__ :Union[str, Any] = tokenizer_p.convert_ids_to_tokens(__lowerCAmelCase ) # it is expected that each Chinese character is not preceded by "##" self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase ) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase ) __magic_name__ :List[str] = False __magic_name__ :Tuple = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase ) __magic_name__ :List[str] = self.tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase ) __magic_name__ :Optional[Any] = tokenizer_r.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ) __magic_name__ :Union[str, Any] = tokenizer_p.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ) __magic_name__ :List[str] = tokenizer_r.convert_ids_to_tokens(__lowerCAmelCase ) __magic_name__ :Optional[int] = tokenizer_p.convert_ids_to_tokens(__lowerCAmelCase ) # it is expected that only the first Chinese character is not preceded by "##". __magic_name__ :Dict = [ F'''##{token}''' if idx != 0 else token for idx, token in enumerate(__lowerCAmelCase ) ] self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase ) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
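# A standalone sketch of the offset-mapping behaviour the test above exercises; it requires
# the fast (Rust-backed) tokenizer:
from transformers import MobileBertTokenizerFast

tok = MobileBertTokenizerFast.from_pretrained("google/mobilebert-uncased")
enc = tok("A, naïve sentence.", return_offsets_mapping=True)
print(list(zip(enc.tokens(), enc["offset_mapping"])))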
0
0
import copy import inspect import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import VideoMAEConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEForPreTraining, VideoMAEForVideoClassification, VideoMAEModel, ) from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from transformers import VideoMAEImageProcessor class A : def __init__(self , lowerCAmelCase , lowerCAmelCase=1_3 , lowerCAmelCase=1_0 , lowerCAmelCase=3 , lowerCAmelCase=2 , lowerCAmelCase=2 , lowerCAmelCase=2 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=3_2 , lowerCAmelCase=5 , lowerCAmelCase=4 , lowerCAmelCase=3_7 , lowerCAmelCase="gelu" , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=1_0 , lowerCAmelCase=0.02 , lowerCAmelCase=0.9 , lowerCAmelCase=None , ): __lowercase= parent __lowercase= batch_size __lowercase= image_size __lowercase= num_channels __lowercase= patch_size __lowercase= tubelet_size __lowercase= num_frames __lowercase= is_training __lowercase= use_labels __lowercase= hidden_size __lowercase= num_hidden_layers __lowercase= num_attention_heads __lowercase= intermediate_size __lowercase= hidden_act __lowercase= hidden_dropout_prob __lowercase= attention_probs_dropout_prob __lowercase= type_sequence_label_size __lowercase= initializer_range __lowercase= mask_ratio __lowercase= scope # in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame __lowercase= (image_size // patch_size) ** 2 __lowercase= (num_frames // tubelet_size) * self.num_patches_per_frame # use this variable to define bool_masked_pos __lowercase= int(mask_ratio * self.seq_length ) def _A (self ): __lowercase= floats_tensor( [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] ) __lowercase= None if self.use_labels: __lowercase= ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowercase= self.get_config() return config, pixel_values, labels def _A (self ): return VideoMAEConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , tubelet_size=self.tubelet_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): __lowercase= VideoMAEModel(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() __lowercase= model(__lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): __lowercase= VideoMAEForPreTraining(__lowerCAmelCase ) 
model.to(__lowerCAmelCase ) model.eval() # important: each video needs to have the same number of masked patches # hence we define a single mask, which we then repeat for each example in the batch __lowercase= torch.ones((self.num_masks,) ) __lowercase= torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] ) __lowercase= mask.expand(self.batch_size , -1 ).bool() __lowercase= model(__lowerCAmelCase , __lowerCAmelCase ) # model only returns predictions for masked patches __lowercase= mask.sum().item() __lowercase= 3 * self.tubelet_size * self.patch_size**2 self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels) ) def _A (self ): __lowercase= self.prepare_config_and_inputs() __lowercase= config_and_inputs __lowercase= {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class A ( A_ , A_ , unittest.TestCase ): UpperCamelCase_ : str =( (VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else () ) UpperCamelCase_ : List[str] =( {'''feature-extraction''': VideoMAEModel, '''video-classification''': VideoMAEForVideoClassification} if is_torch_available() else {} ) UpperCamelCase_ : Dict =False UpperCamelCase_ : Dict =False UpperCamelCase_ : Any =False UpperCamelCase_ : Optional[Any] =False def _A (self ): __lowercase= VideoMAEModelTester(self ) __lowercase= ConfigTester(self , config_class=__lowerCAmelCase , has_text_modality=__lowerCAmelCase , hidden_size=3_7 ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False ): __lowercase= copy.deepcopy(__lowerCAmelCase ) if model_class == VideoMAEForPreTraining: # important: each video needs to have the same number of masked patches # hence we define a single mask, which we then repeat for each example in the batch __lowercase= torch.ones((self.model_tester.num_masks,) ) __lowercase= torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] ) __lowercase= mask.expand(self.model_tester.batch_size , -1 ).bool() __lowercase= bool_masked_pos.to(__lowerCAmelCase ) if return_labels: if model_class in [ *get_values(__lowerCAmelCase ), ]: __lowercase= torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__lowerCAmelCase ) return inputs_dict def _A (self ): self.config_tester.run_common_tests() @unittest.skip(reason='VideoMAE does not use inputs_embeds' ) def _A (self ): pass def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowercase= model_class(__lowerCAmelCase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) __lowercase= model.get_output_embeddings() self.assertTrue(x is None or isinstance(__lowerCAmelCase , nn.Linear ) ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowercase= model_class(__lowerCAmelCase ) __lowercase= inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowercase= [*signature.parameters.keys()] __lowercase= ['''pixel_values'''] self.assertListEqual(arg_names[:1] , __lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*__lowerCAmelCase ) @slow def _A (self ): for model_name in 
VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowercase= VideoMAEModel.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) def _A (self ): if not self.has_attentions: pass else: __lowercase= self.model_tester.prepare_config_and_inputs_for_common() __lowercase= True for model_class in self.all_model_classes: __lowercase= self.model_tester.seq_length - self.model_tester.num_masks __lowercase= ( num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length ) __lowercase= True __lowercase= False __lowercase= True __lowercase= model_class(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() with torch.no_grad(): __lowercase= model(**self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) ) __lowercase= outputs.attentions self.assertEqual(len(__lowerCAmelCase ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] __lowercase= True __lowercase= model_class(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() with torch.no_grad(): __lowercase= model(**self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) ) __lowercase= outputs.attentions self.assertEqual(len(__lowerCAmelCase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , ) __lowercase= len(__lowerCAmelCase ) # Check attention is always last and order is fine __lowercase= True __lowercase= True __lowercase= model_class(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() with torch.no_grad(): __lowercase= model(**self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) ) self.assertEqual(out_len + 1 , len(__lowerCAmelCase ) ) __lowercase= outputs.attentions self.assertEqual(len(__lowerCAmelCase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , ) def _A (self ): def check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): __lowercase= model_class(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() with torch.no_grad(): __lowercase= model(**self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) ) __lowercase= outputs.hidden_states __lowercase= self.model_tester.num_hidden_layers + 1 self.assertEqual(len(__lowerCAmelCase ) , __lowerCAmelCase ) __lowercase= self.model_tester.seq_length - self.model_tester.num_masks __lowercase= num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) __lowercase= self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowercase= True check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __lowercase= True check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' 
) def _A (self ): pass def _lowerCamelCase( ) -> List[str]: '''simple docstring''' __lowercase= hf_hub_download( repo_id='hf-internal-testing/spaghetti-video' , filename='eating_spaghetti.npy' , repo_type='dataset' ) __lowercase= np.load(lowercase__ ) return list(lowercase__ ) @require_torch @require_vision class A ( unittest.TestCase ): @cached_property def _A (self ): return ( VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] ) if is_vision_available() else None ) @slow def _A (self ): __lowercase= VideoMAEForVideoClassification.from_pretrained('MCG-NJU/videomae-base-finetuned-kinetics' ).to( __lowerCAmelCase ) __lowercase= self.default_image_processor __lowercase= prepare_video() __lowercase= image_processor(__lowerCAmelCase , return_tensors='pt' ).to(__lowerCAmelCase ) # forward pass with torch.no_grad(): __lowercase= model(**__lowerCAmelCase ) # verify the logits __lowercase= torch.Size((1, 4_0_0) ) self.assertEqual(outputs.logits.shape , __lowerCAmelCase ) __lowercase= torch.tensor([0.36_69, -0.06_88, -0.24_21] ).to(__lowerCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCAmelCase , atol=1E-4 ) ) @slow def _A (self ): __lowercase= VideoMAEForPreTraining.from_pretrained('MCG-NJU/videomae-base-short' ).to(__lowerCAmelCase ) __lowercase= self.default_image_processor __lowercase= prepare_video() __lowercase= image_processor(__lowerCAmelCase , return_tensors='pt' ).to(__lowerCAmelCase ) # add boolean mask, indicating which patches to mask __lowercase= hf_hub_download(repo_id='hf-internal-testing/bool-masked-pos' , filename='bool_masked_pos.pt' ) __lowercase= torch.load(__lowerCAmelCase ) # forward pass with torch.no_grad(): __lowercase= model(**__lowerCAmelCase ) # verify the logits __lowercase= torch.Size([1, 1_4_0_8, 1_5_3_6] ) __lowercase= torch.tensor( [[0.79_94, 0.96_12, 0.85_08], [0.74_01, 0.89_58, 0.83_02], [0.58_62, 0.74_68, 0.73_25]] , device=__lowerCAmelCase ) self.assertEqual(outputs.logits.shape , __lowerCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , __lowerCAmelCase , atol=1E-4 ) ) # verify the loss (`config.norm_pix_loss` = `True`) __lowercase= torch.tensor([0.51_42] , device=__lowerCAmelCase ) self.assertTrue(torch.allclose(outputs.loss , __lowerCAmelCase , atol=1E-4 ) ) # verify the loss (`config.norm_pix_loss` = `False`) __lowercase= VideoMAEForPreTraining.from_pretrained('MCG-NJU/videomae-base-short' , norm_pix_loss=__lowerCAmelCase ).to( __lowerCAmelCase ) with torch.no_grad(): __lowercase= model(**__lowerCAmelCase ) __lowercase= torch.tensor(torch.tensor([0.64_69] ) , device=__lowerCAmelCase ) self.assertTrue(torch.allclose(outputs.loss , __lowerCAmelCase , atol=1E-4 ) )
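# A standalone sketch of the shared bool_masked_pos construction used throughout the tests
# above: VideoMAEForPreTraining expects every video in a batch to mask the same number of
# patches. The sizes are assumptions matching a 16-frame 224x224 input with tubelet size 2,
# patch size 16 and mask ratio 0.9:
import torch

seq_length = (224 // 16) ** 2 * (16 // 2)   # 196 patches per frame * 8 tubelets = 1568
num_masks = int(0.9 * seq_length)           # 1411 masked positions
mask = torch.cat([torch.ones(num_masks), torch.zeros(seq_length - num_masks)])
bool_masked_pos = mask.expand(2, -1).bool() # repeat the single mask for batch_size=2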
230
import logging import os import quant_trainer import torch from torch.utils.data import DataLoader from transformers import Trainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput SCREAMING_SNAKE_CASE__ : List[str] = logging.getLogger(__name__) if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class lowerCamelCase_ ( lowerCamelCase ): def __init__( self , *__lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , **__lowerCAmelCase ): """simple docstring""" super().__init__(*__lowerCAmelCase , **__lowerCAmelCase ) __magic_name__ :Any = eval_examples __magic_name__ :str = post_process_function __magic_name__ :int = quant_trainer_args __magic_name__ :List[str] = 1_2_8 # default number of calibration samples def A ( self , __lowerCAmelCase=None ): """simple docstring""" if calib_dataset is None and self.calib_dataset is None: raise ValueError('''Trainer: calibration requires an calib_dataset.''' ) __magic_name__ :Optional[Any] = calib_dataset if calib_dataset is not None else self.calib_dataset __magic_name__ :Optional[int] = self._remove_unused_columns(__lowerCAmelCase , description='''Calibration''' ) return DataLoader( __lowerCAmelCase , batch_size=self.args.eval_batch_size , collate_fn=self.data_collator , drop_last=self.args.dataloader_drop_last , num_workers=self.args.dataloader_num_workers , pin_memory=self.args.dataloader_pin_memory , shuffle=__lowerCAmelCase , ) def A ( self , __lowerCAmelCase=None ): """simple docstring""" __magic_name__ :Dict = self.train_dataset if calib_dataset is None else calib_dataset __magic_name__ :Any = self.get_calib_dataloader(__lowerCAmelCase ) __magic_name__ :List[str] = self.model quant_trainer.configure_model(__lowerCAmelCase , self.quant_trainer_args , calib=__lowerCAmelCase ) model.eval() quant_trainer.enable_calibration(__lowerCAmelCase ) logger.info('''***** Running calibration *****''' ) logger.info(F''' Num examples = {self.calib_num}''' ) logger.info(F''' Batch size = {calib_dataloader.batch_size}''' ) for step, inputs in enumerate(__lowerCAmelCase ): # Prediction step __magic_name__ , __magic_name__ , __magic_name__ :str = self.prediction_step(__lowerCAmelCase , __lowerCAmelCase , prediction_loss_only=__lowerCAmelCase ) if (step + 1) * calib_dataloader.batch_size >= self.calib_num: break quant_trainer.finish_calibration(__lowerCAmelCase , self.quant_trainer_args ) __magic_name__ :Any = model def A ( self , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase = "eval" ): """simple docstring""" __magic_name__ :Tuple = self.eval_dataset if eval_dataset is None else eval_dataset __magic_name__ :Optional[Any] = self.get_eval_dataloader(__lowerCAmelCase ) __magic_name__ :str = self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. 
__magic_name__ :Any = self.compute_metrics __magic_name__ :List[Any] = None __magic_name__ :List[str] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: __magic_name__ :Optional[Any] = eval_loop( __lowerCAmelCase , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__lowerCAmelCase , ) finally: __magic_name__ :Union[str, Any] = compute_metrics if self.post_process_function is not None and self.compute_metrics is not None: __magic_name__ :Union[str, Any] = self.post_process_function(__lowerCAmelCase , __lowerCAmelCase , output.predictions ) __magic_name__ :int = self.compute_metrics(__lowerCAmelCase ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(F'''{metric_key_prefix}_''' ): __magic_name__ :Dict = metrics.pop(__lowerCAmelCase ) self.log(__lowerCAmelCase ) else: __magic_name__ :List[str] = {} if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) xm.master_print(met.metrics_report() ) __magic_name__ :Optional[Any] = self.callback_handler.on_evaluate(self.args , self.state , self.control , __lowerCAmelCase ) return metrics def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase = "test" ): """simple docstring""" __magic_name__ :int = self.get_test_dataloader(__lowerCAmelCase ) # Temporarily disable metric computation, we will do it in the loop here. __magic_name__ :Dict = self.compute_metrics __magic_name__ :str = None __magic_name__ :Optional[int] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: __magic_name__ :int = eval_loop( __lowerCAmelCase , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__lowerCAmelCase , ) finally: __magic_name__ :List[Any] = compute_metrics if self.post_process_function is None or self.compute_metrics is None: return output __magic_name__ :Optional[Any] = self.post_process_function(__lowerCAmelCase , __lowerCAmelCase , output.predictions , '''predict''' ) __magic_name__ :Dict = self.compute_metrics(__lowerCAmelCase ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(F'''{metric_key_prefix}_''' ): __magic_name__ :List[str] = metrics.pop(__lowerCAmelCase ) return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=__lowerCAmelCase ) def A ( self , __lowerCAmelCase="./" ): """simple docstring""" __magic_name__ :List[Any] = self.eval_dataset __magic_name__ :Any = self.get_eval_dataloader(__lowerCAmelCase ) __magic_name__ :int = next(iter(__lowerCAmelCase ) ) # saving device - to make it consistent __magic_name__ :str = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' ) # convert to tuple __magic_name__ :int = tuple(v.to(__lowerCAmelCase ) for k, v in batch.items() ) logger.info('''Converting model to be onnx compatible''' ) from pytorch_quantization.nn import TensorQuantizer __magic_name__ :Any = True __magic_name__ :Optional[int] = self.model.to(__lowerCAmelCase ) model.eval() model.float() __magic_name__ :Any = model.module if hasattr(__lowerCAmelCase , '''module''' ) else model quant_trainer.configure_model(__lowerCAmelCase , self.quant_trainer_args ) __magic_name__ :int = os.path.join(__lowerCAmelCase , '''model.onnx''' ) logger.info(F'''exporting model to 
{output_model_file}''' ) __magic_name__ :Dict = {0: '''batch_size''', 1: '''seq_len'''} torch.onnx.export( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , export_params=__lowerCAmelCase , opset_version=1_3 , do_constant_folding=__lowerCAmelCase , input_names=['''input_ids''', '''attention_mask''', '''token_type_ids'''] , output_names=['''output_start_logits''', '''output_end_logits'''] , dynamic_axes={ '''input_ids''': axes, '''attention_mask''': axes, '''token_type_ids''': axes, '''output_start_logits''': axes, '''output_end_logits''': axes, } , verbose=__lowerCAmelCase , ) logger.info('''onnx export finished''' )
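# A rough driver sketch for the trainer subclass above; it matches the quantization-qdqbert
# research example in transformers, where the obfuscated methods are `calibrate`, `evaluate`,
# `predict` and `save_onnx`. All names below are assumptions:
#
#   trainer = QuestionAnsweringTrainer(
#       model=model,
#       args=training_args,
#       train_dataset=train_dataset,
#       eval_examples=eval_examples,
#       post_process_function=post_processing_function,
#       quant_trainer_args=quant_trainer_args,
#   )
#   trainer.calibrate()                 # percentile/entropy calibration on train_dataset
#   metrics = trainer.evaluate()
#   trainer.save_onnx(output_dir="./")  # exports model.onnx with dynamic batch/seq axes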
0
0
"""simple docstring""" import math def A__ ( __lowerCamelCase, __lowerCamelCase ): """simple docstring""" if 0 not in (x, y): # We use the relation x^y = y*log10(x), where 10 is the base. return y * math.logaa(__lowerCamelCase ) else: if x == 0: # 0 raised to any number is 0 return 0 elif y == 0: return 1 # any number raised to 0 is 1 raise AssertionError('This should never happen' ) if __name__ == "__main__": # Main function # Read two numbers from input and typecast them to int using map function. # Here x is the base and y is the power. a__ : Optional[Any] = """Enter the base and the power separated by a comma: """ a__ : Optional[int] = map(int, input(prompt).split(""",""")) a__ : int = map(int, input(prompt).split(""",""")) # We find the log of each number, using the function res(), which takes two # arguments. a__ : List[Any] = res(xa, ya) a__ : Dict = res(xa, ya) # We check for the largest number if resa > resa: print("""Largest number is""", xa, """^""", ya) elif resa > resa: print("""Largest number is""", xa, """^""", ya) else: print("""Both are equal""")
589
def base16_encode(data: bytes) -> str:
    """simple docstring"""
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    """simple docstring"""
    if (len(data) % 2) != 0:
        raise ValueError(
            '''Base16 encoded data is invalid: Data does not have an even number of hex digits.'''
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set('''0123456789ABCDEF'''):
        raise ValueError(
            '''Base16 encoded data is invalid: Data is not uppercase hex or it contains invalid characters.'''
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 1_6) for i in range(0, len(data), 2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
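# Round-trip check for the two helpers above:
#   base16_encode(b"Hello World!")            -> "48656C6C6F20576F726C6421"
#   base16_decode("48656C6C6F20576F726C6421") -> b"Hello World!"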
0
0
def _a ( UpperCAmelCase ) -> Optional[int]: """simple docstring""" lowerCamelCase__ : str = current_set.copy() for row_index, row in enumerate(UpperCAmelCase ): lowerCamelCase__ : Tuple = row[0] for column_index, column in enumerate(UpperCAmelCase ): if magnitude == 0: lowerCamelCase__ : Optional[Any] = column continue lowerCamelCase__ : Union[str, Any] = column / magnitude # Subtract to cancel term lowerCamelCase__ : List[str] = current_set[0] lowerCamelCase__ : List[Any] = [first_row] lowerCamelCase__ : Any = current_set[1::] for row in current_set: lowerCamelCase__ : str = [] # If first term is 0, it is already in form we want, so we preserve it if row[0] == 0: final_set.append(UpperCAmelCase ) continue for column_index in range(len(UpperCAmelCase ) ): temp_row.append(first_row[column_index] - row[column_index] ) final_set.append(UpperCAmelCase ) # Create next recursion iteration set if len(final_set[0] ) != 3: lowerCamelCase__ : Tuple = final_set[0] lowerCamelCase__ : List[Any] = [] lowerCamelCase__ : int = [] for row in final_set[1::]: current_first_column.append(row[0] ) next_iteration.append(row[1::] ) lowerCamelCase__ : int = simplify(UpperCAmelCase ) for i in range(len(UpperCAmelCase ) ): resultant[i].insert(0 , current_first_column[i] ) resultant.insert(0 , UpperCAmelCase ) lowerCamelCase__ : Any = resultant return final_set def _a ( UpperCAmelCase ) -> Tuple: """simple docstring""" if len(UpperCAmelCase ) == 0: raise IndexError('''solve_simultaneous() requires n lists of length n+1''' ) lowerCamelCase__ : int = len(UpperCAmelCase ) + 1 if any(len(UpperCAmelCase ) != _length for item in equations ): raise IndexError('''solve_simultaneous() requires n lists of length n+1''' ) for row in equations: if any(not isinstance(UpperCAmelCase , (int, float) ) for column in row ): raise ValueError('''solve_simultaneous() requires lists of integers''' ) if len(UpperCAmelCase ) == 1: return [equations[0][-1] / equations[0][0]] lowerCamelCase__ : List[Any] = equations.copy() if any(0 in row for row in data_set ): lowerCamelCase__ : Any = data_set.copy() lowerCamelCase__ : List[str] = [] for row_index, row in enumerate(UpperCAmelCase ): if 0 not in row: lowerCamelCase__ : Any = data_set.pop(UpperCAmelCase ) break if not full_row: raise ValueError('''solve_simultaneous() requires at least 1 full equation''' ) data_set.insert(0 , UpperCAmelCase ) lowerCamelCase__ : Tuple = data_set.copy() lowerCamelCase__ : Optional[Any] = simplify(UpperCAmelCase ) lowerCamelCase__ : Optional[Any] = simplified[::-1] lowerCamelCase__ : list = [] for row in simplified: lowerCamelCase__ : int = row[-1] if not solutions: if row[-2] == 0: solutions.append(0 ) continue solutions.append(current_solution / row[-2] ) continue lowerCamelCase__ : Union[str, Any] = row.copy()[: len(UpperCAmelCase ) - 1 :] while temp_row[0] == 0: temp_row.pop(0 ) if len(UpperCAmelCase ) == 0: solutions.append(0 ) continue lowerCamelCase__ : Dict = temp_row[1::] lowerCamelCase__ : Any = temp_row[::-1] for column_index, column in enumerate(UpperCAmelCase ): current_solution -= column * solutions[column_index] solutions.append(UpperCAmelCase ) lowerCamelCase__ : List[Any] = [] for item in solutions: final.append(float(round(UpperCAmelCase , 5 ) ) ) return final[::-1] if __name__ == "__main__": import doctest doctest.testmod() _A : List[str] = [ [2, 1, 1, 1, 1, 4], [1, 2, 1, 1, 1, 5], [1, 1, 2, 1, 1, 6], [1, 1, 1, 2, 1, 7], [1, 1, 1, 1, 2, 8], ] print(solve_simultaneous(eq)) print(solve_simultaneous([[4, 2]]))
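# Expected behaviour of the elimination solver above (the two functions are `simplify` and
# `solve_simultaneous` upstream; each inner list is one equation, last entry the constant):
#   solve_simultaneous([[1, 2, 3], [4, 5, 6]])  ->  [-1.0, 2.0]   # x + 2y = 3, 4x + 5y = 6
#   solve_simultaneous([[4, 2]])                ->  [0.5]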
315
import pytest
import requests

from datasets.utils.file_utils import http_head

from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline


@pytest.mark.integration
def test_offline_with_timeout():
    """simple docstring"""
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request('''GET''', '''https://huggingface.co''')
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request('''GET''', '''https://huggingface.co''', timeout=1.0)


@pytest.mark.integration
def test_offline_with_connection_error():
    """simple docstring"""
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request('''GET''', '''https://huggingface.co''')


def test_offline_with_datasets_offline_mode_enabled():
    """simple docstring"""
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(ConnectionError):
            http_head('''https://huggingface.co''')
0
0
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available


_import_structure = {"""tokenization_herbert""": ["""HerbertTokenizer"""]}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""tokenization_herbert_fast"""] = ["""HerbertTokenizerFast"""]


if TYPE_CHECKING:
    from .tokenization_herbert import HerbertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_herbert_fast import HerbertTokenizerFast

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
209
import math
from collections.abc import Iterator
from itertools import takewhile


def is_prime(number: int) -> bool:
    """simple docstring"""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator() -> Iterator[int]:
    """simple docstring"""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(n: int = 2_0_0_0_0_0_0) -> int:
    """simple docstring"""
    return sum(takewhile(lambda x: x < n, prime_generator()))


if __name__ == "__main__":
    print(f"{solution() = }")
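# Sanity checks for the Project Euler #10 style solution above:
#   solution(10)        -> 17             (2 + 3 + 5 + 7)
#   solution(2_000_000) -> 142913828922   (the published answer for primes below two million)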
0
0
import argparse import copy def __UpperCamelCase (lowerCAmelCase : Tuple ) -> str: A = {} with open(lowerCAmelCase ) as f: for line in f: if line.split()[0] not in dict_of_neighbours: A = [] _list.append([line.split()[1], line.split()[2]] ) A = _list else: dict_of_neighbours[line.split()[0]].append( [line.split()[1], line.split()[2]] ) if line.split()[1] not in dict_of_neighbours: A = [] _list.append([line.split()[0], line.split()[2]] ) A = _list else: dict_of_neighbours[line.split()[1]].append( [line.split()[0], line.split()[2]] ) return dict_of_neighbours def __UpperCamelCase (lowerCAmelCase : Optional[int], lowerCAmelCase : Optional[int] ) -> Tuple: with open(lowerCAmelCase ) as f: A = f.read(1 ) A = start_node A = [] A = start_node A = 0 while visiting not in first_solution: A = 10_000 for k in dict_of_neighbours[visiting]: if int(k[1] ) < int(lowerCAmelCase ) and k[0] not in first_solution: A = k[1] A = k[0] first_solution.append(lowerCAmelCase ) A = distance_of_first_solution + int(lowerCAmelCase ) A = best_node first_solution.append(lowerCAmelCase ) A = 0 for k in dict_of_neighbours[first_solution[-2]]: if k[0] == start_node: break position += 1 A = ( distance_of_first_solution + int(dict_of_neighbours[first_solution[-2]][position][1] ) - 10_000 ) return first_solution, distance_of_first_solution def __UpperCamelCase (lowerCAmelCase : Union[str, Any], lowerCAmelCase : Tuple ) -> Union[str, Any]: A = [] for n in solution[1:-1]: A = solution.index(lowerCAmelCase ) for kn in solution[1:-1]: A = solution.index(lowerCAmelCase ) if n == kn: continue A = copy.deepcopy(lowerCAmelCase ) A = kn A = n A = 0 for k in _tmp[:-1]: A = _tmp[_tmp.index(lowerCAmelCase ) + 1] for i in dict_of_neighbours[k]: if i[0] == next_node: A = distance + int(i[1] ) _tmp.append(lowerCAmelCase ) if _tmp not in neighborhood_of_solution: neighborhood_of_solution.append(_tmp ) A = len(neighborhood_of_solution[0] ) - 1 neighborhood_of_solution.sort(key=lambda lowerCAmelCase : x[index_of_last_item_in_the_list] ) return neighborhood_of_solution def __UpperCamelCase (lowerCAmelCase : Optional[int], lowerCAmelCase : List[str], lowerCAmelCase : str, lowerCAmelCase : Tuple, lowerCAmelCase : Optional[int] ) -> Tuple: A = 1 A = first_solution A = [] A = distance_of_first_solution A = solution while count <= iters: A = find_neighborhood(lowerCAmelCase, lowerCAmelCase ) A = 0 A = neighborhood[index_of_best_solution] A = len(lowerCAmelCase ) - 1 A = False while not found: A = 0 while i < len(lowerCAmelCase ): if best_solution[i] != solution[i]: A = best_solution[i] A = solution[i] break A = i + 1 if [first_exchange_node, second_exchange_node] not in tabu_list and [ second_exchange_node, first_exchange_node, ] not in tabu_list: tabu_list.append([first_exchange_node, second_exchange_node] ) A = True A = best_solution[:-1] A = neighborhood[index_of_best_solution][best_cost_index] if cost < best_cost: A = cost A = solution else: A = index_of_best_solution + 1 A = neighborhood[index_of_best_solution] if len(lowerCAmelCase ) >= size: tabu_list.pop(0 ) A = count + 1 return best_solution_ever, best_cost def __UpperCamelCase (lowerCAmelCase : Any=None ) -> Optional[Any]: A = generate_neighbours(args.File ) A = generate_first_solution( args.File, lowerCAmelCase ) A = tabu_search( lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, args.Iterations, args.Size, ) print(f'''Best solution: {best_sol}, with total distance: {best_cost}.''' ) if __name__ == "__main__": _UpperCAmelCase = argparse.ArgumentParser(description="Tabu Search") 
parser.add_argument( "-f", "--File", type=str, help="Path to the file containing the data", required=True, ) parser.add_argument( "-i", "--Iterations", type=int, help="How many iterations the algorithm should perform", required=True, ) parser.add_argument( "-s", "--Size", type=int, help="Size of the tabu list", required=True ) # Pass the arguments to main method main(parser.parse_args())
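# A hypothetical invocation of the tabu-search script above (the file name,
# contents, and sizes are illustrative assumptions, not from the original):
# each line of the input file names two nodes and the distance between them,
# and the first character of the file is used as the start node.
#
#   $ cat tabu_data.txt
#   a b 20
#   a c 18
#   b c 10
#
#   $ python tabu_search.py -f tabu_data.txt -i 100 -s 5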
import unittest import numpy as np from transformers.testing_utils import require_flax, require_tf, require_torch from transformers.utils import ( expand_dims, flatten_dict, is_flax_available, is_tf_available, is_torch_available, reshape, squeeze, transpose, ) if is_flax_available(): import jax.numpy as jnp if is_tf_available(): import tensorflow as tf if is_torch_available(): import torch class lowerCamelCase_ ( unittest.TestCase ): def A ( self ): """simple docstring""" __magic_name__ :List[Any] = { '''task_specific_params''': { '''summarization''': {'''length_penalty''': 1.0, '''max_length''': 1_2_8, '''min_length''': 1_2, '''num_beams''': 4}, '''summarization_cnn''': {'''length_penalty''': 2.0, '''max_length''': 1_4_2, '''min_length''': 5_6, '''num_beams''': 4}, '''summarization_xsum''': {'''length_penalty''': 1.0, '''max_length''': 6_2, '''min_length''': 1_1, '''num_beams''': 6}, } } __magic_name__ :List[str] = { '''task_specific_params.summarization.length_penalty''': 1.0, '''task_specific_params.summarization.max_length''': 1_2_8, '''task_specific_params.summarization.min_length''': 1_2, '''task_specific_params.summarization.num_beams''': 4, '''task_specific_params.summarization_cnn.length_penalty''': 2.0, '''task_specific_params.summarization_cnn.max_length''': 1_4_2, '''task_specific_params.summarization_cnn.min_length''': 5_6, '''task_specific_params.summarization_cnn.num_beams''': 4, '''task_specific_params.summarization_xsum.length_penalty''': 1.0, '''task_specific_params.summarization_xsum.max_length''': 6_2, '''task_specific_params.summarization_xsum.min_length''': 1_1, '''task_specific_params.summarization_xsum.num_beams''': 6, } self.assertEqual(flatten_dict(__lowerCAmelCase ) , __lowerCAmelCase ) def A ( self ): """simple docstring""" __magic_name__ :Optional[Any] = np.random.randn(3 , 4 ) self.assertTrue(np.allclose(transpose(__lowerCAmelCase ) , x.transpose() ) ) __magic_name__ :List[Any] = np.random.randn(3 , 4 , 5 ) self.assertTrue(np.allclose(transpose(__lowerCAmelCase , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) ) @require_torch def A ( self ): """simple docstring""" __magic_name__ :Optional[Any] = np.random.randn(3 , 4 ) __magic_name__ :Tuple = torch.tensor(__lowerCAmelCase ) self.assertTrue(np.allclose(transpose(__lowerCAmelCase ) , transpose(__lowerCAmelCase ).numpy() ) ) __magic_name__ :int = np.random.randn(3 , 4 , 5 ) __magic_name__ :Union[str, Any] = torch.tensor(__lowerCAmelCase ) self.assertTrue(np.allclose(transpose(__lowerCAmelCase , axes=(1, 2, 0) ) , transpose(__lowerCAmelCase , axes=(1, 2, 0) ).numpy() ) ) @require_tf def A ( self ): """simple docstring""" __magic_name__ :int = np.random.randn(3 , 4 ) __magic_name__ :Optional[Any] = tf.constant(__lowerCAmelCase ) self.assertTrue(np.allclose(transpose(__lowerCAmelCase ) , transpose(__lowerCAmelCase ).numpy() ) ) __magic_name__ :List[str] = np.random.randn(3 , 4 , 5 ) __magic_name__ :Optional[Any] = tf.constant(__lowerCAmelCase ) self.assertTrue(np.allclose(transpose(__lowerCAmelCase , axes=(1, 2, 0) ) , transpose(__lowerCAmelCase , axes=(1, 2, 0) ).numpy() ) ) @require_flax def A ( self ): """simple docstring""" __magic_name__ :int = np.random.randn(3 , 4 ) __magic_name__ :Dict = jnp.array(__lowerCAmelCase ) self.assertTrue(np.allclose(transpose(__lowerCAmelCase ) , np.asarray(transpose(__lowerCAmelCase ) ) ) ) __magic_name__ :Dict = np.random.randn(3 , 4 , 5 ) __magic_name__ :Dict = jnp.array(__lowerCAmelCase ) self.assertTrue(np.allclose(transpose(__lowerCAmelCase , axes=(1, 2, 0) ) , 
np.asarray(transpose(__lowerCAmelCase , axes=(1, 2, 0) ) ) ) ) def A ( self ): """simple docstring""" __magic_name__ :Any = np.random.randn(3 , 4 ) self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (4, 3) ) , np.reshape(__lowerCAmelCase , (4, 3) ) ) ) __magic_name__ :Union[str, Any] = np.random.randn(3 , 4 , 5 ) self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (1_2, 5) ) , np.reshape(__lowerCAmelCase , (1_2, 5) ) ) ) @require_torch def A ( self ): """simple docstring""" __magic_name__ :Dict = np.random.randn(3 , 4 ) __magic_name__ :Tuple = torch.tensor(__lowerCAmelCase ) self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (4, 3) ) , reshape(__lowerCAmelCase , (4, 3) ).numpy() ) ) __magic_name__ :Union[str, Any] = np.random.randn(3 , 4 , 5 ) __magic_name__ :List[str] = torch.tensor(__lowerCAmelCase ) self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (1_2, 5) ) , reshape(__lowerCAmelCase , (1_2, 5) ).numpy() ) ) @require_tf def A ( self ): """simple docstring""" __magic_name__ :Dict = np.random.randn(3 , 4 ) __magic_name__ :Union[str, Any] = tf.constant(__lowerCAmelCase ) self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (4, 3) ) , reshape(__lowerCAmelCase , (4, 3) ).numpy() ) ) __magic_name__ :List[Any] = np.random.randn(3 , 4 , 5 ) __magic_name__ :Optional[int] = tf.constant(__lowerCAmelCase ) self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (1_2, 5) ) , reshape(__lowerCAmelCase , (1_2, 5) ).numpy() ) ) @require_flax def A ( self ): """simple docstring""" __magic_name__ :List[str] = np.random.randn(3 , 4 ) __magic_name__ :Any = jnp.array(__lowerCAmelCase ) self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (4, 3) ) , np.asarray(reshape(__lowerCAmelCase , (4, 3) ) ) ) ) __magic_name__ :List[Any] = np.random.randn(3 , 4 , 5 ) __magic_name__ :List[str] = jnp.array(__lowerCAmelCase ) self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (1_2, 5) ) , np.asarray(reshape(__lowerCAmelCase , (1_2, 5) ) ) ) ) def A ( self ): """simple docstring""" __magic_name__ :List[Any] = np.random.randn(1 , 3 , 4 ) self.assertTrue(np.allclose(squeeze(__lowerCAmelCase ) , np.squeeze(__lowerCAmelCase ) ) ) __magic_name__ :Optional[Any] = np.random.randn(1 , 4 , 1 , 5 ) self.assertTrue(np.allclose(squeeze(__lowerCAmelCase , axis=2 ) , np.squeeze(__lowerCAmelCase , axis=2 ) ) ) @require_torch def A ( self ): """simple docstring""" __magic_name__ :Dict = np.random.randn(1 , 3 , 4 ) __magic_name__ :List[Any] = torch.tensor(__lowerCAmelCase ) self.assertTrue(np.allclose(squeeze(__lowerCAmelCase ) , squeeze(__lowerCAmelCase ).numpy() ) ) __magic_name__ :List[str] = np.random.randn(1 , 4 , 1 , 5 ) __magic_name__ :str = torch.tensor(__lowerCAmelCase ) self.assertTrue(np.allclose(squeeze(__lowerCAmelCase , axis=2 ) , squeeze(__lowerCAmelCase , axis=2 ).numpy() ) ) @require_tf def A ( self ): """simple docstring""" __magic_name__ :int = np.random.randn(1 , 3 , 4 ) __magic_name__ :Tuple = tf.constant(__lowerCAmelCase ) self.assertTrue(np.allclose(squeeze(__lowerCAmelCase ) , squeeze(__lowerCAmelCase ).numpy() ) ) __magic_name__ :Tuple = np.random.randn(1 , 4 , 1 , 5 ) __magic_name__ :Optional[int] = tf.constant(__lowerCAmelCase ) self.assertTrue(np.allclose(squeeze(__lowerCAmelCase , axis=2 ) , squeeze(__lowerCAmelCase , axis=2 ).numpy() ) ) @require_flax def A ( self ): """simple docstring""" __magic_name__ :Tuple = np.random.randn(1 , 3 , 4 ) __magic_name__ :Optional[Any] = jnp.array(__lowerCAmelCase ) self.assertTrue(np.allclose(squeeze(__lowerCAmelCase ) , 
np.asarray(squeeze(__lowerCAmelCase ) ) ) ) __magic_name__ :List[Any] = np.random.randn(1 , 4 , 1 , 5 ) __magic_name__ :Optional[Any] = jnp.array(__lowerCAmelCase ) self.assertTrue(np.allclose(squeeze(__lowerCAmelCase , axis=2 ) , np.asarray(squeeze(__lowerCAmelCase , axis=2 ) ) ) ) def A ( self ): """simple docstring""" __magic_name__ :Any = np.random.randn(3 , 4 ) self.assertTrue(np.allclose(expand_dims(__lowerCAmelCase , axis=1 ) , np.expand_dims(__lowerCAmelCase , axis=1 ) ) ) @require_torch def A ( self ): """simple docstring""" __magic_name__ :List[Any] = np.random.randn(3 , 4 ) __magic_name__ :Any = torch.tensor(__lowerCAmelCase ) self.assertTrue(np.allclose(expand_dims(__lowerCAmelCase , axis=1 ) , expand_dims(__lowerCAmelCase , axis=1 ).numpy() ) ) @require_tf def A ( self ): """simple docstring""" __magic_name__ :Union[str, Any] = np.random.randn(3 , 4 ) __magic_name__ :Union[str, Any] = tf.constant(__lowerCAmelCase ) self.assertTrue(np.allclose(expand_dims(__lowerCAmelCase , axis=1 ) , expand_dims(__lowerCAmelCase , axis=1 ).numpy() ) ) @require_flax def A ( self ): """simple docstring""" __magic_name__ :List[str] = np.random.randn(3 , 4 ) __magic_name__ :Tuple = jnp.array(__lowerCAmelCase ) self.assertTrue(np.allclose(expand_dims(__lowerCAmelCase , axis=1 ) , np.asarray(expand_dims(__lowerCAmelCase , axis=1 ) ) ) )
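# A minimal sketch (an assumption, not the transformers implementation) of the
# kind of framework-agnostic dispatch the `transpose`/`reshape`/`squeeze`/
# `expand_dims` ops tested above rely on: inspect the array type, then call
# the matching backend. Real code would also branch on torch/tf/jax tensors
# after checking that each framework is importable.
import numpy as np


def generic_transpose(array, axes=None):
    """Transpose a numpy array; non-numpy inputs would be routed to their own backend."""
    if isinstance(array, np.ndarray):
        return np.transpose(array, axes=axes)
    raise TypeError(f"unsupported array type: {type(array)}")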
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class LanguageModeling(TaskTemplate):
    task: str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({})
    text_column: str = "text"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text"}
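# Usage sketch (added for illustration, not part of the original module): the
# template maps a dataset's text column onto the canonical "text" name.
#
#   template = LanguageModeling(text_column="content")
#   template.column_mapping  # -> {"content": "text"}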
from pathlib import PurePosixPath from typing import Optional import fsspec from fsspec import AbstractFileSystem from huggingface_hub.hf_api import DatasetInfo from ..utils.file_utils import get_authentication_headers_for_url from ..utils.hub import hf_hub_url class lowerCamelCase_ ( lowerCamelCase ): a__ = '''''' a__ = '''hf-legacy''' # "hf://"" is reserved for hffs def __init__( self , __lowerCAmelCase = None , __lowerCAmelCase = None , **__lowerCAmelCase , ): """simple docstring""" super().__init__(self , **__lowerCAmelCase ) __magic_name__ :List[Any] = repo_info __magic_name__ :Dict = token __magic_name__ :Optional[Any] = None def A ( self ): """simple docstring""" if self.dir_cache is None: __magic_name__ :Any = {} for hf_file in self.repo_info.siblings: # TODO(QL): add sizes __magic_name__ :Optional[int] = { '''name''': hf_file.rfilename, '''size''': None, '''type''': '''file''', } self.dir_cache.update( { str(__lowerCAmelCase ): {'''name''': str(__lowerCAmelCase ), '''size''': None, '''type''': '''directory'''} for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1] } ) def A ( self , __lowerCAmelCase , __lowerCAmelCase = "rb" , **__lowerCAmelCase , ): """simple docstring""" if not isinstance(self.repo_info , __lowerCAmelCase ): raise NotImplementedError(F'''Open is only implemented for dataset repositories, but got {self.repo_info}''' ) __magic_name__ :Union[str, Any] = hf_hub_url(self.repo_info.id , __lowerCAmelCase , revision=self.repo_info.sha ) return fsspec.open( __lowerCAmelCase , mode=__lowerCAmelCase , headers=get_authentication_headers_for_url(__lowerCAmelCase , use_auth_token=self.token ) , client_kwargs={'''trust_env''': True} , ).open() def A ( self , __lowerCAmelCase , **__lowerCAmelCase ): """simple docstring""" self._get_dirs() __magic_name__ :str = self._strip_protocol(__lowerCAmelCase ) if path in self.dir_cache: return self.dir_cache[path] else: raise FileNotFoundError(__lowerCAmelCase ) def A ( self , __lowerCAmelCase , __lowerCAmelCase=False , **__lowerCAmelCase ): """simple docstring""" self._get_dirs() __magic_name__ :Union[str, Any] = PurePosixPath(path.strip('''/''' ) ) __magic_name__ :Dict = {} for p, f in self.dir_cache.items(): __magic_name__ :int = PurePosixPath(p.strip('''/''' ) ) __magic_name__ :Tuple = p.parent if root == path: __magic_name__ :Optional[Any] = f __magic_name__ :List[Any] = list(paths.values() ) if detail: return out else: return sorted(f['''name'''] for f in out )
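# Hedged usage sketch (the class name is obfuscated above, and the variable
# names below are illustrative assumptions): this legacy "hf-legacy" filesystem
# is constructed from a repo's info object and resolves files lazily from the
# repo's siblings list.
#
#   fs = HfFileSystem(repo_info=dataset_info, token=token)
#   fs.ls("")                       # list cached sibling files
#   with fs.open("data/train.csv", "rb") as f:
#       head = f.read(100)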
import multiprocessing import os from typing import BinaryIO, Optional, Union import fsspec from .. import Dataset, Features, NamedSplit, config from ..formatting import query_table from ..packaged_modules.json.json import Json from ..utils import logging from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader class _lowercase ( _UpperCAmelCase ): """simple docstring""" def __init__( self , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = False , UpperCAmelCase = False , UpperCAmelCase = None , UpperCAmelCase = None , **UpperCAmelCase , ): '''simple docstring''' super().__init__( __lowerCAmelCase , split=__lowerCAmelCase , features=__lowerCAmelCase , cache_dir=__lowerCAmelCase , keep_in_memory=__lowerCAmelCase , streaming=__lowerCAmelCase , num_proc=__lowerCAmelCase , **__lowerCAmelCase , ) _lowercase = field _lowercase = path_or_paths if isinstance(__lowerCAmelCase , __lowerCAmelCase ) else {self.split: path_or_paths} _lowercase = Json( cache_dir=__lowerCAmelCase , data_files=__lowerCAmelCase , features=__lowerCAmelCase , field=__lowerCAmelCase , **__lowerCAmelCase , ) def _UpperCAmelCase ( self ): '''simple docstring''' if self.streaming: _lowercase = self.builder.as_streaming_dataset(split=self.split ) # Build regular (map-style) dataset else: _lowercase = None _lowercase = None _lowercase = None _lowercase = None self.builder.download_and_prepare( download_config=__lowerCAmelCase , download_mode=__lowerCAmelCase , verification_mode=__lowerCAmelCase , base_path=__lowerCAmelCase , num_proc=self.num_proc , ) _lowercase = self.builder.as_dataset( split=self.split , verification_mode=__lowerCAmelCase , in_memory=self.keep_in_memory ) return dataset class _lowercase : """simple docstring""" def __init__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = None , **UpperCAmelCase , ): '''simple docstring''' if num_proc is not None and num_proc <= 0: raise ValueError(F'''num_proc {num_proc} must be an integer > 0.''' ) _lowercase = dataset _lowercase = path_or_buf _lowercase = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE _lowercase = num_proc _lowercase = '''utf-8''' _lowercase = to_json_kwargs def _UpperCAmelCase ( self ): '''simple docstring''' _lowercase = self.to_json_kwargs.pop("""path_or_buf""" , __lowerCAmelCase ) _lowercase = self.to_json_kwargs.pop("""orient""" , """records""" ) _lowercase = self.to_json_kwargs.pop("""lines""" , True if orient == """records""" else False ) _lowercase = self.to_json_kwargs.pop("""index""" , False if orient in ["""split""", """table"""] else True ) _lowercase = self.to_json_kwargs.pop("""compression""" , __lowerCAmelCase ) if compression not in [None, "infer", "gzip", "bz2", "xz"]: raise NotImplementedError(F'''`datasets` currently does not support {compression} compression''' ) if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ): with fsspec.open(self.path_or_buf , """wb""" , compression=__lowerCAmelCase ) as buffer: _lowercase = self._write(file_obj=__lowerCAmelCase , orient=__lowerCAmelCase , lines=__lowerCAmelCase , index=__lowerCAmelCase , **self.to_json_kwargs ) else: if compression: raise NotImplementedError( F'''The compression parameter is not supported when writing to a buffer, but compression={compression}''' """ was passed. 
Please provide a local path instead.""" ) _lowercase = self._write( file_obj=self.path_or_buf , orient=__lowerCAmelCase , lines=__lowerCAmelCase , index=__lowerCAmelCase , **self.to_json_kwargs ) return written def _UpperCAmelCase ( self , UpperCAmelCase ): '''simple docstring''' _lowercase = args _lowercase = query_table( table=self.dataset.data , key=slice(__lowerCAmelCase , offset + self.batch_size ) , indices=self.dataset._indices , ) _lowercase = batch.to_pandas().to_json( path_or_buf=__lowerCAmelCase , orient=__lowerCAmelCase , lines=__lowerCAmelCase , index=__lowerCAmelCase , **__lowerCAmelCase ) if not json_str.endswith("""\n""" ): json_str += "\n" return json_str.encode(self.encoding ) def _UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase , ): '''simple docstring''' _lowercase = 0 if self.num_proc is None or self.num_proc == 1: for offset in logging.tqdm( range(0 , len(self.dataset ) , self.batch_size ) , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating json from Arrow format""" , ): _lowercase = self._batch_json((offset, orient, lines, index, to_json_kwargs) ) written += file_obj.write(__lowerCAmelCase ) else: _lowercase = len(self.dataset ), self.batch_size with multiprocessing.Pool(self.num_proc ) as pool: for json_str in logging.tqdm( pool.imap( self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , __lowerCAmelCase , __lowerCAmelCase )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating json from Arrow format""" , ): written += file_obj.write(__lowerCAmelCase ) return written
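# Hedged usage sketch of the reader/writer pair above. In the `datasets`
# library these classes are named JsonDatasetReader and JsonDatasetWriter;
# the paths below are illustrative assumptions:
#
#   ds = JsonDatasetReader("data.jsonl", cache_dir="/tmp/cache").read()
#   JsonDatasetWriter(ds, "out.jsonl", num_proc=2).write()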
import pyarrow.parquet as pq import pytest from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config from datasets.features.image import Image from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def __lowercase ( snake_case, snake_case ): """simple docstring""" assert isinstance(snake_case, snake_case ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('''keep_in_memory''', [False, True] ) def __lowercase ( snake_case, snake_case, snake_case ): """simple docstring""" __magic_name__ :Tuple = tmp_path / '''cache''' __magic_name__ :int = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): __magic_name__ :Optional[Any] = ParquetDatasetReader(snake_case, cache_dir=snake_case, keep_in_memory=snake_case ).read() _check_parquet_dataset(snake_case, snake_case ) @pytest.mark.parametrize( '''features''', [ None, {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}, {'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''}, {'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''}, {'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''}, ], ) def __lowercase ( snake_case, snake_case, snake_case ): """simple docstring""" __magic_name__ :List[str] = tmp_path / '''cache''' __magic_name__ :int = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} __magic_name__ :Tuple = features.copy() if features else default_expected_features __magic_name__ :Union[str, Any] = ( Features({feature: Value(snake_case ) for feature, dtype in features.items()} ) if features is not None else None ) __magic_name__ :int = ParquetDatasetReader(snake_case, features=snake_case, cache_dir=snake_case ).read() _check_parquet_dataset(snake_case, snake_case ) @pytest.mark.parametrize('''split''', [None, NamedSplit('''train''' ), '''train''', '''test'''] ) def __lowercase ( snake_case, snake_case, snake_case ): """simple docstring""" __magic_name__ :str = tmp_path / '''cache''' __magic_name__ :List[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} __magic_name__ :int = ParquetDatasetReader(snake_case, cache_dir=snake_case, split=snake_case ).read() _check_parquet_dataset(snake_case, snake_case ) assert dataset.split == split if split else "train" @pytest.mark.parametrize('''path_type''', [str, list] ) def __lowercase ( snake_case, snake_case, snake_case ): """simple docstring""" if issubclass(snake_case, snake_case ): __magic_name__ :Union[str, Any] = parquet_path elif issubclass(snake_case, snake_case ): __magic_name__ :Union[str, Any] = [parquet_path] __magic_name__ :Optional[int] = tmp_path / '''cache''' __magic_name__ :Optional[int] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} __magic_name__ :str = ParquetDatasetReader(snake_case, cache_dir=snake_case ).read() _check_parquet_dataset(snake_case, snake_case ) def __lowercase ( snake_case, snake_case, snake_case=("train",) ): """simple docstring""" assert isinstance(snake_case, 
snake_case ) for split in splits: __magic_name__ :Optional[Any] = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('''keep_in_memory''', [False, True] ) def __lowercase ( snake_case, snake_case, snake_case ): """simple docstring""" __magic_name__ :Any = tmp_path / '''cache''' __magic_name__ :Optional[int] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): __magic_name__ :Tuple = ParquetDatasetReader( {'''train''': parquet_path}, cache_dir=snake_case, keep_in_memory=snake_case ).read() _check_parquet_datasetdict(snake_case, snake_case ) @pytest.mark.parametrize( '''features''', [ None, {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}, {'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''}, {'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''}, {'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''}, ], ) def __lowercase ( snake_case, snake_case, snake_case ): """simple docstring""" __magic_name__ :Optional[Any] = tmp_path / '''cache''' __magic_name__ :Dict = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} __magic_name__ :int = features.copy() if features else default_expected_features __magic_name__ :List[Any] = ( Features({feature: Value(snake_case ) for feature, dtype in features.items()} ) if features is not None else None ) __magic_name__ :Optional[int] = ParquetDatasetReader({'''train''': parquet_path}, features=snake_case, cache_dir=snake_case ).read() _check_parquet_datasetdict(snake_case, snake_case ) @pytest.mark.parametrize('''split''', [None, NamedSplit('''train''' ), '''train''', '''test'''] ) def __lowercase ( snake_case, snake_case, snake_case ): """simple docstring""" if split: __magic_name__ :Dict = {split: parquet_path} else: __magic_name__ :Optional[int] = '''train''' __magic_name__ :Dict = {'''train''': parquet_path, '''test''': parquet_path} __magic_name__ :List[Any] = tmp_path / '''cache''' __magic_name__ :Optional[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} __magic_name__ :Optional[Any] = ParquetDatasetReader(snake_case, cache_dir=snake_case ).read() _check_parquet_datasetdict(snake_case, snake_case, splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() ) def __lowercase ( snake_case, snake_case ): """simple docstring""" __magic_name__ :str = ParquetDatasetWriter(snake_case, tmp_path / '''foo.parquet''' ) assert writer.write() > 0 __magic_name__ :List[Any] = pq.ParquetFile(tmp_path / '''foo.parquet''' ) __magic_name__ :List[Any] = pf.read() assert dataset.data.table == output_table def __lowercase ( snake_case, snake_case ): """simple docstring""" __magic_name__ :List[str] = str(shared_datadir / '''test_image_rgb.jpg''' ) __magic_name__ :Tuple = {'''image''': [image_path]} __magic_name__ :List[Any] = Features({'''image''': Image()} ) __magic_name__ :Tuple = Dataset.from_dict(snake_case, features=snake_case ) __magic_name__ :Union[str, Any] = ParquetDatasetWriter(snake_case, tmp_path / '''foo.parquet''' ) assert writer.write() > 0 __magic_name__ :List[str] = 
Dataset.from_parquet(str(tmp_path / '''foo.parquet''' ) ) assert dataset.features == reloaded_dataset.features __magic_name__ :List[str] = ParquetDatasetReader(str(tmp_path / '''foo.parquet''' ), streaming=snake_case ).read() assert dataset.features == reloaded_iterable_dataset.features @pytest.mark.parametrize( '''feature, expected''', [ (Features({'''foo''': Value('''int32''' )} ), None), (Features({'''image''': Image(), '''foo''': Value('''int32''' )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS), (Features({'''nested''': Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS), ], ) def __lowercase ( snake_case, snake_case ): """simple docstring""" assert get_writer_batch_size(snake_case ) == expected
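# Hedged round-trip example of the ParquetDatasetWriter / ParquetDatasetReader
# pair exercised above (paths are illustrative assumptions):
#
#   from datasets import Dataset
#   ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2]})
#   ParquetDatasetWriter(ds, "foo.parquet").write()
#   reloaded = ParquetDatasetReader("foo.parquet").read()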
import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_mbart import MBartTokenizer else: SCREAMING_SNAKE_CASE : Optional[int] = None SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__) SCREAMING_SNAKE_CASE : str = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""} SCREAMING_SNAKE_CASE : Tuple = { """vocab_file""": { """facebook/mbart-large-en-ro""": ( """https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model""" ), """facebook/mbart-large-cc25""": ( """https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model""" ), }, """tokenizer_file""": { """facebook/mbart-large-en-ro""": """https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json""", """facebook/mbart-large-cc25""": """https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json""", }, } SCREAMING_SNAKE_CASE : Dict = { """facebook/mbart-large-en-ro""": 1024, """facebook/mbart-large-cc25""": 1024, } # fmt: off SCREAMING_SNAKE_CASE : Union[str, Any] = ["""ar_AR""", """cs_CZ""", """de_DE""", """en_XX""", """es_XX""", """et_EE""", """fi_FI""", """fr_XX""", """gu_IN""", """hi_IN""", """it_IT""", """ja_XX""", """kk_KZ""", """ko_KR""", """lt_LT""", """lv_LV""", """my_MM""", """ne_NP""", """nl_XX""", """ro_RO""", """ru_RU""", """si_LK""", """tr_TR""", """vi_VN""", """zh_CN"""] class A_ ( a_ ): _SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES _SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP _SCREAMING_SNAKE_CASE = ["""input_ids""", """attention_mask"""] _SCREAMING_SNAKE_CASE = MBartTokenizer _SCREAMING_SNAKE_CASE = [] _SCREAMING_SNAKE_CASE = [] def __init__( self : Optional[int] , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , __SCREAMING_SNAKE_CASE : List[Any]="<s>" , __SCREAMING_SNAKE_CASE : int="</s>" , __SCREAMING_SNAKE_CASE : Optional[int]="</s>" , __SCREAMING_SNAKE_CASE : Any="<s>" , __SCREAMING_SNAKE_CASE : Optional[int]="<unk>" , __SCREAMING_SNAKE_CASE : str="<pad>" , __SCREAMING_SNAKE_CASE : str="<mask>" , __SCREAMING_SNAKE_CASE : Optional[int]=None , __SCREAMING_SNAKE_CASE : Optional[Any]=None , __SCREAMING_SNAKE_CASE : Any=None , **__SCREAMING_SNAKE_CASE : Tuple , ): __a = AddedToken(__lowerCAmelCase , lstrip=__lowerCAmelCase , rstrip=__lowerCAmelCase ) if isinstance(__lowerCAmelCase , __lowerCAmelCase ) else mask_token super().__init__( vocab_file=__lowerCAmelCase , tokenizer_file=__lowerCAmelCase , bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , sep_token=__lowerCAmelCase , cls_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , mask_token=__lowerCAmelCase , src_lang=__lowerCAmelCase , tgt_lang=__lowerCAmelCase , additional_special_tokens=__lowerCAmelCase , **__lowerCAmelCase , ) __a = vocab_file __a = False if not self.vocab_file else True __a = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. 
_additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens] ) self.add_special_tokens({"additional_special_tokens": _additional_special_tokens} ) __a = { lang_code: self.convert_tokens_to_ids(__lowerCAmelCase ) for lang_code in FAIRSEQ_LANGUAGE_CODES } __a = src_lang if src_lang is not None else '''en_XX''' __a = self.convert_tokens_to_ids(self._src_lang ) __a = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) @property def _UpperCAmelCase ( self : List[str] ): return self._src_lang @src_lang.setter def _UpperCAmelCase ( self : List[str] , __SCREAMING_SNAKE_CASE : Dict ): __a = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def _UpperCAmelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Dict = None ): if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def _UpperCAmelCase ( self : List[Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : List[str] = None ): __a = [self.sep_token_id] __a = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _UpperCAmelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] , **__SCREAMING_SNAKE_CASE : Optional[int] ): if src_lang is None or tgt_lang is None: raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" ) __a = src_lang __a = self(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase ) __a = self.convert_tokens_to_ids(__lowerCAmelCase ) __a = tgt_lang_id return inputs def _UpperCAmelCase ( self : str , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : str = "en_XX" , __SCREAMING_SNAKE_CASE : str = None , __SCREAMING_SNAKE_CASE : str = "ro_RO" , **__SCREAMING_SNAKE_CASE : int , ): __a = src_lang __a = tgt_lang return super().prepare_seqaseq_batch(__lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ) def _UpperCAmelCase ( self : Optional[int] ): return self.set_src_lang_special_tokens(self.src_lang ) def _UpperCAmelCase ( self : Optional[int] ): return self.set_tgt_lang_special_tokens(self.tgt_lang ) def _UpperCAmelCase ( self : List[str] , __SCREAMING_SNAKE_CASE : Any ): __a = self.convert_tokens_to_ids(__lowerCAmelCase ) __a = [] __a = [self.eos_token_id, self.cur_lang_code] __a = self.convert_ids_to_tokens(self.prefix_tokens ) __a = self.convert_ids_to_tokens(self.suffix_tokens ) __a = processors.TemplateProcessing( single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def _UpperCAmelCase ( self : List[str] , __SCREAMING_SNAKE_CASE : Dict ): __a = self.convert_tokens_to_ids(__lowerCAmelCase ) __a = [] __a = [self.eos_token_id, self.cur_lang_code] __a = self.convert_ids_to_tokens(self.prefix_tokens ) __a = self.convert_ids_to_tokens(self.suffix_tokens ) __a = processors.TemplateProcessing( single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , 
special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def _UpperCAmelCase ( self : Dict , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[Any] = None ): if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer." ) if not os.path.isdir(__lowerCAmelCase ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory.""" ) return __a = os.path.join( __lowerCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCAmelCase ): copyfile(self.vocab_file , __lowerCAmelCase ) return (out_vocab_file,)
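# Hedged usage sketch of the language-code handling above (the checkpoint name
# appears in the pretrained-model tables earlier in this file; the sentence and
# exact attribute usage are illustrative assumptions that may vary by
# transformers version):
#
#   tok = MBartTokenizerFast.from_pretrained("facebook/mbart-large-en-ro")
#   tok.src_lang = "en_XX"
#   batch = tok("UN Chief says there is no plan", return_tensors="pt")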
def multiplicative_persistence(num: int) -> int:
    """Return the number of steps needed to reduce num to a single digit by
    repeatedly multiplying its digits together."""
    if not isinstance(num, int):
        raise ValueError("multiplicative_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("multiplicative_persistence() does not accept negative values")
    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]
        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]
        num_string = str(total)
        steps += 1
    return steps


def additive_persistence(num: int) -> int:
    """Return the number of steps needed to reduce num to a single digit by
    repeatedly summing its digits."""
    if not isinstance(num, int):
        raise ValueError("additive_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("additive_persistence() does not accept negative values")
    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]
        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]
        num_string = str(total)
        steps += 1
    return steps


if __name__ == "__main__":
    import doctest

    doctest.testmod()
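# Worked example (added for illustration): 39 -> 3*9 = 27 -> 2*7 = 14 -> 1*4 = 4,
# so multiplicative_persistence(39) == 3; and 199 -> 1+9+9 = 19 -> 1+9 = 10
# -> 1+0 = 1, so additive_persistence(199) == 3.
if __name__ == "__main__":
    assert multiplicative_persistence(39) == 3
    assert additive_persistence(199) == 3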
import random
from typing import Any


def fisher_yates_shuffle(data: list) -> list[Any]:
    """Shuffle data in place by swapping two randomly chosen positions per pass."""
    for _ in range(len(data)):
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data


if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ["python", "says", "hello", "!"]
    print("Fisher-Yates Shuffle:")
    print("List", integers, strings)
    print("FY Shuffle", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
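# Note: the routine above swaps two random positions per pass, a common variant
# but not the textbook Fisher-Yates algorithm. A sketch of the classic unbiased
# version (added for comparison, not part of the original file):
import random


def fisher_yates_classic(data: list) -> list:
    # Walk from the end, swapping each element with a uniformly chosen earlier
    # (or same) position; this yields a uniformly random permutation.
    for i in range(len(data) - 1, 0, -1):
        j = random.randint(0, i)
        data[i], data[j] = data[j], data[i]
    return data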
import math import os import re import sys import unittest from pathlib import Path from typing import Tuple from unittest.mock import patch from parameterized import parameterized from transformers.testing_utils import ( CaptureStderr, ExtendSysPath, TestCasePlus, execute_subprocess_async, get_gpu_count, get_torch_dist_unique_port, require_apex, require_bitsandbytes, require_fairscale, require_torch, require_torch_gpu, require_torch_multi_gpu, require_torch_non_multi_gpu, slow, ) from transformers.trainer_callback import TrainerState from transformers.trainer_utils import set_seed SCREAMING_SNAKE_CASE__ : List[Any] = os.path.abspath(os.path.dirname(__file__)) with ExtendSysPath(f"{bindir}/../../examples/pytorch/translation"): from run_translation import main # noqa set_seed(42) SCREAMING_SNAKE_CASE__ : Optional[Any] = """sshleifer/student_marian_en_ro_6_1""" SCREAMING_SNAKE_CASE__ : List[Any] = """sshleifer/tiny-mbart""" @require_torch class lowerCamelCase_ ( lowerCamelCase ): def A ( self , __lowerCAmelCase=False , __lowerCAmelCase=None , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , ): """simple docstring""" __magic_name__ :List[Any] = self.run_trainer( eval_steps=1 , max_len=1_2 , model_name=__lowerCAmelCase , num_train_epochs=1 , distributed=__lowerCAmelCase , extra_args_str=__lowerCAmelCase , predict_with_generate=__lowerCAmelCase , do_train=__lowerCAmelCase , do_eval=__lowerCAmelCase , do_predict=__lowerCAmelCase , ) __magic_name__ :Any = TrainerState.load_from_json(os.path.join(__lowerCAmelCase , '''trainer_state.json''' ) ).log_history if not do_eval: return __magic_name__ :Any = [log for log in logs if '''eval_loss''' in log.keys()] __magic_name__ :str = eval_metrics[0] if predict_with_generate: assert "eval_bleu" in first_step_stats __magic_name__ :Tuple = eval_metrics[-1] assert isinstance(last_step_stats['''eval_bleu'''] , __lowerCAmelCase ) assert not math.isnan(float(last_step_stats['''eval_loss'''] ) ), "eval_loss must not be `nan`" @require_torch_non_multi_gpu def A ( self ): """simple docstring""" self.run_seqaseq_quick() @require_torch_multi_gpu def A ( self ): """simple docstring""" self.run_seqaseq_quick(distributed=__lowerCAmelCase ) @require_torch_multi_gpu def A ( self ): """simple docstring""" self.run_seqaseq_quick(distributed=__lowerCAmelCase ) @unittest.skip('''Requires an update of the env running those tests''' ) @require_torch_multi_gpu @require_fairscale def A ( self ): """simple docstring""" self.run_seqaseq_quick(distributed=__lowerCAmelCase , extra_args_str='''--sharded_ddp simple''' ) @unittest.skip('''Requires an update of the env running those tests''' ) @require_torch_multi_gpu @require_fairscale def A ( self ): """simple docstring""" self.run_seqaseq_quick(distributed=__lowerCAmelCase , extra_args_str='''--sharded_ddp simple --fp16''' ) @unittest.skip('''Requires an update of the env running those tests''' ) @require_torch_multi_gpu @require_fairscale def A ( self ): """simple docstring""" self.run_seqaseq_quick(distributed=__lowerCAmelCase , extra_args_str='''--sharded_ddp zero_dp_2''' , predict_with_generate=__lowerCAmelCase ) @unittest.skip('''Requires an update of the env running those tests''' ) @require_torch_multi_gpu @require_fairscale def A ( self ): """simple docstring""" self.run_seqaseq_quick( distributed=__lowerCAmelCase , extra_args_str='''--sharded_ddp zero_dp_2 --fp16''' , predict_with_generate=__lowerCAmelCase ) @require_apex @require_torch_gpu def A ( self ): """simple docstring""" # 
XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same # program and it breaks other tests that run from the same pytest worker, therefore until this is # sorted out it must be run only in an external program, that is distributed=True in this # test and only under one or more gpus - if we want cpu will need to make a special test # # specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via # 2nd main() call it botches the future eval. # self.run_seqaseq_quick(distributed=__lowerCAmelCase , extra_args_str='''--fp16 --fp16_backend=apex''' ) # test 2nd time - was getting eval_loss': nan' # to reproduce the problem set distributed=False self.run_seqaseq_quick(distributed=__lowerCAmelCase , extra_args_str='''--fp16 --fp16_backend=apex''' ) @parameterized.expand(['''base''', '''low''', '''high''', '''mixed'''] ) @require_torch_multi_gpu def A ( self , __lowerCAmelCase ): """simple docstring""" # as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout __magic_name__ :Any = { # test with the default log_level - should be info and thus log info once '''base''': {'''extra_args_str''': '''''', '''n_matches''': 1}, # test with low log_level and log_level_replica - should be noisy on all processes # now the info string should appear twice on 2 processes '''low''': {'''extra_args_str''': '''--log_level debug --log_level_replica debug''', '''n_matches''': 2}, # test with high log_level and low log_level_replica # now the info string should appear once only on the replica '''high''': {'''extra_args_str''': '''--log_level error --log_level_replica debug''', '''n_matches''': 1}, # test with high log_level and log_level_replica - should be quiet on all processes '''mixed''': {'''extra_args_str''': '''--log_level error --log_level_replica error''', '''n_matches''': 0}, } __magic_name__ :Optional[Any] = experiments[experiment_id] __magic_name__ :List[Any] = {'''distributed''': True, '''predict_with_generate''': False, '''do_eval''': False, '''do_predict''': False} __magic_name__ :Optional[int] = '''Running training''' with CaptureStderr() as cl: self.run_seqaseq_quick(**__lowerCAmelCase , extra_args_str=data['''extra_args_str'''] ) __magic_name__ :int = len(re.findall(__lowerCAmelCase , cl.err ) ) self.assertEqual(__lowerCAmelCase , data['''n_matches'''] ) @slow def A ( self ): """simple docstring""" __magic_name__ :List[str] = self.run_trainer( eval_steps=2 , max_len=1_2_8 , model_name=__lowerCAmelCase , learning_rate=3E-4 , num_train_epochs=1_0 , distributed=__lowerCAmelCase , ) # Check metrics __magic_name__ :Optional[int] = TrainerState.load_from_json(os.path.join(__lowerCAmelCase , '''trainer_state.json''' ) ).log_history __magic_name__ :List[str] = [log for log in logs if '''eval_loss''' in log.keys()] __magic_name__ :Any = eval_metrics[0] __magic_name__ :int = eval_metrics[-1] assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing" assert isinstance(last_step_stats['''eval_bleu'''] , __lowerCAmelCase ) # test if do_predict saves generations and metrics __magic_name__ :List[Any] = os.listdir(__lowerCAmelCase ) __magic_name__ :List[str] = {os.path.basename(__lowerCAmelCase ) for p in contents} assert "generated_predictions.txt" in contents assert "predict_results.json" in contents @slow @require_bitsandbytes def A ( self ): """simple docstring""" from transformers.training_args import OptimizerNames def train_and_return_metrics(__lowerCAmelCase ) -> Tuple[int, float]: __magic_name__ 
:str = '''--skip_memory_metrics 0''' __magic_name__ :Dict = self.run_trainer( max_len=1_2_8 , model_name=__lowerCAmelCase , learning_rate=3E-4 , num_train_epochs=1 , optim=__lowerCAmelCase , distributed=__lowerCAmelCase , extra_args_str=__lowerCAmelCase , do_eval=__lowerCAmelCase , do_predict=__lowerCAmelCase , n_gpus_to_use=1 , ) # Check metrics __magic_name__ :Optional[Any] = TrainerState.load_from_json(Path(__lowerCAmelCase , '''trainer_state.json''' ) ).log_history __magic_name__ :int = int(logs[0]['''train_mem_gpu_peaked_delta'''] / 2**2_0 ) __magic_name__ :Optional[Any] = int(logs[0]['''train_mem_gpu_alloc_delta'''] / 2**2_0 ) __magic_name__ :Any = logs[0]['''train_loss'''] return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss __magic_name__ , __magic_name__ , __magic_name__ :int = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value ) __magic_name__ , __magic_name__ , __magic_name__ :Tuple = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value ) __magic_name__ :Tuple = gpu_alloc_mem_orig - gpu_alloc_mem_bnb __magic_name__ :Tuple = gpu_peak_mem_orig + gpu_alloc_mem_orig __magic_name__ :List[Any] = gpu_peak_mem_bnb + gpu_alloc_mem_bnb __magic_name__ :Optional[int] = gpu_total_mem_orig - gpu_total_mem_bnb # sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which # doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized # in 2 bytes and the diff in optim memory usage is derived as so: # # - normal 25*8=~200MB (8 bytes per param) # - bnb 25*2= ~50MB (2 bytes per param) # # Thus we should expect ~150MB total memory saved. # # Peak memory should be the same - the total should be different by about that same margin # # After leaving a small margin to accommodate for differences between gpus let's check # that we have at least 120MB in savings __magic_name__ :Optional[Any] = 1_2_0 # uncomment the following if this test starts failing - requires py38 for a new print feature # gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb # print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB") # print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB") # print(f"{gpu_alloc_mem_diff=}MB") # print(f"{gpu_peak_mem_diff=}MB") # print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB") # print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB") self.assertGreater( __lowerCAmelCase , __lowerCAmelCase , '''should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got''' F''' a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and''' F''' gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB''' , ) self.assertGreater( __lowerCAmelCase , __lowerCAmelCase , '''should use ~150MB less total gpu memory with BNB, compared to without it for this model but got''' F''' a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and''' F''' gpu_total_mem_bnb={gpu_total_mem_bnb}MB''' , ) self.assertEqual( __lowerCAmelCase , __lowerCAmelCase , F'''loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}''' ) def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = 3E-3 , __lowerCAmelCase = "adafactor" , __lowerCAmelCase = False , __lowerCAmelCase = None , __lowerCAmelCase = 0 , __lowerCAmelCase = True , __lowerCAmelCase = True , __lowerCAmelCase = True , __lowerCAmelCase = True , __lowerCAmelCase = None , ): """simple docstring""" 
__magic_name__ :int = self.test_file_dir / '''../fixtures/tests_samples/wmt_en_ro''' __magic_name__ :Dict = self.get_auto_remove_tmp_dir() __magic_name__ :Tuple = F''' --model_name_or_path {model_name} --train_file {data_dir}/train.json --validation_file {data_dir}/val.json --test_file {data_dir}/test.json --output_dir {output_dir} --overwrite_output_dir --max_train_samples 8 --max_source_length {max_len} --max_target_length {max_len} --do_train --num_train_epochs {str(__lowerCAmelCase )} --per_device_train_batch_size 4 --learning_rate {learning_rate} --warmup_steps 8 --logging_steps 0 --logging_strategy no --save_steps {str(__lowerCAmelCase )} --group_by_length --label_smoothing_factor 0.1 --target_lang ro_RO --source_lang en_XX '''.split() __magic_name__ :str = F''' --do_eval --per_device_eval_batch_size 4 --max_eval_samples 8 --val_max_target_length {max_len} --evaluation_strategy steps --eval_steps {str(__lowerCAmelCase )} '''.split() __magic_name__ :Dict = ''' --do_predict '''.split() __magic_name__ :Optional[int] = [] if do_train: args += args_train if do_eval: args += args_eval if do_predict: args += args_predict if predict_with_generate: args += "--predict_with_generate".split() if do_train: if optim == "adafactor": args += "--adafactor".split() else: args += F'''--optim {optim}'''.split() if extra_args_str is not None: args += extra_args_str.split() if distributed: if n_gpus_to_use is None: __magic_name__ :List[Any] = get_gpu_count() __magic_name__ :Tuple = get_torch_dist_unique_port() __magic_name__ :Union[str, Any] = F''' -m torch.distributed.run --nproc_per_node={n_gpus_to_use} --master_port={master_port} {self.examples_dir_str}/pytorch/translation/run_translation.py '''.split() __magic_name__ :Any = [sys.executable] + distributed_args + args # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die execute_subprocess_async(__lowerCAmelCase , env=self.get_env() ) else: __magic_name__ :List[Any] = ['''run_translation.py'''] + args with patch.object(__lowerCAmelCase , '''argv''' , __lowerCAmelCase ): main() return output_dir
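# For reference, a hedged standalone invocation equivalent to what run_trainer
# assembles above (paths and values are illustrative assumptions):
#
#   python examples/pytorch/translation/run_translation.py \
#     --model_name_or_path sshleifer/student_marian_en_ro_6_1 \
#     --train_file wmt_en_ro/train.json --validation_file wmt_en_ro/val.json \
#     --output_dir /tmp/out --do_train --do_eval --predict_with_generate \
#     --source_lang en_XX --target_lang ro_RO --max_source_length 128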
'''simple docstring''' import os import tempfile import unittest from transformers import FlaubertConfig, is_torch_available from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( FlaubertForMultipleChoice, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertModel, FlaubertWithLMHeadModel, ) from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST class _snake_case ( a_ ): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=99 , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=5 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=5_12 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE="last" , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , ): '''simple docstring''' lowerCAmelCase = parent lowerCAmelCase = batch_size lowerCAmelCase = seq_length lowerCAmelCase = is_training lowerCAmelCase = use_input_lengths lowerCAmelCase = use_token_type_ids lowerCAmelCase = use_labels lowerCAmelCase = gelu_activation lowerCAmelCase = sinusoidal_embeddings lowerCAmelCase = causal lowerCAmelCase = asm lowerCAmelCase = n_langs lowerCAmelCase = vocab_size lowerCAmelCase = n_special lowerCAmelCase = hidden_size lowerCAmelCase = num_hidden_layers lowerCAmelCase = num_attention_heads lowerCAmelCase = hidden_dropout_prob lowerCAmelCase = attention_probs_dropout_prob lowerCAmelCase = max_position_embeddings lowerCAmelCase = type_vocab_size lowerCAmelCase = type_sequence_label_size lowerCAmelCase = initializer_range lowerCAmelCase = num_labels lowerCAmelCase = num_choices lowerCAmelCase = summary_type lowerCAmelCase = use_proj lowerCAmelCase = scope def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) lowerCAmelCase = None if self.use_input_lengths: lowerCAmelCase = ( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length lowerCAmelCase = None if self.use_token_type_ids: lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) lowerCAmelCase = None lowerCAmelCase = None lowerCAmelCase = None if self.use_labels: lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCAmelCase = ids_tensor([self.batch_size] , 2 ).float() lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices ) lowerCAmelCase = self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, 
choice_labels, input_mask, ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' return FlaubertConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , ) def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ): '''simple docstring''' lowerCAmelCase = FlaubertModel(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() lowerCAmelCase = model(__lowerCAmelCase , lengths=__lowerCAmelCase , langs=__lowerCAmelCase ) lowerCAmelCase = model(__lowerCAmelCase , langs=__lowerCAmelCase ) lowerCAmelCase = model(__lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ): '''simple docstring''' lowerCAmelCase = FlaubertWithLMHeadModel(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() lowerCAmelCase = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ): '''simple docstring''' lowerCAmelCase = FlaubertForQuestionAnsweringSimple(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() lowerCAmelCase = model(__lowerCAmelCase ) lowerCAmelCase = model(__lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ): '''simple docstring''' lowerCAmelCase = FlaubertForQuestionAnswering(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() lowerCAmelCase = model(__lowerCAmelCase ) lowerCAmelCase = model( __lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase , cls_index=__lowerCAmelCase , is_impossible=__lowerCAmelCase , p_mask=__lowerCAmelCase , ) lowerCAmelCase = model( __lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase , cls_index=__lowerCAmelCase , is_impossible=__lowerCAmelCase , ) (lowerCAmelCase ) = result_with_labels.to_tuple() 
lowerCAmelCase = model(__lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase ) (lowerCAmelCase ) = result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape , () ) self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual( result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) ) def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ): '''simple docstring''' lowerCAmelCase = FlaubertForSequenceClassification(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() lowerCAmelCase = model(__lowerCAmelCase ) lowerCAmelCase = model(__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ): '''simple docstring''' lowerCAmelCase = self.num_labels lowerCAmelCase = FlaubertForTokenClassification(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() lowerCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ): '''simple docstring''' lowerCAmelCase = self.num_choices lowerCAmelCase = FlaubertForMultipleChoice(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() lowerCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCAmelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCAmelCase = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowerCAmelCase = self.prepare_config_and_inputs() ( lowerCAmelCase ) = config_and_inputs lowerCAmelCase = { '''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''lengths''': input_lengths, '''attention_mask''': input_mask, } return config, inputs_dict @require_torch class _snake_case ( a_ , a_ , unittest.TestCase ): SCREAMING_SNAKE_CASE : List[str] = ( ( FlaubertModel, FlaubertWithLMHeadModel, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, 
FlaubertForMultipleChoice, ) if is_torch_available() else () ) SCREAMING_SNAKE_CASE : Dict = ( { '''feature-extraction''': FlaubertModel, '''fill-mask''': FlaubertWithLMHeadModel, '''question-answering''': FlaubertForQuestionAnsweringSimple, '''text-classification''': FlaubertForSequenceClassification, '''token-classification''': FlaubertForTokenClassification, '''zero-shot''': FlaubertForSequenceClassification, } if is_torch_available() else {} ) def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): '''simple docstring''' if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith('Fast' ) ): # `QAPipelineTests` fails for a few models when the slower tokenizers are used. # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizers return True return False def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ): '''simple docstring''' lowerCAmelCase = super()._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase ) if return_labels: if model_class.__name__ == "FlaubertForQuestionAnswering": lowerCAmelCase = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__lowerCAmelCase ) lowerCAmelCase = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__lowerCAmelCase ) return inputs_dict def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowerCAmelCase = FlaubertModelTester(self ) lowerCAmelCase = ConfigTester(self , config_class=__lowerCAmelCase , emb_dim=37 ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' self.config_tester.run_common_tests() def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_model(*__lowerCAmelCase ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_lm_head(*__lowerCAmelCase ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_simple_qa(*__lowerCAmelCase ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_qa(*__lowerCAmelCase ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_sequence_classif(*__lowerCAmelCase ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_token_classif(*__lowerCAmelCase ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_multiple_choice(*__lowerCAmelCase ) @slow def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCAmelCase = FlaubertModel.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) @slow @require_torch_gpu def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring''' lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # FlauBertForMultipleChoice behaves incorrectly in JIT environments. if model_class == FlaubertForMultipleChoice: return lowerCAmelCase = True lowerCAmelCase = model_class(config=__lowerCAmelCase ) lowerCAmelCase = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) lowerCAmelCase = torch.jit.trace( __lowerCAmelCase , (inputs_dict['input_ids'].to('cpu' ), inputs_dict['attention_mask'].to('cpu' )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(__lowerCAmelCase , os.path.join(__lowerCAmelCase , 'traced_model.pt' ) ) lowerCAmelCase = torch.jit.load(os.path.join(__lowerCAmelCase , 'traced_model.pt' ) , map_location=__lowerCAmelCase ) loaded(inputs_dict['input_ids'].to(__lowerCAmelCase ) , inputs_dict['attention_mask'].to(__lowerCAmelCase ) ) @require_torch class _snake_case ( unittest.TestCase ): @slow def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowerCAmelCase = FlaubertModel.from_pretrained('flaubert/flaubert_base_cased' ) lowerCAmelCase = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] ) with torch.no_grad(): lowerCAmelCase = model(__lowerCAmelCase )[0] lowerCAmelCase = torch.Size((1, 11, 7_68) ) self.assertEqual(output.shape , __lowerCAmelCase ) lowerCAmelCase = torch.tensor( [[[-2.6_251, -1.4_298, -0.0_227], [-2.8_510, -1.6_387, 0.2_258], [-2.8_114, -1.1_832, -0.3_066]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __lowerCAmelCase , atol=1e-4 ) )
284
import sys SCREAMING_SNAKE_CASE__ : Optional[Any] = ( """73167176531330624919225119674426574742355349194934""" """96983520312774506326239578318016984801869478851843""" """85861560789112949495459501737958331952853208805511""" """12540698747158523863050715693290963295227443043557""" """66896648950445244523161731856403098711121722383113""" """62229893423380308135336276614282806444486645238749""" """30358907296290491560440772390713810515859307960866""" """70172427121883998797908792274921901699720888093776""" """65727333001053367881220235421809751254540594752243""" """52584907711670556013604839586446706324415722155397""" """53697817977846174064955149290862569321978468622482""" """83972241375657056057490261407972968652414535100474""" """82166370484403199890008895243450658541227588666881""" """16427171479924442928230863465674813919123162824586""" """17866458359124566529476545682848912883142607690042""" """24219022671055626321111109370544217506941658960408""" """07198403850962455444362981230987879927244284909188""" """84580156166097919133875499200524063689912560717606""" """05886116467109405077541002256983155200055935729725""" """71636269561882670428252483600823257530420752963450""" ) def __lowercase ( snake_case = N ): """simple docstring""" __magic_name__ :Optional[int] = -sys.maxsize - 1 for i in range(len(snake_case ) - 1_2 ): __magic_name__ :List[Any] = 1 for j in range(1_3 ): product *= int(n[i + j] ) if product > largest_product: __magic_name__ :str = product return largest_product if __name__ == "__main__": print(f"{solution() = }")
0
0
import json import logging import os import socket import git import numpy as np import torch logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO, ) snake_case__ : str = logging.getLogger(__name__) def __lowerCamelCase ( A__ : Optional[Any] ) -> Dict: lowerCamelCase_ : Optional[Any] = git.Repo(search_parent_directories=A__ ) lowerCamelCase_ : Dict = { '''repo_id''': str(A__ ), '''repo_sha''': str(repo.head.object.hexsha ), '''repo_branch''': str(repo.active_branch ), } with open(os.path.join(A__ , """git_log.json""" ) , """w""" ) as f: json.dump(A__ , A__ , indent=4 ) def __lowerCamelCase ( A__ : List[str] ) -> Optional[int]: if params.n_gpu <= 0: lowerCamelCase_ : Union[str, Any] = 0 lowerCamelCase_ : Optional[Any] = -1 lowerCamelCase_ : int = True lowerCamelCase_ : Union[str, Any] = False return assert torch.cuda.is_available() logger.info("""Initializing GPUs""" ) if params.n_gpu > 1: assert params.local_rank != -1 lowerCamelCase_ : Any = int(os.environ["""WORLD_SIZE"""] ) lowerCamelCase_ : str = int(os.environ["""N_GPU_NODE"""] ) lowerCamelCase_ : List[str] = int(os.environ["""RANK"""] ) # number of nodes / node ID lowerCamelCase_ : Optional[Any] = params.world_size // params.n_gpu_per_node lowerCamelCase_ : Optional[int] = params.global_rank // params.n_gpu_per_node lowerCamelCase_ : str = True assert params.n_nodes == int(os.environ["""N_NODES"""] ) assert params.node_id == int(os.environ["""NODE_RANK"""] ) # local job (single GPU) else: assert params.local_rank == -1 lowerCamelCase_ : str = 1 lowerCamelCase_ : str = 0 lowerCamelCase_ : Dict = 0 lowerCamelCase_ : Tuple = 0 lowerCamelCase_ : int = 1 lowerCamelCase_ : int = 1 lowerCamelCase_ : Dict = False # sanity checks assert params.n_nodes >= 1 assert 0 <= params.node_id < params.n_nodes assert 0 <= params.local_rank <= params.global_rank < params.world_size assert params.world_size == params.n_nodes * params.n_gpu_per_node # define whether this is the master process / if we are in multi-node distributed mode lowerCamelCase_ : List[Any] = params.node_id == 0 and params.local_rank == 0 lowerCamelCase_ : Tuple = params.n_nodes > 1 # summary lowerCamelCase_ : Optional[int] = f'''--- Global rank: {params.global_rank} - ''' logger.info(PREFIX + """Number of nodes: %i""" % params.n_nodes ) logger.info(PREFIX + """Node ID : %i""" % params.node_id ) logger.info(PREFIX + """Local rank : %i""" % params.local_rank ) logger.info(PREFIX + """World size : %i""" % params.world_size ) logger.info(PREFIX + """GPUs per node : %i""" % params.n_gpu_per_node ) logger.info(PREFIX + """Master : %s""" % str(params.is_master ) ) logger.info(PREFIX + """Multi-node : %s""" % str(params.multi_node ) ) logger.info(PREFIX + """Multi-GPU : %s""" % str(params.multi_gpu ) ) logger.info(PREFIX + """Hostname : %s""" % socket.gethostname() ) # set GPU device torch.cuda.set_device(params.local_rank ) # initialize multi-GPU if params.multi_gpu: logger.info("""Initializing PyTorch distributed""" ) torch.distributed.init_process_group( init_method="""env://""" , backend="""nccl""" , ) def __lowerCamelCase ( A__ : List[str] ) -> Union[str, Any]: np.random.seed(args.seed ) torch.manual_seed(args.seed ) if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed )
278
SCREAMING_SNAKE_CASE__ : Tuple = { """a""": """AAAAA""", """b""": """AAAAB""", """c""": """AAABA""", """d""": """AAABB""", """e""": """AABAA""", """f""": """AABAB""", """g""": """AABBA""", """h""": """AABBB""", """i""": """ABAAA""", """j""": """BBBAA""", """k""": """ABAAB""", """l""": """ABABA""", """m""": """ABABB""", """n""": """ABBAA""", """o""": """ABBAB""", """p""": """ABBBA""", """q""": """ABBBB""", """r""": """BAAAA""", """s""": """BAAAB""", """t""": """BAABA""", """u""": """BAABB""", """v""": """BBBAB""", """w""": """BABAA""", """x""": """BABAB""", """y""": """BABBA""", """z""": """BABBB""", """ """: """ """, } SCREAMING_SNAKE_CASE__ : Union[str, Any] = {value: key for key, value in encode_dict.items()} def __lowercase ( snake_case ): """simple docstring""" __magic_name__ :Tuple = '''''' for letter in word.lower(): if letter.isalpha() or letter == " ": encoded += encode_dict[letter] else: raise Exception('''encode() accepts only letters of the alphabet and spaces''' ) return encoded def __lowercase ( snake_case ): """simple docstring""" if set(snake_case ) - {"A", "B", " "} != set(): raise Exception('''decode() accepts only \'A\', \'B\' and spaces''' ) __magic_name__ :Dict = '''''' for word in coded.split(): while len(snake_case ) != 0: decoded += decode_dict[word[:5]] __magic_name__ :int = word[5:] decoded += " " return decoded.strip() if __name__ == "__main__": from doctest import testmod testmod()
0
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) lowerCAmelCase = {"""configuration_vit_mae""": ["""VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTMAEConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase = [ """VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST""", """ViTMAEForPreTraining""", """ViTMAELayer""", """ViTMAEModel""", """ViTMAEPreTrainedModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase = [ """TFViTMAEForPreTraining""", """TFViTMAEModel""", """TFViTMAEPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vit_mae import ( VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST, ViTMAEForPreTraining, ViTMAELayer, ViTMAEModel, ViTMAEPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel else: import sys lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
230
import argparse import torch from torch import nn from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration def __lowercase ( snake_case ): """simple docstring""" __magic_name__ :Optional[Any] = [ '''encoder.version''', '''decoder.version''', '''model.encoder.version''', '''model.decoder.version''', '''decoder.output_projection.weight''', '''_float_tensor''', '''encoder.embed_positions._float_tensor''', '''decoder.embed_positions._float_tensor''', ] for k in ignore_keys: state_dict.pop(snake_case, snake_case ) def __lowercase ( snake_case ): """simple docstring""" __magic_name__ , __magic_name__ :Tuple = emb.weight.shape __magic_name__ :int = nn.Linear(snake_case, snake_case, bias=snake_case ) __magic_name__ :str = emb.weight.data return lin_layer def __lowercase ( snake_case ): """simple docstring""" __magic_name__ :int = torch.load(snake_case, map_location='''cpu''' ) __magic_name__ :Optional[Any] = mam_aaa['''args'''] or mam_aaa['''cfg''']['''model'''] __magic_name__ :List[Any] = mam_aaa['''model'''] remove_ignore_keys_(snake_case ) __magic_name__ :Tuple = state_dict['''encoder.embed_tokens.weight'''].shape[0] __magic_name__ :List[str] = MaMaaaConfig( vocab_size=snake_case, max_position_embeddings=1_0_2_4, encoder_layers=args.encoder_layers, decoder_layers=args.decoder_layers, encoder_attention_heads=args.encoder_attention_heads, decoder_attention_heads=args.decoder_attention_heads, encoder_ffn_dim=args.encoder_ffn_embed_dim, decoder_ffn_dim=args.decoder_ffn_embed_dim, d_model=args.encoder_embed_dim, encoder_layerdrop=args.encoder_layerdrop, decoder_layerdrop=args.decoder_layerdrop, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function='''relu''', ) __magic_name__ :int = state_dict['''decoder.embed_tokens.weight'''] __magic_name__ :List[str] = MaMaaaForConditionalGeneration(snake_case ) model.model.load_state_dict(snake_case, strict=snake_case ) __magic_name__ :List[str] = make_linear_from_emb(model.model.shared ) return model if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument("""fairseq_path""", type=str, help="""path to a model.pt on local filesystem.""") parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") SCREAMING_SNAKE_CASE__ : int = parser.parse_args() SCREAMING_SNAKE_CASE__ : Any = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path) model.save_pretrained(args.pytorch_dump_folder_path)
0
0
"""simple docstring""" import gc import random import unittest import numpy as np import torch from diffusers import DDIMScheduler, KandinskyVaaPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel from diffusers.utils import floats_tensor, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class __magic_name__ ( _UpperCamelCase ,unittest.TestCase ): UpperCamelCase : List[Any] = KandinskyVaaPipeline UpperCamelCase : List[str] = [ "image_embeds", "negative_image_embeds", ] UpperCamelCase : Union[str, Any] = ["image_embeds", "negative_image_embeds"] UpperCamelCase : Any = [ "generator", "height", "width", "latents", "guidance_scale", "num_inference_steps", "return_dict", "guidance_scale", "num_images_per_prompt", "output_type", "return_dict", ] UpperCamelCase : Optional[Any] = False @property def _lowerCamelCase ( self ): """simple docstring""" return 3_2 @property def _lowerCamelCase ( self ): """simple docstring""" return 3_2 @property def _lowerCamelCase ( self ): """simple docstring""" return self.time_input_dim @property def _lowerCamelCase ( self ): """simple docstring""" return self.time_input_dim * 4 @property def _lowerCamelCase ( self ): """simple docstring""" return 1_0_0 @property def _lowerCamelCase ( self ): """simple docstring""" torch.manual_seed(0 ) _lowerCAmelCase = { '''in_channels''': 4, # Out channels is double in channels because predicts mean and variance '''out_channels''': 8, '''addition_embed_type''': '''image''', '''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''), '''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''), '''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''', '''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2), '''layers_per_block''': 1, '''encoder_hid_dim''': self.text_embedder_hidden_size, '''encoder_hid_dim_type''': '''image_proj''', '''cross_attention_dim''': self.cross_attention_dim, '''attention_head_dim''': 4, '''resnet_time_scale_shift''': '''scale_shift''', '''class_embed_type''': None, } _lowerCAmelCase = UNetaDConditionModel(**__lowerCAmelCase ) return model @property def _lowerCamelCase ( self ): """simple docstring""" return { "block_out_channels": [3_2, 6_4], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 1_2, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def _lowerCamelCase ( self ): """simple docstring""" torch.manual_seed(0 ) _lowerCAmelCase = VQModel(**self.dummy_movq_kwargs ) return model def _lowerCamelCase ( self ): """simple docstring""" _lowerCAmelCase = self.dummy_unet _lowerCAmelCase = self.dummy_movq _lowerCAmelCase = DDIMScheduler( num_train_timesteps=1_0_0_0 , beta_schedule='linear' , beta_start=0.0_00_85 , beta_end=0.0_12 , clip_sample=__lowerCAmelCase , set_alpha_to_one=__lowerCAmelCase , steps_offset=1 , prediction_type='epsilon' , thresholding=__lowerCAmelCase , ) _lowerCAmelCase = { '''unet''': unet, '''scheduler''': scheduler, '''movq''': movq, } return components def _lowerCamelCase ( self , __magic_name__ , __magic_name__=0 ): """simple docstring""" _lowerCAmelCase = floats_tensor((1, 
self.text_embedder_hidden_size) , rng=random.Random(__lowerCAmelCase ) ).to(__lowerCAmelCase ) _lowerCAmelCase = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( __lowerCAmelCase ) if str(__lowerCAmelCase ).startswith('mps' ): _lowerCAmelCase = torch.manual_seed(__lowerCAmelCase ) else: _lowerCAmelCase = torch.Generator(device=__lowerCAmelCase ).manual_seed(__lowerCAmelCase ) _lowerCAmelCase = { '''image_embeds''': image_embeds, '''negative_image_embeds''': negative_image_embeds, '''generator''': generator, '''height''': 6_4, '''width''': 6_4, '''guidance_scale''': 4.0, '''num_inference_steps''': 2, '''output_type''': '''np''', } return inputs def _lowerCamelCase ( self ): """simple docstring""" _lowerCAmelCase = '''cpu''' _lowerCAmelCase = self.get_dummy_components() _lowerCAmelCase = self.pipeline_class(**__lowerCAmelCase ) _lowerCAmelCase = pipe.to(__lowerCAmelCase ) pipe.set_progress_bar_config(disable=__lowerCAmelCase ) _lowerCAmelCase = pipe(**self.get_dummy_inputs(__lowerCAmelCase ) ) _lowerCAmelCase = output.images _lowerCAmelCase = pipe( **self.get_dummy_inputs(__lowerCAmelCase ) , return_dict=__lowerCAmelCase , )[0] _lowerCAmelCase = image[0, -3:, -3:, -1] _lowerCAmelCase = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 6_4, 6_4, 3) _lowerCAmelCase = np.array( [0.6_23_79_76, 1.0, 0.36_44_13_32, 1.0, 0.70_63_96_34, 0.29_87_71_86, 0.85_65_21_25, 0.5_21_68_43, 0.54_45_40_46] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 ), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}''' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 ), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}''' @slow @require_torch_gpu class __magic_name__ ( unittest.TestCase ): def _lowerCamelCase ( self ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowerCamelCase ( self ): """simple docstring""" _lowerCAmelCase = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy' ) _lowerCAmelCase = KandinskyVaaPriorPipeline.from_pretrained( 'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.floataa ) pipe_prior.to(__lowerCAmelCase ) _lowerCAmelCase = KandinskyVaaPipeline.from_pretrained( 'kandinsky-community/kandinsky-2-2-decoder' , torch_dtype=torch.floataa ) _lowerCAmelCase = pipeline.to(__lowerCAmelCase ) pipeline.set_progress_bar_config(disable=__lowerCAmelCase ) _lowerCAmelCase = '''red cat, 4k photo''' _lowerCAmelCase = torch.Generator(device='cuda' ).manual_seed(0 ) _lowerCAmelCase = pipe_prior( __lowerCAmelCase , generator=__lowerCAmelCase , num_inference_steps=5 , negative_prompt='' , ).to_tuple() _lowerCAmelCase = torch.Generator(device='cuda' ).manual_seed(0 ) _lowerCAmelCase = pipeline( image_embeds=__lowerCAmelCase , negative_image_embeds=__lowerCAmelCase , generator=__lowerCAmelCase , num_inference_steps=1_0_0 , output_type='np' , ) _lowerCAmelCase = output.images[0] assert image.shape == (5_1_2, 5_1_2, 3) assert_mean_pixel_difference(__lowerCAmelCase , __lowerCAmelCase )
589
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available SCREAMING_SNAKE_CASE__ : Dict = { """configuration_canine""": ["""CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CanineConfig"""], """tokenization_canine""": ["""CanineTokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ : str = [ """CANINE_PRETRAINED_MODEL_ARCHIVE_LIST""", """CanineForMultipleChoice""", """CanineForQuestionAnswering""", """CanineForSequenceClassification""", """CanineForTokenClassification""", """CanineLayer""", """CanineModel""", """CaninePreTrainedModel""", """load_tf_weights_in_canine""", ] if TYPE_CHECKING: from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig from .tokenization_canine import CanineTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_canine import ( CANINE_PRETRAINED_MODEL_ARCHIVE_LIST, CanineForMultipleChoice, CanineForQuestionAnswering, CanineForSequenceClassification, CanineForTokenClassification, CanineLayer, CanineModel, CaninePreTrainedModel, load_tf_weights_in_canine, ) else: import sys SCREAMING_SNAKE_CASE__ : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
0
0
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _A : Any = logging.get_logger(__name__) _A : Optional[Any] = { """google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json""", """google/bigbird-roberta-large""": """https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json""", """google/bigbird-base-trivia-itc""": """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json""", # See all BigBird models at https://huggingface.co/models?filter=big_bird } class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ): _UpperCAmelCase : List[Any] = "big_bird" def __init__( self : List[Any] , A : int=5_0_3_5_8 , A : Dict=7_6_8 , A : Any=1_2 , A : List[Any]=1_2 , A : Tuple=3_0_7_2 , A : List[Any]="gelu_new" , A : Any=0.1 , A : List[str]=0.1 , A : Union[str, Any]=4_0_9_6 , A : Union[str, Any]=2 , A : Dict=0.02 , A : List[Any]=1e-12 , A : Optional[Any]=True , A : List[str]=0 , A : Dict=1 , A : List[str]=2 , A : str=6_6 , A : Optional[Any]="block_sparse" , A : Tuple=True , A : Any=False , A : int=6_4 , A : int=3 , A : int=None , **A : str , ) ->Optional[Any]: super().__init__( pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , sep_token_id=__lowerCAmelCase , **__lowerCAmelCase , ) lowerCamelCase__ : Union[str, Any] = vocab_size lowerCamelCase__ : List[str] = max_position_embeddings lowerCamelCase__ : Dict = hidden_size lowerCamelCase__ : Union[str, Any] = num_hidden_layers lowerCamelCase__ : str = num_attention_heads lowerCamelCase__ : Any = intermediate_size lowerCamelCase__ : List[Any] = hidden_act lowerCamelCase__ : Any = hidden_dropout_prob lowerCamelCase__ : str = attention_probs_dropout_prob lowerCamelCase__ : Union[str, Any] = initializer_range lowerCamelCase__ : Optional[int] = type_vocab_size lowerCamelCase__ : Optional[Any] = layer_norm_eps lowerCamelCase__ : List[str] = use_cache lowerCamelCase__ : Optional[Any] = rescale_embeddings lowerCamelCase__ : Optional[Any] = attention_type lowerCamelCase__ : Optional[Any] = use_bias lowerCamelCase__ : Optional[int] = block_size lowerCamelCase__ : List[Any] = num_random_blocks lowerCamelCase__ : List[str] = classifier_dropout class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ): @property def __lowerCamelCase ( self : int ) ->Any: if self.task == "multiple-choice": lowerCamelCase__ : int = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: lowerCamelCase__ : Dict = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ] )
315
import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class lowerCamelCase_ ( lowerCamelCase ): a__ = ['''image_processor''', '''tokenizer'''] a__ = '''ChineseCLIPImageProcessor''' a__ = ('''BertTokenizer''', '''BertTokenizerFast''') def __init__( self , __lowerCAmelCase=None , __lowerCAmelCase=None , **__lowerCAmelCase ): """simple docstring""" __magic_name__ :Tuple = None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''' , __lowerCAmelCase , ) __magic_name__ :Optional[Any] = kwargs.pop('''feature_extractor''' ) __magic_name__ :Tuple = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) super().__init__(__lowerCAmelCase , __lowerCAmelCase ) __magic_name__ :List[Any] = self.image_processor def __call__( self , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , **__lowerCAmelCase ): """simple docstring""" if text is None and images is None: raise ValueError('''You have to specify either text or images. Both cannot be none.''' ) if text is not None: __magic_name__ :int = self.tokenizer(__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase ) if images is not None: __magic_name__ :Dict = self.image_processor(__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase ) if text is not None and images is not None: __magic_name__ :Union[str, Any] = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**__lowerCAmelCase ) , tensor_type=__lowerCAmelCase ) def A ( self , *__lowerCAmelCase , **__lowerCAmelCase ): """simple docstring""" return self.tokenizer.batch_decode(*__lowerCAmelCase , **__lowerCAmelCase ) def A ( self , *__lowerCAmelCase , **__lowerCAmelCase ): """simple docstring""" return self.tokenizer.decode(*__lowerCAmelCase , **__lowerCAmelCase ) @property def A ( self ): """simple docstring""" __magic_name__ :List[Any] = self.tokenizer.model_input_names __magic_name__ :Any = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) @property def A ( self ): """simple docstring""" warnings.warn( '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , __lowerCAmelCase , ) return self.image_processor_class
0
0
'''simple docstring''' import argparse from transformers import ( TapasConfig, TapasForMaskedLM, TapasForQuestionAnswering, TapasForSequenceClassification, TapasModel, TapasTokenizer, load_tf_weights_in_tapas, ) from transformers.utils import logging logging.set_verbosity_info() def lowerCamelCase ( UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : int ) -> List[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE__ :Tuple = TapasConfig.from_json_file(UpperCAmelCase__ ) # set absolute/relative position embeddings parameter SCREAMING_SNAKE_CASE__ :Optional[int] = reset_position_index_per_cell # set remaining parameters of TapasConfig as well as the model based on the task if task == "SQA": SCREAMING_SNAKE_CASE__ :Any = TapasForQuestionAnswering(config=UpperCAmelCase__ ) elif task == "WTQ": # run_task_main.py hparams SCREAMING_SNAKE_CASE__ :Any = 4 SCREAMING_SNAKE_CASE__ :int = True # hparam_utils.py hparams SCREAMING_SNAKE_CASE__ :int = 0.66_4694 SCREAMING_SNAKE_CASE__ :str = 0.20_7951 SCREAMING_SNAKE_CASE__ :List[Any] = 0.12_1194 SCREAMING_SNAKE_CASE__ :List[Any] = True SCREAMING_SNAKE_CASE__ :int = True SCREAMING_SNAKE_CASE__ :Tuple = False SCREAMING_SNAKE_CASE__ :Tuple = 0.035_2513 SCREAMING_SNAKE_CASE__ :str = TapasForQuestionAnswering(config=UpperCAmelCase__ ) elif task == "WIKISQL_SUPERVISED": # run_task_main.py hparams SCREAMING_SNAKE_CASE__ :Optional[Any] = 4 SCREAMING_SNAKE_CASE__ :Union[str, Any] = False # hparam_utils.py hparams SCREAMING_SNAKE_CASE__ :Optional[int] = 36.4519 SCREAMING_SNAKE_CASE__ :Dict = 0.90_3421 SCREAMING_SNAKE_CASE__ :Any = 222.088 SCREAMING_SNAKE_CASE__ :str = True SCREAMING_SNAKE_CASE__ :int = True SCREAMING_SNAKE_CASE__ :int = True SCREAMING_SNAKE_CASE__ :List[str] = 0.76_3141 SCREAMING_SNAKE_CASE__ :Optional[Any] = TapasForQuestionAnswering(config=UpperCAmelCase__ ) elif task == "TABFACT": SCREAMING_SNAKE_CASE__ :str = TapasForSequenceClassification(config=UpperCAmelCase__ ) elif task == "MLM": SCREAMING_SNAKE_CASE__ :Any = TapasForMaskedLM(config=UpperCAmelCase__ ) elif task == "INTERMEDIATE_PRETRAINING": SCREAMING_SNAKE_CASE__ :str = TapasModel(config=UpperCAmelCase__ ) else: raise ValueError(F'''Task {task} not supported.''' ) print(F'''Building PyTorch model from configuration: {config}''' ) # Load weights from tf checkpoint load_tf_weights_in_tapas(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) # Save pytorch-model (weights and configuration) print(F'''Save PyTorch model to {pytorch_dump_path}''' ) model.save_pretrained(UpperCAmelCase__ ) # Save tokenizer files print(F'''Save tokenizer files to {pytorch_dump_path}''' ) SCREAMING_SNAKE_CASE__ :List[Any] = TapasTokenizer(vocab_file=tf_checkpoint_path[:-1_0] + 'vocab.txt' , model_max_length=5_1_2 ) tokenizer.save_pretrained(UpperCAmelCase__ ) print('Used relative position embeddings:' , model.config.reset_position_index_per_cell ) if __name__ == "__main__": UpperCamelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--task''', default='''SQA''', type=str, help='''Model task for which to convert a checkpoint. Defaults to SQA.''' ) parser.add_argument( '''--reset_position_index_per_cell''', default=False, action='''store_true''', help='''Whether to use relative position embeddings or not. 
Defaults to False.''', ) parser.add_argument( '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint.''' ) parser.add_argument( '''--tapas_config_file''', default=None, type=str, required=True, help=( '''The config json file corresponding to the pre-trained TAPAS model. \n''' '''This specifies the model architecture.''' ), ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) UpperCamelCase_ = parser.parse_args() convert_tf_checkpoint_to_pytorch( args.task, args.reset_position_index_per_cell, args.tf_checkpoint_path, args.tapas_config_file, args.pytorch_dump_path, )
209
from sklearn.metrics import matthews_corrcoef import datasets SCREAMING_SNAKE_CASE__ : Optional[Any] = """ Compute the Matthews correlation coefficient (MCC) The Matthews correlation coefficient is used in machine learning as a measure of the quality of binary and multiclass classifications. It takes into account true and false positives and negatives and is generally regarded as a balanced measure which can be used even if the classes are of very different sizes. The MCC is in essence a correlation coefficient value between -1 and +1. A coefficient of +1 represents a perfect prediction, 0 an average random prediction and -1 an inverse prediction. The statistic is also known as the phi coefficient. [source: Wikipedia] """ SCREAMING_SNAKE_CASE__ : Union[str, Any] = """ Args: predictions (list of int): Predicted labels, as returned by a model. references (list of int): Ground truth labels. sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`. Returns: matthews_correlation (dict containing float): Matthews correlation. Examples: Example 1, a basic example with only predictions and references as inputs: >>> matthews_metric = datasets.load_metric(\"matthews_correlation\") >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2], ... predictions=[1, 2, 2, 0, 3, 3]) >>> print(round(results['matthews_correlation'], 2)) 0.54 Example 2, the same example as above, but also including sample weights: >>> matthews_metric = datasets.load_metric(\"matthews_correlation\") >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2], ... predictions=[1, 2, 2, 0, 3, 3], ... sample_weight=[0.5, 3, 1, 1, 1, 2]) >>> print(round(results['matthews_correlation'], 2)) 0.1 Example 3, the same example as above, but with sample weights that cause a negative correlation: >>> matthews_metric = datasets.load_metric(\"matthews_correlation\") >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2], ... predictions=[1, 2, 2, 0, 3, 3], ... sample_weight=[0.5, 1, 0, 0, 0, 1]) >>> print(round(results['matthews_correlation'], 2)) -0.25 """ SCREAMING_SNAKE_CASE__ : int = """\ @article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011} } """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCamelCase_ ( datasets.Metric ): def A ( self ): """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''int32''' ), '''references''': datasets.Value('''int32''' ), } ) , reference_urls=[ '''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html''' ] , ) def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None ): """simple docstring""" return { "matthews_correlation": float(matthews_corrcoef(__lowerCAmelCase , __lowerCAmelCase , sample_weight=__lowerCAmelCase ) ), }
0
0
import copy from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import Audio, ClassLabel, Features from .base import TaskTemplate @dataclass(frozen=__lowercase ) class _UpperCAmelCase ( __lowercase ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = field(default='''audio-classification''' , metadata={'''include_in_asdict_even_if_is_default''': True} ) SCREAMING_SNAKE_CASE : Optional[int] = Features({'''audio''': Audio()} ) SCREAMING_SNAKE_CASE : str = Features({'''labels''': ClassLabel} ) SCREAMING_SNAKE_CASE : Optional[int] = '''audio''' SCREAMING_SNAKE_CASE : Dict = '''labels''' def UpperCamelCase ( self : Any , UpperCamelCase__ : str ): if self.label_column not in features: raise ValueError(f'''Column {self.label_column} is not present in features.''' ) if not isinstance(features[self.label_column] , __lowerCAmelCase ): raise ValueError(f'''Column {self.label_column} is not a ClassLabel.''' ) A = copy.deepcopy(self ) A = self.label_schema.copy() A = features[self.label_column] A = label_schema return task_template @property def UpperCamelCase ( self : str ): return { self.audio_column: "audio", self.label_column: "labels", }
699
from __future__ import annotations def __lowercase ( snake_case, snake_case ): """simple docstring""" print(f'''Vertex\tShortest Distance from vertex {src}''' ) for i, d in enumerate(snake_case ): print(f'''{i}\t\t{d}''' ) def __lowercase ( snake_case, snake_case, snake_case ): """simple docstring""" for j in range(snake_case ): __magic_name__ , __magic_name__ , __magic_name__ :Tuple = (graph[j][k] for k in ['''src''', '''dst''', '''weight''']) if distance[u] != float('''inf''' ) and distance[u] + w < distance[v]: return True return False def __lowercase ( snake_case, snake_case, snake_case, snake_case ): """simple docstring""" __magic_name__ :List[Any] = [float('''inf''' )] * vertex_count __magic_name__ :Tuple = 0.0 for _ in range(vertex_count - 1 ): for j in range(snake_case ): __magic_name__ , __magic_name__ , __magic_name__ :Dict = (graph[j][k] for k in ['''src''', '''dst''', '''weight''']) if distance[u] != float('''inf''' ) and distance[u] + w < distance[v]: __magic_name__ :Tuple = distance[u] + w __magic_name__ :Tuple = check_negative_cycle(snake_case, snake_case, snake_case ) if negative_cycle_exists: raise Exception('''Negative cycle found''' ) return distance if __name__ == "__main__": import doctest doctest.testmod() SCREAMING_SNAKE_CASE__ : Tuple = int(input("""Enter number of vertices: """).strip()) SCREAMING_SNAKE_CASE__ : Any = int(input("""Enter number of edges: """).strip()) SCREAMING_SNAKE_CASE__ : list[dict[str, int]] = [{} for _ in range(E)] for i in range(E): print("""Edge """, i + 1) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = ( int(x) for x in input("""Enter source, destination, weight: """).strip().split(""" """) ) SCREAMING_SNAKE_CASE__ : Dict = {"""src""": src, """dst""": dest, """weight""": weight} SCREAMING_SNAKE_CASE__ : List[Any] = int(input("""\nEnter shortest path source:""").strip()) SCREAMING_SNAKE_CASE__ : List[str] = bellman_ford(graph, V, E, source) print_distance(shortest_distance, 0)
0
0
from .glue import GlueDataset, GlueDataTrainingArguments from .language_modeling import ( LineByLineTextDataset, LineByLineWithRefDataset, LineByLineWithSOPTextDataset, TextDataset, TextDatasetForNextSentencePrediction, ) from .squad import SquadDataset, SquadDataTrainingArguments
592
from __future__ import annotations import unittest from transformers import RoFormerConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForMultipleChoice, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerModel, ) from transformers.models.roformer.modeling_tf_roformer import ( TFRoFormerSelfAttention, TFRoFormerSinusoidalPositionalEmbedding, ) class lowerCamelCase_ : def __init__( self , __lowerCAmelCase , __lowerCAmelCase=1_3 , __lowerCAmelCase=7 , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=9_9 , __lowerCAmelCase=3_2 , __lowerCAmelCase=2 , __lowerCAmelCase=4 , __lowerCAmelCase=3_7 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=5_1_2 , __lowerCAmelCase=1_6 , __lowerCAmelCase=2 , __lowerCAmelCase=0.02 , __lowerCAmelCase=3 , __lowerCAmelCase=4 , __lowerCAmelCase=None , ): """simple docstring""" __magic_name__ :Optional[int] = parent __magic_name__ :List[Any] = 1_3 __magic_name__ :Union[str, Any] = 7 __magic_name__ :Optional[Any] = True __magic_name__ :Tuple = True __magic_name__ :List[str] = True __magic_name__ :List[Any] = True __magic_name__ :int = 9_9 __magic_name__ :Any = 3_2 __magic_name__ :Union[str, Any] = 2 __magic_name__ :List[str] = 4 __magic_name__ :List[Any] = 3_7 __magic_name__ :Tuple = '''gelu''' __magic_name__ :Any = 0.1 __magic_name__ :str = 0.1 __magic_name__ :List[str] = 5_1_2 __magic_name__ :int = 1_6 __magic_name__ :Any = 2 __magic_name__ :List[Any] = 0.02 __magic_name__ :Optional[Any] = 3 __magic_name__ :Tuple = 4 __magic_name__ :Optional[Any] = None def A ( self ): """simple docstring""" __magic_name__ :Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __magic_name__ :str = None if self.use_input_mask: __magic_name__ :Optional[int] = random_attention_mask([self.batch_size, self.seq_length] ) __magic_name__ :str = None if self.use_token_type_ids: __magic_name__ :List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __magic_name__ :Union[str, Any] = None __magic_name__ :Tuple = None __magic_name__ :str = None if self.use_labels: __magic_name__ :List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __magic_name__ :List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __magic_name__ :List[Any] = ids_tensor([self.batch_size] , self.num_choices ) __magic_name__ :str = RoFormerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=__lowerCAmelCase , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , 
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): """simple docstring""" __magic_name__ :int = TFRoFormerModel(config=__lowerCAmelCase ) __magic_name__ :Optional[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} __magic_name__ :List[str] = [input_ids, input_mask] __magic_name__ :Any = model(__lowerCAmelCase ) __magic_name__ :List[str] = model(__lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): """simple docstring""" __magic_name__ :Dict = True __magic_name__ :List[str] = TFRoFormerForCausalLM(config=__lowerCAmelCase ) __magic_name__ :str = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } __magic_name__ :Optional[Any] = model(__lowerCAmelCase )['''logits'''] self.parent.assertListEqual( list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] ) def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): """simple docstring""" __magic_name__ :Optional[Any] = TFRoFormerForMaskedLM(config=__lowerCAmelCase ) __magic_name__ :Any = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } __magic_name__ :Dict = model(__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): """simple docstring""" __magic_name__ :int = self.num_labels __magic_name__ :str = TFRoFormerForSequenceClassification(config=__lowerCAmelCase ) __magic_name__ :Optional[int] = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } __magic_name__ :str = model(__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): """simple docstring""" __magic_name__ :Union[str, Any] = self.num_choices __magic_name__ :Tuple = TFRoFormerForMultipleChoice(config=__lowerCAmelCase ) __magic_name__ :int = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) ) __magic_name__ :Optional[Any] = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) ) __magic_name__ :Union[str, Any] = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) ) __magic_name__ :str = { '''input_ids''': multiple_choice_inputs_ids, '''attention_mask''': multiple_choice_input_mask, '''token_type_ids''': multiple_choice_token_type_ids, } __magic_name__ :Tuple = model(__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): """simple docstring""" __magic_name__ :Optional[int] = self.num_labels __magic_name__ :Any = TFRoFormerForTokenClassification(config=__lowerCAmelCase ) __magic_name__ :str = { '''input_ids''': input_ids, '''attention_mask''': input_mask, 
'''token_type_ids''': token_type_ids, } __magic_name__ :Dict = model(__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): """simple docstring""" __magic_name__ :List[str] = TFRoFormerForQuestionAnswering(config=__lowerCAmelCase ) __magic_name__ :List[str] = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } __magic_name__ :Union[str, Any] = model(__lowerCAmelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def A ( self ): """simple docstring""" __magic_name__ :Union[str, Any] = self.prepare_config_and_inputs() ( ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ) :Union[str, Any] = config_and_inputs __magic_name__ :Optional[Any] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_tf class lowerCamelCase_ ( lowerCamelCase , lowerCamelCase , unittest.TestCase ): a__ = ( ( TFRoFormerModel, TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerForMultipleChoice, ) if is_tf_available() else () ) a__ = ( { '''feature-extraction''': TFRoFormerModel, '''fill-mask''': TFRoFormerForMaskedLM, '''question-answering''': TFRoFormerForQuestionAnswering, '''text-classification''': TFRoFormerForSequenceClassification, '''text-generation''': TFRoFormerForCausalLM, '''token-classification''': TFRoFormerForTokenClassification, '''zero-shot''': TFRoFormerForSequenceClassification, } if is_tf_available() else {} ) a__ = False a__ = False def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): """simple docstring""" if pipeline_test_casse_name == "TextGenerationPipelineTests": return True return False def A ( self ): """simple docstring""" __magic_name__ :List[str] = TFRoFormerModelTester(self ) __magic_name__ :List[str] = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=3_7 ) def A ( self ): """simple docstring""" self.config_tester.run_common_tests() def A ( self ): """simple docstring""" __magic_name__ :Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCAmelCase ) def A ( self ): """simple docstring""" __magic_name__ :Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__lowerCAmelCase ) def A ( self ): """simple docstring""" __magic_name__ :Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head(*__lowerCAmelCase ) def A ( self ): """simple docstring""" __magic_name__ :Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*__lowerCAmelCase ) def A ( self ): """simple docstring""" __magic_name__ :Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__lowerCAmelCase ) def A ( self ): """simple docstring""" __magic_name__ :Dict = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_for_sequence_classification(*__lowerCAmelCase ) def A ( self ): """simple docstring""" __magic_name__ :Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__lowerCAmelCase ) @slow def A ( self ): """simple docstring""" __magic_name__ :Optional[Any] = TFRoFormerModel.from_pretrained('''junnyu/roformer_chinese_base''' ) self.assertIsNotNone(__lowerCAmelCase ) @require_tf class lowerCamelCase_ ( unittest.TestCase ): @slow def A ( self ): """simple docstring""" __magic_name__ :int = TFRoFormerForMaskedLM.from_pretrained('''junnyu/roformer_chinese_base''' ) __magic_name__ :Dict = tf.constant([[0, 1, 2, 3, 4, 5]] ) __magic_name__ :Optional[Any] = model(__lowerCAmelCase )[0] # TODO Replace vocab size __magic_name__ :int = 5_0_0_0_0 __magic_name__ :Tuple = [1, 6, vocab_size] self.assertEqual(output.shape , __lowerCAmelCase ) print(output[:, :3, :3] ) # TODO Replace values below with what was printed above. __magic_name__ :Any = tf.constant( [ [ [-0.12053341, -1.0264901, 0.29221946], [-1.5133783, 0.197433, 0.15190607], [-5.0135403, -3.900256, -0.84038764], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , __lowerCAmelCase , atol=1E-4 ) @require_tf class lowerCamelCase_ ( unittest.TestCase ): a__ = 1e-4 def A ( self ): """simple docstring""" __magic_name__ :Optional[int] = tf.constant([[4, 1_0]] ) __magic_name__ :Optional[int] = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 ) __magic_name__ :Optional[Any] = emba(input_ids.shape ) __magic_name__ :List[str] = tf.constant( [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]] ) tf.debugging.assert_near(__lowerCAmelCase , __lowerCAmelCase , atol=self.tolerance ) def A ( self ): """simple docstring""" __magic_name__ :Tuple = tf.constant( [ [0.0000, 0.0000, 0.0000, 0.0000, 0.0000], [0.8415, 0.8219, 0.8020, 0.7819, 0.7617], [0.9093, 0.9364, 0.9581, 0.9749, 0.9870], ] ) __magic_name__ :Union[str, Any] = TFRoFormerSinusoidalPositionalEmbedding(num_positions=5_1_2 , embedding_dim=5_1_2 ) emba([2, 1_6, 5_1_2] ) __magic_name__ :Optional[int] = emba.weight[:3, :5] tf.debugging.assert_near(__lowerCAmelCase , __lowerCAmelCase , atol=self.tolerance ) @require_tf class lowerCamelCase_ ( unittest.TestCase ): a__ = 1e-4 def A ( self ): """simple docstring""" # 2,12,16,64 __magic_name__ :int = tf.reshape(tf.range(2 * 1_2 * 1_6 * 6_4 , dtype=tf.floataa ) , shape=(2, 1_2, 1_6, 6_4) ) / 1_0_0 __magic_name__ :str = -tf.reshape(tf.range(2 * 1_2 * 1_6 * 6_4 , dtype=tf.floataa ) , shape=(2, 1_2, 1_6, 6_4) ) / 1_0_0 __magic_name__ :int = TFRoFormerSinusoidalPositionalEmbedding(num_positions=3_2 , embedding_dim=6_4 ) __magic_name__ :List[str] = embed_positions([2, 1_6, 7_6_8] )[None, None, :, :] __magic_name__ , __magic_name__ :Union[str, Any] = TFRoFormerSelfAttention.apply_rotary_position_embeddings( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) __magic_name__ :Tuple = tf.constant( [ [0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700], [-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343], [-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985], [-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871], [0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980], [3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253], ] ) __magic_name__ :List[str] = tf.constant( [ [0.0000, -0.0100, -0.0200, -0.0300, -0.0400, 
-0.0500, -0.0600, -0.0700], [0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343], [1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985], [2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871], [-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980], [-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253], ] ) tf.debugging.assert_near(query_layer[0, 0, :6, :8] , __lowerCAmelCase , atol=self.tolerance ) tf.debugging.assert_near(key_layer[0, 0, :6, :8] , __lowerCAmelCase , atol=self.tolerance )
0
0
import gc import random import tempfile import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMInverseScheduler, DDIMScheduler, DPMSolverMultistepInverseScheduler, DPMSolverMultistepScheduler, StableDiffusionDiffEditPipeline, UNetaDConditionModel, ) from diffusers.utils import load_image, slow from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class _lowercase ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ): """simple docstring""" lowerCAmelCase__ = StableDiffusionDiffEditPipeline lowerCAmelCase__ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'height', 'width', 'image'} | {'image_latents'} lowerCAmelCase__ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'image'} | {'image_latents'} lowerCAmelCase__ = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess lowerCAmelCase__ = frozenset([] ) def _UpperCAmelCase ( self ): '''simple docstring''' torch.manual_seed(0 ) _lowercase = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=__lowerCAmelCase , ) _lowercase = DDIMScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=__lowerCAmelCase , set_alpha_to_one=__lowerCAmelCase , ) _lowercase = DDIMInverseScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=__lowerCAmelCase , set_alpha_to_zero=__lowerCAmelCase , ) torch.manual_seed(0 ) _lowercase = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , ) torch.manual_seed(0 ) _lowercase = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="""gelu""" , projection_dim=512 , ) _lowercase = CLIPTextModel(__lowerCAmelCase ) _lowercase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) _lowercase = { '''unet''': unet, '''scheduler''': scheduler, '''inverse_scheduler''': inverse_scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''safety_checker''': None, '''feature_extractor''': None, } return components def _UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase=0 ): '''simple docstring''' _lowercase = floats_tensor((1, 16, 16) , rng=random.Random(__lowerCAmelCase ) ).to(__lowerCAmelCase ) _lowercase = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(__lowerCAmelCase ) ).to(__lowerCAmelCase ) if str(__lowerCAmelCase ).startswith("""mps""" ): _lowercase = torch.manual_seed(__lowerCAmelCase ) else: _lowercase = torch.Generator(device=__lowerCAmelCase ).manual_seed(__lowerCAmelCase ) _lowercase = { '''prompt''': '''a dog and a 
newt''', '''mask_image''': mask, '''image_latents''': latents, '''generator''': generator, '''num_inference_steps''': 2, '''inpaint_strength''': 1.0, '''guidance_scale''': 6.0, '''output_type''': '''numpy''', } return inputs def _UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase=0 ): '''simple docstring''' _lowercase = floats_tensor((1, 3, 32, 32) , rng=random.Random(__lowerCAmelCase ) ).to(__lowerCAmelCase ) _lowercase = image.cpu().permute(0 , 2 , 3 , 1 )[0] _lowercase = Image.fromarray(np.uinta(__lowerCAmelCase ) ).convert("""RGB""" ) if str(__lowerCAmelCase ).startswith("""mps""" ): _lowercase = torch.manual_seed(__lowerCAmelCase ) else: _lowercase = torch.Generator(device=__lowerCAmelCase ).manual_seed(__lowerCAmelCase ) _lowercase = { '''image''': image, '''source_prompt''': '''a cat and a frog''', '''target_prompt''': '''a dog and a newt''', '''generator''': generator, '''num_inference_steps''': 2, '''num_maps_per_mask''': 2, '''mask_encode_strength''': 1.0, '''guidance_scale''': 6.0, '''output_type''': '''numpy''', } return inputs def _UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase=0 ): '''simple docstring''' _lowercase = floats_tensor((1, 3, 32, 32) , rng=random.Random(__lowerCAmelCase ) ).to(__lowerCAmelCase ) _lowercase = image.cpu().permute(0 , 2 , 3 , 1 )[0] _lowercase = Image.fromarray(np.uinta(__lowerCAmelCase ) ).convert("""RGB""" ) if str(__lowerCAmelCase ).startswith("""mps""" ): _lowercase = torch.manual_seed(__lowerCAmelCase ) else: _lowercase = torch.Generator(device=__lowerCAmelCase ).manual_seed(__lowerCAmelCase ) _lowercase = { '''image''': image, '''prompt''': '''a cat and a frog''', '''generator''': generator, '''num_inference_steps''': 2, '''inpaint_strength''': 1.0, '''guidance_scale''': 6.0, '''decode_latents''': True, '''output_type''': '''numpy''', } return inputs def _UpperCAmelCase ( self ): '''simple docstring''' if not hasattr(self.pipeline_class , """_optional_components""" ): return _lowercase = self.get_dummy_components() _lowercase = self.pipeline_class(**__lowerCAmelCase ) pipe.to(__lowerCAmelCase ) pipe.set_progress_bar_config(disable=__lowerCAmelCase ) # set all optional components to None and update pipeline config accordingly for optional_component in pipe._optional_components: setattr(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} ) _lowercase = self.get_dummy_inputs(__lowerCAmelCase ) _lowercase = pipe(**__lowerCAmelCase )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(__lowerCAmelCase ) _lowercase = self.pipeline_class.from_pretrained(__lowerCAmelCase ) pipe_loaded.to(__lowerCAmelCase ) pipe_loaded.set_progress_bar_config(disable=__lowerCAmelCase ) for optional_component in pipe._optional_components: self.assertTrue( getattr(__lowerCAmelCase , __lowerCAmelCase ) is None , F'''`{optional_component}` did not stay set to None after loading.''' , ) _lowercase = self.get_dummy_inputs(__lowerCAmelCase ) _lowercase = pipe_loaded(**__lowerCAmelCase )[0] _lowercase = np.abs(output - output_loaded ).max() self.assertLess(__lowerCAmelCase , 1e-4 ) def _UpperCAmelCase ( self ): '''simple docstring''' _lowercase = '''cpu''' _lowercase = self.get_dummy_components() _lowercase = self.pipeline_class(**__lowerCAmelCase ) pipe.to(__lowerCAmelCase ) pipe.set_progress_bar_config(disable=__lowerCAmelCase ) _lowercase = self.get_dummy_mask_inputs(__lowerCAmelCase ) _lowercase = pipe.generate_mask(**__lowerCAmelCase ) 
_lowercase = mask[0, -3:, -3:] self.assertEqual(mask.shape , (1, 16, 16) ) _lowercase = np.array([0] * 9 ) _lowercase = np.abs(mask_slice.flatten() - expected_slice ).max() self.assertLessEqual(__lowerCAmelCase , 1e-3 ) self.assertEqual(mask[0, -3, -4] , 0 ) def _UpperCAmelCase ( self ): '''simple docstring''' _lowercase = '''cpu''' _lowercase = self.get_dummy_components() _lowercase = self.pipeline_class(**__lowerCAmelCase ) pipe.to(__lowerCAmelCase ) pipe.set_progress_bar_config(disable=__lowerCAmelCase ) _lowercase = self.get_dummy_inversion_inputs(__lowerCAmelCase ) _lowercase = pipe.invert(**__lowerCAmelCase ).images _lowercase = image[0, -1, -3:, -3:] self.assertEqual(image.shape , (2, 32, 32, 3) ) _lowercase = np.array( [0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799] , ) _lowercase = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(__lowerCAmelCase , 1e-3 ) def _UpperCAmelCase ( self ): '''simple docstring''' super().test_inference_batch_single_identical(expected_max_diff=5e-3 ) def _UpperCAmelCase ( self ): '''simple docstring''' _lowercase = '''cpu''' _lowercase = self.get_dummy_components() _lowercase = {'''beta_start''': 0.00_085, '''beta_end''': 0.012, '''beta_schedule''': '''scaled_linear'''} _lowercase = DPMSolverMultistepScheduler(**__lowerCAmelCase ) _lowercase = DPMSolverMultistepInverseScheduler(**__lowerCAmelCase ) _lowercase = self.pipeline_class(**__lowerCAmelCase ) pipe.to(__lowerCAmelCase ) pipe.set_progress_bar_config(disable=__lowerCAmelCase ) _lowercase = self.get_dummy_inversion_inputs(__lowerCAmelCase ) _lowercase = pipe.invert(**__lowerCAmelCase ).images _lowercase = image[0, -1, -3:, -3:] self.assertEqual(image.shape , (2, 32, 32, 3) ) _lowercase = np.array( [0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799] , ) _lowercase = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(__lowerCAmelCase , 1e-3 ) @require_torch_gpu @slow class _lowercase ( unittest.TestCase ): """simple docstring""" def _UpperCAmelCase ( self ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() @classmethod def _UpperCAmelCase ( cls ): '''simple docstring''' _lowercase = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png""" ) _lowercase = raw_image.convert("""RGB""" ).resize((768, 768) ) _lowercase = raw_image def _UpperCAmelCase ( self ): '''simple docstring''' _lowercase = torch.manual_seed(0 ) _lowercase = StableDiffusionDiffEditPipeline.from_pretrained( """stabilityai/stable-diffusion-2-1""" , safety_checker=__lowerCAmelCase , torch_dtype=torch.floataa ) _lowercase = DDIMScheduler.from_config(pipe.scheduler.config ) _lowercase = DDIMInverseScheduler.from_config(pipe.scheduler.config ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=__lowerCAmelCase ) _lowercase = '''a bowl of fruit''' _lowercase = '''a bowl of pears''' _lowercase = pipe.generate_mask( image=self.raw_image , source_prompt=__lowerCAmelCase , target_prompt=__lowerCAmelCase , generator=__lowerCAmelCase , ) _lowercase = pipe.invert( prompt=__lowerCAmelCase , image=self.raw_image , inpaint_strength=0.7 , generator=__lowerCAmelCase ).latents _lowercase = pipe( prompt=__lowerCAmelCase , mask_image=__lowerCAmelCase , image_latents=__lowerCAmelCase , generator=__lowerCAmelCase , negative_prompt=__lowerCAmelCase , inpaint_strength=0.7 , output_type="""numpy""" , ).images[0] _lowercase = ( np.array( 
load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/diffedit/pears.png""" ).resize((768, 768) ) ) / 255 ) assert np.abs((expected_image - image).max() ) < 5e-1 def _UpperCAmelCase ( self ): '''simple docstring''' _lowercase = torch.manual_seed(0 ) _lowercase = StableDiffusionDiffEditPipeline.from_pretrained( """stabilityai/stable-diffusion-2-1""" , safety_checker=__lowerCAmelCase , torch_dtype=torch.floataa ) _lowercase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) _lowercase = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=__lowerCAmelCase ) _lowercase = '''a bowl of fruit''' _lowercase = '''a bowl of pears''' _lowercase = pipe.generate_mask( image=self.raw_image , source_prompt=__lowerCAmelCase , target_prompt=__lowerCAmelCase , generator=__lowerCAmelCase , ) _lowercase = pipe.invert( prompt=__lowerCAmelCase , image=self.raw_image , inpaint_strength=0.7 , generator=__lowerCAmelCase , num_inference_steps=25 , ).latents _lowercase = pipe( prompt=__lowerCAmelCase , mask_image=__lowerCAmelCase , image_latents=__lowerCAmelCase , generator=__lowerCAmelCase , negative_prompt=__lowerCAmelCase , inpaint_strength=0.7 , num_inference_steps=25 , output_type="""numpy""" , ).images[0] _lowercase = ( np.array( load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/diffedit/pears.png""" ).resize((768, 768) ) ) / 255 ) assert np.abs((expected_image - image).max() ) < 5e-1
398
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available


_import_structure = {"tokenization_herbert": ["HerbertTokenizer"]}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_herbert_fast"] = ["HerbertTokenizerFast"]


if TYPE_CHECKING:
    from .tokenization_herbert import HerbertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_herbert_fast import HerbertTokenizerFast

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
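`_LazyModule` defers the submodule imports declared in `_import_structure` until an attribute is first accessed. A toy illustration of the mechanism (an assumption about how such a class can work, not the transformers implementation):

import importlib
import types


class ToyLazyModule(types.ModuleType):
    """Toy illustration only: resolve attributes from an import map on first access."""

    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # invert {submodule: [attr, ...]} into {attr: submodule}
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr: str):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(f"{self.__name__}.{self._attr_to_module[attr]}")
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so __getattr__ is not hit again
        return value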
0
0
def gray_code(bit_count: int) -> list:
    """Return the Gray code sequence for ``bit_count`` bits as integers."""
    if bit_count < 0:
        raise ValueError("The given input must be positive")

    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count)

    # convert the binary strings to integers
    return [int(code, 2) for code in sequence]


def gray_code_sequence_string(bit_count: int) -> list:
    """Recursively build the Gray code sequence as binary strings."""
    # base cases
    if bit_count == 0:
        return ["0"]
    if bit_count == 1:
        return ["0", "1"]

    seq_len = 1 << bit_count  # length of the sequence: 1 << n is equivalent to 2**n

    # the recursive call generates the answer for n - 1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1)

    sequence = []

    # prefix "0" to the first half of the smaller sequence
    for i in range(seq_len // 2):
        sequence.append("0" + smaller_sequence[i])

    # prefix "1" to the second half, traversed from the end of the list
    for i in reversed(range(seq_len // 2)):
        sequence.append("1" + smaller_sequence[i])

    return sequence


if __name__ == "__main__":
    import doctest

    doctest.testmod()
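A quick usage check of the reflect-and-prefix construction above; the defining Gray-code property is that adjacent codes differ in exactly one bit:

codes = gray_code(3)
print(codes)  # [0, 1, 3, 2, 6, 7, 5, 4]
for a, b in zip(codes, codes[1:]):
    assert bin(a ^ b).count("1") == 1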
197
import argparse import gdown import numpy as np import torch from huggingface_hub import hf_hub_download from transformers import ( CLIPTokenizer, CLIPTokenizerFast, VideoMAEImageProcessor, XCLIPConfig, XCLIPModel, XCLIPProcessor, XCLIPTextConfig, XCLIPVisionConfig, ) def __lowercase ( snake_case, snake_case ): """simple docstring""" __magic_name__ :str = XCLIPTextConfig() # derive patch size from model name __magic_name__ :Union[str, Any] = model_name.find('''patch''' ) __magic_name__ :Optional[Any] = int(model_name[start_idx + len('''patch''' ) : start_idx + len('''patch''' ) + 2] ) __magic_name__ :int = XCLIPVisionConfig(patch_size=snake_case, num_frames=snake_case ) if "large" in model_name: __magic_name__ :Dict = 7_6_8 __magic_name__ :int = 3_0_7_2 __magic_name__ :List[Any] = 1_2 __magic_name__ :str = 1_0_2_4 __magic_name__ :Any = 4_0_9_6 __magic_name__ :Optional[Any] = 1_6 __magic_name__ :Union[str, Any] = 2_4 __magic_name__ :Union[str, Any] = 7_6_8 __magic_name__ :Tuple = 3_0_7_2 if model_name == "xclip-large-patch14-16-frames": __magic_name__ :List[str] = 3_3_6 __magic_name__ :Any = XCLIPConfig.from_text_vision_configs(snake_case, snake_case ) if "large" in model_name: __magic_name__ :str = 7_6_8 return config def __lowercase ( snake_case ): """simple docstring""" if name == "token_embedding.weight": __magic_name__ :Any = name.replace('''token_embedding.weight''', '''text_model.embeddings.token_embedding.weight''' ) if name == "positional_embedding": __magic_name__ :Any = name.replace('''positional_embedding''', '''text_model.embeddings.position_embedding.weight''' ) if "ln_1" in name: __magic_name__ :List[str] = name.replace('''ln_1''', '''layer_norm1''' ) if "ln_2" in name: __magic_name__ :str = name.replace('''ln_2''', '''layer_norm2''' ) if "c_fc" in name: __magic_name__ :List[Any] = name.replace('''c_fc''', '''fc1''' ) if "c_proj" in name: __magic_name__ :Any = name.replace('''c_proj''', '''fc2''' ) if name.startswith('''transformer.resblocks''' ): __magic_name__ :Any = name.replace('''transformer.resblocks''', '''text_model.encoder.layers''' ) if "attn.out_proj" in name and "message" not in name: __magic_name__ :Union[str, Any] = name.replace('''attn.out_proj''', '''self_attn.out_proj''' ) if "ln_final" in name: __magic_name__ :Tuple = name.replace('''ln_final''', '''text_model.final_layer_norm''' ) # visual encoder if name == "visual.class_embedding": __magic_name__ :List[Any] = name.replace('''visual.class_embedding''', '''vision_model.embeddings.class_embedding''' ) if name == "visual.positional_embedding": __magic_name__ :Any = name.replace('''visual.positional_embedding''', '''vision_model.embeddings.position_embedding.weight''' ) if name.startswith('''visual.transformer.resblocks''' ): __magic_name__ :Union[str, Any] = name.replace('''visual.transformer.resblocks''', '''vision_model.encoder.layers''' ) if "visual.conv1" in name: __magic_name__ :Tuple = name.replace('''visual.conv1''', '''vision_model.embeddings.patch_embedding''' ) if "visual.ln_pre" in name: __magic_name__ :Tuple = name.replace('''visual.ln_pre''', '''vision_model.pre_layernorm''' ) if "visual.ln_post" in name: __magic_name__ :Optional[Any] = name.replace('''visual.ln_post''', '''vision_model.post_layernorm''' ) if "visual.proj" in name: __magic_name__ :Tuple = name.replace('''visual.proj''', '''visual_projection.weight''' ) if "text_projection" in name: __magic_name__ :int = name.replace('''text_projection''', '''text_projection.weight''' ) # things on top if "prompts_visual_proj" in name: 
__magic_name__ :int = name.replace('''prompts_visual_proj''', '''prompts_visual_projection''' ) if "prompts_visual_ln" in name: __magic_name__ :Dict = name.replace('''prompts_visual_ln''', '''prompts_visual_layernorm''' ) # mit if name == "mit.positional_embedding": __magic_name__ :List[Any] = name.replace('''positional''', '''position''' ) if name.startswith('''mit.resblocks''' ): __magic_name__ :Union[str, Any] = name.replace('''mit.resblocks''', '''mit.encoder.layers''' ) # prompts generator if name.startswith('''prompts_generator.norm''' ): __magic_name__ :str = name.replace('''prompts_generator.norm''', '''prompts_generator.layernorm''' ) return name def __lowercase ( snake_case, snake_case ): """simple docstring""" for key in orig_state_dict.copy().keys(): __magic_name__ :Any = orig_state_dict.pop(snake_case ) if "attn.in_proj" in key: __magic_name__ :str = key.split('''.''' ) if key.startswith('''visual''' ): __magic_name__ :List[Any] = key_split[3] __magic_name__ :List[Any] = config.vision_config.hidden_size if "message_attn" in key: if "weight" in key: __magic_name__ :List[Any] = val[ :dim, : ] __magic_name__ :List[str] = val[ dim : dim * 2, : ] __magic_name__ :List[str] = val[ -dim:, : ] else: __magic_name__ :str = val[ :dim ] __magic_name__ :Optional[int] = val[ dim : dim * 2 ] __magic_name__ :Any = val[ -dim: ] else: if "weight" in key: __magic_name__ :int = val[ :dim, : ] __magic_name__ :Union[str, Any] = val[ dim : dim * 2, : ] __magic_name__ :List[Any] = val[ -dim:, : ] else: __magic_name__ :Union[str, Any] = val[:dim] __magic_name__ :str = val[ dim : dim * 2 ] __magic_name__ :Dict = val[-dim:] elif key.startswith('''mit''' ): __magic_name__ :List[Any] = key_split[2] __magic_name__ :Any = config.vision_config.mit_hidden_size if "weight" in key: __magic_name__ :Union[str, Any] = val[:dim, :] __magic_name__ :Optional[int] = val[dim : dim * 2, :] __magic_name__ :int = val[-dim:, :] else: __magic_name__ :Tuple = val[:dim] __magic_name__ :Optional[int] = val[dim : dim * 2] __magic_name__ :Optional[int] = val[-dim:] else: __magic_name__ :Any = key_split[2] __magic_name__ :List[Any] = config.text_config.hidden_size if "weight" in key: __magic_name__ :Union[str, Any] = val[:dim, :] __magic_name__ :Tuple = val[ dim : dim * 2, : ] __magic_name__ :str = val[-dim:, :] else: __magic_name__ :int = val[:dim] __magic_name__ :Any = val[ dim : dim * 2 ] __magic_name__ :str = val[-dim:] else: __magic_name__ :Tuple = rename_key(snake_case ) if new_key_name in ["visual_projection.weight", "text_projection.weight"]: __magic_name__ :List[Any] = val.T __magic_name__ :Optional[Any] = val return orig_state_dict def __lowercase ( snake_case ): """simple docstring""" if num_frames == 8: __magic_name__ :Any = '''eating_spaghetti_8_frames.npy''' elif num_frames == 1_6: __magic_name__ :List[Any] = '''eating_spaghetti.npy''' elif num_frames == 3_2: __magic_name__ :Tuple = '''eating_spaghetti_32_frames.npy''' __magic_name__ :str = hf_hub_download( repo_id='''hf-internal-testing/spaghetti-video''', filename=snake_case, repo_type='''dataset''', ) __magic_name__ :List[Any] = np.load(snake_case ) return list(snake_case ) def __lowercase ( snake_case, snake_case=None, snake_case=False ): """simple docstring""" __magic_name__ :Union[str, Any] = { # fully supervised kinetics-400 checkpoints '''xclip-base-patch32''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth''', '''xclip-base-patch32-16-frames''': ( 
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth''' ), '''xclip-base-patch16''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth''', '''xclip-base-patch16-16-frames''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth''' ), '''xclip-large-patch14''': '''https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&amp;export=download&amp;confirm=t&amp;uuid=b26caedc-88e2-473e-830a-9d158b653cdb''', '''xclip-large-patch14-16-frames''': '''https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&amp;export=download&amp;confirm=t&amp;uuid=538fa810-e671-4050-b385-9a623f89804f''', # fully supervised kinetics-600 checkpoints '''xclip-base-patch16-kinetics-600''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth''' ), '''xclip-base-patch16-kinetics-600-16-frames''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth''' ), '''xclip-large-patch14-kinetics-600''': '''https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&amp;export=download&amp;confirm=t&amp;uuid=141d4977-4a65-44ae-864f-4b0c19f838be''', # few shot '''xclip-base-patch16-hmdb-2-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth''' ), '''xclip-base-patch16-hmdb-4-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth''' ), '''xclip-base-patch16-hmdb-8-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth''' ), '''xclip-base-patch16-hmdb-16-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth''' ), '''xclip-base-patch16-ucf-2-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth''' ), '''xclip-base-patch16-ucf-4-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth''' ), '''xclip-base-patch16-ucf-8-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth''' ), '''xclip-base-patch16-ucf-16-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth''' ), # zero shot '''xclip-base-patch16-zero-shot''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth''', } __magic_name__ :Optional[int] = model_to_url[model_name] __magic_name__ :List[str] = 8 if "16-frames" in model_name: __magic_name__ :List[Any] = 1_6 elif "shot" in model_name: __magic_name__ :Dict = 3_2 __magic_name__ :str = get_xclip_config(snake_case, snake_case ) __magic_name__ :List[Any] = XCLIPModel(snake_case ) model.eval() if "drive" in checkpoint_url: __magic_name__ :Any = '''pytorch_model.bin''' gdown.cached_download(snake_case, snake_case, quiet=snake_case ) __magic_name__ :Optional[Any] = torch.load(snake_case, map_location='''cpu''' )['''model'''] else: __magic_name__ :Optional[int] = torch.hub.load_state_dict_from_url(snake_case )['''model'''] __magic_name__ :List[str] = convert_state_dict(snake_case, snake_case ) __magic_name__ :List[Any] = XCLIPModel(snake_case ) __magic_name__ , __magic_name__ :Optional[Any] = model.load_state_dict(snake_case, strict=snake_case ) assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"] model.eval() __magic_name__ :str = 3_3_6 if model_name == '''xclip-large-patch14-16-frames''' else 2_2_4 __magic_name__ :Optional[int] = VideoMAEImageProcessor(size=snake_case ) 
__magic_name__ :Optional[int] = CLIPTokenizer.from_pretrained('''openai/clip-vit-base-patch32''' ) __magic_name__ :Tuple = CLIPTokenizerFast.from_pretrained('''openai/clip-vit-base-patch32''' ) __magic_name__ :Optional[int] = XCLIPProcessor(image_processor=snake_case, tokenizer=snake_case ) __magic_name__ :List[Any] = prepare_video(snake_case ) __magic_name__ :str = processor( text=['''playing sports''', '''eating spaghetti''', '''go shopping'''], videos=snake_case, return_tensors='''pt''', padding=snake_case ) print('''Shape of pixel values:''', inputs.pixel_values.shape ) with torch.no_grad(): __magic_name__ :Tuple = model(**snake_case ) # Verify outputs __magic_name__ :Any = outputs.logits_per_video __magic_name__ :str = logits_per_video.softmax(dim=1 ) print('''Probs:''', snake_case ) # kinetics-400 if model_name == "xclip-base-patch32": __magic_name__ :Dict = torch.tensor([[0.0019, 0.9951, 0.0030]] ) elif model_name == "xclip-base-patch32-16-frames": __magic_name__ :str = torch.tensor([[7.0_9_9_9E-0_4, 9.9_8_8_3E-0_1, 4.5_5_8_0E-0_4]] ) elif model_name == "xclip-base-patch16": __magic_name__ :Tuple = torch.tensor([[0.0083, 0.9681, 0.0236]] ) elif model_name == "xclip-base-patch16-16-frames": __magic_name__ :Tuple = torch.tensor([[7.6_9_3_7E-0_4, 9.9_7_2_8E-0_1, 1.9_4_7_3E-0_3]] ) elif model_name == "xclip-large-patch14": __magic_name__ :str = torch.tensor([[0.0062, 0.9864, 0.0075]] ) elif model_name == "xclip-large-patch14-16-frames": __magic_name__ :Optional[int] = torch.tensor([[3.3_8_7_7E-0_4, 9.9_9_3_7E-0_1, 2.8_8_8_8E-0_4]] ) # kinetics-600 elif model_name == "xclip-base-patch16-kinetics-600": __magic_name__ :Optional[int] = torch.tensor([[0.0555, 0.8914, 0.0531]] ) elif model_name == "xclip-base-patch16-kinetics-600-16-frames": __magic_name__ :List[str] = torch.tensor([[3.8_5_5_4E-0_4, 9.9_9_2_9E-0_1, 3.2_7_5_4E-0_4]] ) elif model_name == "xclip-large-patch14-kinetics-600": __magic_name__ :List[str] = torch.tensor([[0.0036, 0.9920, 0.0045]] ) # few shot elif model_name == "xclip-base-patch16-hmdb-2-shot": __magic_name__ :Tuple = torch.tensor([[7.1_8_9_0E-0_6, 9.9_9_9_4E-0_1, 5.6_5_5_9E-0_5]] ) elif model_name == "xclip-base-patch16-hmdb-4-shot": __magic_name__ :List[str] = torch.tensor([[1.0_3_2_0E-0_5, 9.9_9_9_3E-0_1, 6.2_4_3_5E-0_5]] ) elif model_name == "xclip-base-patch16-hmdb-8-shot": __magic_name__ :Optional[int] = torch.tensor([[4.1_3_7_7E-0_6, 9.9_9_9_0E-0_1, 9.8_3_8_6E-0_5]] ) elif model_name == "xclip-base-patch16-hmdb-16-shot": __magic_name__ :Optional[int] = torch.tensor([[4.1_3_4_7E-0_5, 9.9_9_6_2E-0_1, 3.3_4_1_1E-0_4]] ) elif model_name == "xclip-base-patch16-ucf-2-shot": __magic_name__ :Union[str, Any] = torch.tensor([[8.5_8_5_7E-0_5, 9.9_9_2_8E-0_1, 6.3_2_9_1E-0_4]] ) elif model_name == "xclip-base-patch16-ucf-4-shot": __magic_name__ :Union[str, Any] = torch.tensor([[8.5_8_5_7E-0_5, 9.9_9_2_8E-0_1, 6.3_2_9_1E-0_4]] ) elif model_name == "xclip-base-patch16-ucf-8-shot": __magic_name__ :Optional[int] = torch.tensor([[0.0027, 0.9904, 0.0070]] ) elif model_name == "xclip-base-patch16-ucf-16-shot": __magic_name__ :Any = torch.tensor([[9.8_2_1_9E-0_4, 9.9_5_9_3E-0_1, 3.0_8_6_3E-0_3]] ) # zero shot elif model_name == "xclip-base-patch16-zero-shot": __magic_name__ :Optional[int] = torch.tensor([[3.5_0_8_2E-0_4, 9.9_7_8_5E-0_1, 1.7_9_6_6E-0_3]] ) else: raise ValueError(f'''Model name {model_name} not supported''' ) assert torch.allclose(snake_case, snake_case, atol=1E-3 ) print('''Looks ok!''' ) if pytorch_dump_folder_path is not None: print(f'''Saving model {model_name} 
to {pytorch_dump_folder_path}''' ) model.save_pretrained(snake_case ) if push_to_hub: print('''Pushing model, processor and slow tokenizer files to the hub...''' ) model.push_to_hub(snake_case, organization='''nielsr''' ) processor.push_to_hub(snake_case, organization='''nielsr''' ) slow_tokenizer.push_to_hub(snake_case, organization='''nielsr''' ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""xclip-base-patch32""", type=str, help="""Name of the model.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) SCREAMING_SNAKE_CASE__ : List[Any] = parser.parse_args() convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
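The `convert_state_dict` function above repeatedly slices a fused `attn.in_proj` matrix into separate query/key/value projections. The same slicing in isolation, with a made-up hidden size for illustration:

import torch

hidden = 4
in_proj_weight = torch.randn(3 * hidden, hidden)  # fused [q; k; v] projection
in_proj_bias = torch.randn(3 * hidden)

q_w = in_proj_weight[:hidden, :]
k_w = in_proj_weight[hidden : 2 * hidden, :]
v_w = in_proj_weight[-hidden:, :]
q_b, k_b, v_b = in_proj_bias[:hidden], in_proj_bias[hidden : 2 * hidden], in_proj_bias[-hidden:]

# round-trip check: the three slices reassemble into the fused tensor
assert torch.equal(torch.cat([q_w, k_w, v_w], dim=0), in_proj_weight)
assert torch.equal(torch.cat([q_b, k_b, v_b], dim=0), in_proj_bias)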
0
0
import json
import os
import tempfile

import transformers

import datasets
from utils import generate_example_dataset, get_duration


SPEED_TEST_N_EXAMPLES = 500_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))


@get_duration
def map(dataset: datasets.Dataset, **kwargs):
    _ = dataset.map(**kwargs)


@get_duration
def filter(dataset: datasets.Dataset, **kwargs):
    _ = dataset.filter(**kwargs)


def benchmark_map_filter():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({"text": datasets.Value("string"), "numbers": datasets.Value("float32")})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES
        )

        tokenizer = transformers.AutoTokenizer.from_pretrained("bert-base-cased", use_fast=True)

        def tokenize(examples):
            return tokenizer(examples["text"])

        times["map identity"] = map(dataset)
        times["map identity batched"] = map(dataset, batched=True)
        times["map no-op batched"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="numpy"):
            times["map no-op batched numpy"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="pandas"):
            times["map no-op batched pandas"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="torch", columns="numbers"):
            times["map no-op batched pytorch"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="tensorflow", columns="numbers"):
            times["map no-op batched tensorflow"] = map(dataset, function=lambda x: None, batched=True)

        times["map fast-tokenizer batched"] = map(dataset, function=tokenize, batched=True)
        times["filter"] = filter(dataset)

        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))


if __name__ == "__main__":  # useful to run the profiler
    benchmark_map_filter()
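`get_duration` comes from a local `utils` module that is not shown here. A plausible minimal implementation, offered only as an assumption about what the helper does (time a call and return the elapsed seconds):

import functools
import timeit


def get_duration(func):
    """Hypothetical stand-in for utils.get_duration: return the call's wall-clock time."""

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start = timeit.default_timer()
        func(*args, **kwargs)
        return timeit.default_timer() - start

    return wrapper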
681
import numpy as np import torch from torch.utils.data import Dataset from utils import logger class lowerCamelCase_ ( lowerCamelCase ): def __init__( self , __lowerCAmelCase , __lowerCAmelCase ): """simple docstring""" __magic_name__ :Optional[int] = params __magic_name__ :Any = np.array(__lowerCAmelCase ) __magic_name__ :Optional[Any] = np.array([len(__lowerCAmelCase ) for t in data] ) self.check() self.remove_long_sequences() self.remove_empty_sequences() self.remove_unknown_sequences() self.check() self.print_statistics() def __getitem__( self , __lowerCAmelCase ): """simple docstring""" return (self.token_ids[index], self.lengths[index]) def __len__( self ): """simple docstring""" return len(self.lengths ) def A ( self ): """simple docstring""" assert len(self.token_ids ) == len(self.lengths ) assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) ) def A ( self ): """simple docstring""" __magic_name__ :Any = self.params.max_model_input_size __magic_name__ :int = self.lengths > max_len logger.info(F'''Splitting {sum(__lowerCAmelCase )} too long sequences.''' ) def divide_chunks(__lowerCAmelCase , __lowerCAmelCase ): return [l[i : i + n] for i in range(0 , len(__lowerCAmelCase ) , __lowerCAmelCase )] __magic_name__ :Optional[int] = [] __magic_name__ :List[Any] = [] if self.params.mlm: __magic_name__ , __magic_name__ :Optional[Any] = self.params.special_tok_ids['''cls_token'''], self.params.special_tok_ids['''sep_token'''] else: __magic_name__ , __magic_name__ :Tuple = self.params.special_tok_ids['''bos_token'''], self.params.special_tok_ids['''eos_token'''] for seq_, len_ in zip(self.token_ids , self.lengths ): assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_ if len_ <= max_len: new_tok_ids.append(seq_ ) new_lengths.append(len_ ) else: __magic_name__ :int = [] for sub_s in divide_chunks(seq_ , max_len - 2 ): if sub_s[0] != cls_id: __magic_name__ :List[Any] = np.insert(__lowerCAmelCase , 0 , __lowerCAmelCase ) if sub_s[-1] != sep_id: __magic_name__ :Union[str, Any] = np.insert(__lowerCAmelCase , len(__lowerCAmelCase ) , __lowerCAmelCase ) assert len(__lowerCAmelCase ) <= max_len assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s sub_seqs.append(__lowerCAmelCase ) new_tok_ids.extend(__lowerCAmelCase ) new_lengths.extend([len(__lowerCAmelCase ) for l in sub_seqs] ) __magic_name__ :Tuple = np.array(__lowerCAmelCase ) __magic_name__ :Optional[int] = np.array(__lowerCAmelCase ) def A ( self ): """simple docstring""" __magic_name__ :Optional[Any] = len(self ) __magic_name__ :int = self.lengths > 1_1 __magic_name__ :List[str] = self.token_ids[indices] __magic_name__ :Union[str, Any] = self.lengths[indices] __magic_name__ :List[str] = len(self ) logger.info(F'''Remove {init_size - new_size} too short (<=11 tokens) sequences.''' ) def A ( self ): """simple docstring""" if "unk_token" not in self.params.special_tok_ids: return else: __magic_name__ :Tuple = self.params.special_tok_ids['''unk_token'''] __magic_name__ :Dict = len(self ) __magic_name__ :Tuple = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] ) __magic_name__ :int = (unk_occs / self.lengths) < 0.5 __magic_name__ :str = self.token_ids[indices] __magic_name__ :str = self.lengths[indices] __magic_name__ :Any = len(self ) logger.info(F'''Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).''' ) def A ( self ): """simple docstring""" if not self.params.is_master: return logger.info(F'''{len(self )} sequences''' ) # data_len = 
sum(self.lengths) # nb_unique_tokens = len(Counter(list(chain(*self.token_ids)))) # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)') # unk_idx = self.params.special_tok_ids['unk_token'] # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids]) # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)') def A ( self , __lowerCAmelCase ): """simple docstring""" __magic_name__ :Optional[Any] = [t[0] for t in batch] __magic_name__ :List[Any] = [t[1] for t in batch] assert len(__lowerCAmelCase ) == len(__lowerCAmelCase ) # Max for paddings __magic_name__ :Tuple = max(__lowerCAmelCase ) # Pad token ids if self.params.mlm: __magic_name__ :Any = self.params.special_tok_ids['''pad_token'''] else: __magic_name__ :str = self.params.special_tok_ids['''unk_token'''] __magic_name__ :Any = [list(t.astype(__lowerCAmelCase ) ) + [pad_idx] * (max_seq_len_ - len(__lowerCAmelCase )) for t in token_ids] assert len(tk_ ) == len(__lowerCAmelCase ) assert all(len(__lowerCAmelCase ) == max_seq_len_ for t in tk_ ) __magic_name__ :Optional[int] = torch.tensor(tk_ ) # (bs, max_seq_len_) __magic_name__ :Optional[int] = torch.tensor(__lowerCAmelCase ) # (bs) return tk_t, lg_t
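The batch-collation method at the end of the dataset class above pads variable-length token-id arrays to the batch maximum. The same idea in isolation, with illustrative inputs:

import numpy as np
import torch


def pad_batch(token_ids, pad_idx):
    """Pad a list of 1-D id arrays to a (batch, max_len) tensor plus a lengths tensor."""
    lengths = [len(t) for t in token_ids]
    max_len = max(lengths)
    padded = [list(t) + [pad_idx] * (max_len - len(t)) for t in token_ids]
    return torch.tensor(padded), torch.tensor(lengths)


tk, lg = pad_batch([np.array([5, 6, 7]), np.array([5, 6])], pad_idx=0)
print(tk.shape, lg.tolist())  # torch.Size([2, 3]) [3, 2]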
0
0
def cocktail_shaker_sort(unsorted: list) -> list:
    """Sort a list in place using the bidirectional cocktail shaker sort."""
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False

        # backward pass: bubble the smallest remaining element toward the front
        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True

        # forward pass: bubble the largest remaining element toward the back
        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True

        if not swapped:
            break
    return unsorted


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(f"{cocktail_shaker_sort(unsorted) = }")
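A short usage example of the sort above; the alternating passes let out-of-place elements travel quickly toward either end:

print(cocktail_shaker_sort([4, 5, 2, 1, 2]))    # [1, 2, 2, 4, 5]
print(cocktail_shaker_sort([-4, 5, 0, 1, -2]))  # [-4, -2, 0, 1, 5]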
284
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/reformer-crime-and-punishment": (
            "https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model"
        )
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/reformer-crime-and-punishment": 524288,
}


class ReformerTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        eos_token="</s>",
        unk_token="<unk>",
        additional_special_tokens=[],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
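`convert_tokens_to_string` above decodes sentencepiece pieces while passing special tokens through verbatim. The same loop in isolation, with a stub decoder standing in for the real `sp_model` (an assumption for illustration):

def tokens_to_string(tokens, special_tokens, decode_pieces):
    """Decode pieces with the decoder, splicing special tokens through unchanged."""
    out, current = "", []
    for token in tokens:
        if token in special_tokens:
            out += decode_pieces(current) + token
            current = []
        else:
            current.append(token)
    out += decode_pieces(current)
    return out.strip()


# stub decoder: join pieces and turn the sentencepiece '▁' marker into spaces
decode = lambda pieces: "".join(pieces).replace("▁", " ")
print(tokens_to_string(["▁Hello", "▁world", "</s>"], {"</s>"}, decode))  # 'Hello world</s>'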
0
0
from typing import List, Optional, Tuple

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
    },
    "merges_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"allegro/herbert-base-cased": 514}
PRETRAINED_INIT_CONFIGURATION = {}


class HerbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sep_token="</s>",
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sep_token=sep_token,
            **kwargs,
        )

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
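A small illustration of the single- and pair-sequence layouts the methods above produce, using made-up token ids rather than the real vocabulary:

# Placeholder ids for illustration only: cls=0, sep=2.
cls_id, sep_id = [0], [2]
ids_a, ids_b = [11, 12], [21, 22, 23]

single = cls_id + ids_a + sep_id                 # [0, 11, 12, 2]
pair = cls_id + ids_a + sep_id + ids_b + sep_id  # [0, 11, 12, 2, 21, 22, 23, 2]
type_ids = len(cls_id + ids_a + sep_id) * [0] + len(ids_b + sep_id) * [1]
print(single, pair, type_ids, sep="\n")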
278
import os import unittest from transformers import MobileBertTokenizer, MobileBertTokenizerFast from transformers.models.bert.tokenization_bert import ( VOCAB_FILES_NAMES, BasicTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english @require_tokenizers class lowerCamelCase_ ( lowerCamelCase , unittest.TestCase ): a__ = MobileBertTokenizer a__ = MobileBertTokenizerFast a__ = True a__ = True a__ = filter_non_english a__ = '''google/mobilebert-uncased''' def A ( self ): """simple docstring""" super().setUp() __magic_name__ :Tuple = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] __magic_name__ :Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) __magic_name__ :List[str] = [ (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped for tokenizer_def in self.tokenizers_list ] def A ( self , __lowerCAmelCase ): """simple docstring""" __magic_name__ :Union[str, Any] = '''UNwant\u00E9d,running''' __magic_name__ :int = '''unwanted, running''' return input_text, output_text def A ( self ): """simple docstring""" __magic_name__ :Optional[int] = self.tokenizer_class(self.vocab_file ) __magic_name__ :List[Any] = tokenizer.tokenize('''UNwant\u00E9d,running''' ) self.assertListEqual(__lowerCAmelCase , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) , [9, 6, 7, 1_2, 1_0, 1_1] ) def A ( self ): """simple docstring""" if not self.test_rust_tokenizer: return __magic_name__ :int = self.get_tokenizer() __magic_name__ :Tuple = self.get_rust_tokenizer() __magic_name__ :List[str] = '''UNwant\u00E9d,running''' __magic_name__ :Optional[Any] = tokenizer.tokenize(__lowerCAmelCase ) __magic_name__ :List[Any] = rust_tokenizer.tokenize(__lowerCAmelCase ) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase ) __magic_name__ :int = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ) __magic_name__ :str = rust_tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase ) __magic_name__ :List[Any] = self.get_rust_tokenizer() __magic_name__ :Any = tokenizer.encode(__lowerCAmelCase ) __magic_name__ :Any = rust_tokenizer.encode(__lowerCAmelCase ) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase ) # With lower casing __magic_name__ :Any = self.get_tokenizer(do_lower_case=__lowerCAmelCase ) __magic_name__ :List[Any] = self.get_rust_tokenizer(do_lower_case=__lowerCAmelCase ) __magic_name__ :Dict = '''UNwant\u00E9d,running''' __magic_name__ :Tuple = tokenizer.tokenize(__lowerCAmelCase ) __magic_name__ :Union[str, Any] = rust_tokenizer.tokenize(__lowerCAmelCase ) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase ) __magic_name__ :Optional[Any] = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ) __magic_name__ :Dict = rust_tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase ) __magic_name__ :Tuple = 
self.get_rust_tokenizer() __magic_name__ :Dict = tokenizer.encode(__lowerCAmelCase ) __magic_name__ :List[Any] = rust_tokenizer.encode(__lowerCAmelCase ) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase ) def A ( self ): """simple docstring""" __magic_name__ :Optional[int] = BasicTokenizer() self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] ) def A ( self ): """simple docstring""" __magic_name__ :List[Any] = BasicTokenizer(do_lower_case=__lowerCAmelCase ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] ) def A ( self ): """simple docstring""" __magic_name__ :Union[str, Any] = BasicTokenizer(do_lower_case=__lowerCAmelCase , strip_accents=__lowerCAmelCase ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] ) def A ( self ): """simple docstring""" __magic_name__ :Dict = BasicTokenizer(do_lower_case=__lowerCAmelCase , strip_accents=__lowerCAmelCase ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] ) def A ( self ): """simple docstring""" __magic_name__ :Optional[int] = BasicTokenizer(do_lower_case=__lowerCAmelCase ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] ) def A ( self ): """simple docstring""" __magic_name__ :List[str] = BasicTokenizer(do_lower_case=__lowerCAmelCase ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def A ( self ): """simple docstring""" __magic_name__ :int = BasicTokenizer(do_lower_case=__lowerCAmelCase , strip_accents=__lowerCAmelCase ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def A ( self ): """simple docstring""" __magic_name__ :Optional[int] = BasicTokenizer(do_lower_case=__lowerCAmelCase , strip_accents=__lowerCAmelCase ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def A ( self ): """simple docstring""" __magic_name__ :Optional[Any] = BasicTokenizer(do_lower_case=__lowerCAmelCase , never_split=['''[UNK]'''] ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? 
[UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] ) def A ( self ): """simple docstring""" __magic_name__ :int = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing'''] __magic_name__ :Union[str, Any] = {} for i, token in enumerate(__lowerCAmelCase ): __magic_name__ :Tuple = i __magic_name__ :List[Any] = WordpieceTokenizer(vocab=__lowerCAmelCase , unk_token='''[UNK]''' ) self.assertListEqual(tokenizer.tokenize('''''' ) , [] ) self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] ) self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] ) def A ( self ): """simple docstring""" self.assertTrue(_is_whitespace(''' ''' ) ) self.assertTrue(_is_whitespace('''\t''' ) ) self.assertTrue(_is_whitespace('''\r''' ) ) self.assertTrue(_is_whitespace('''\n''' ) ) self.assertTrue(_is_whitespace('''\u00A0''' ) ) self.assertFalse(_is_whitespace('''A''' ) ) self.assertFalse(_is_whitespace('''-''' ) ) def A ( self ): """simple docstring""" self.assertTrue(_is_control('''\u0005''' ) ) self.assertFalse(_is_control('''A''' ) ) self.assertFalse(_is_control(''' ''' ) ) self.assertFalse(_is_control('''\t''' ) ) self.assertFalse(_is_control('''\r''' ) ) def A ( self ): """simple docstring""" self.assertTrue(_is_punctuation('''-''' ) ) self.assertTrue(_is_punctuation('''$''' ) ) self.assertTrue(_is_punctuation('''`''' ) ) self.assertTrue(_is_punctuation('''.''' ) ) self.assertFalse(_is_punctuation('''A''' ) ) self.assertFalse(_is_punctuation(''' ''' ) ) def A ( self ): """simple docstring""" __magic_name__ :Any = self.get_tokenizer() __magic_name__ :Any = self.get_rust_tokenizer() # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340 self.assertListEqual([tokenizer.tokenize(__lowerCAmelCase ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] ) self.assertListEqual( [rust_tokenizer.tokenize(__lowerCAmelCase ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] ) @slow def A ( self ): """simple docstring""" __magic_name__ :Optional[int] = self.tokenizer_class.from_pretrained('''google/mobilebert-uncased''' ) __magic_name__ :Optional[int] = tokenizer.encode('''sequence builders''' , add_special_tokens=__lowerCAmelCase ) __magic_name__ :List[Any] = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__lowerCAmelCase ) __magic_name__ :Union[str, Any] = tokenizer.build_inputs_with_special_tokens(__lowerCAmelCase ) __magic_name__ :List[Any] = tokenizer.build_inputs_with_special_tokens(__lowerCAmelCase , __lowerCAmelCase ) assert encoded_sentence == [1_0_1] + text + [1_0_2] assert encoded_pair == [1_0_1] + text + [1_0_2] + text_a + [1_0_2] def A ( self ): """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): __magic_name__ :Optional[Any] = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase ) __magic_name__ :Optional[int] = F'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.''' __magic_name__ :Optional[Any] = tokenizer_r.encode_plus( __lowerCAmelCase , return_attention_mask=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , ) __magic_name__ :Any = 
tokenizer_r.do_lower_case if hasattr(__lowerCAmelCase , '''do_lower_case''' ) else False __magic_name__ :Optional[int] = ( [ ((0, 0), tokenizer_r.cls_token), ((0, 1), '''A'''), ((1, 2), ''','''), ((3, 5), '''na'''), ((5, 6), '''##ï'''), ((6, 8), '''##ve'''), ((9, 1_5), tokenizer_r.mask_token), ((1_6, 2_1), '''Allen'''), ((2_1, 2_3), '''##NL'''), ((2_3, 2_4), '''##P'''), ((2_5, 3_3), '''sentence'''), ((3_3, 3_4), '''.'''), ((0, 0), tokenizer_r.sep_token), ] if not do_lower_case else [ ((0, 0), tokenizer_r.cls_token), ((0, 1), '''a'''), ((1, 2), ''','''), ((3, 8), '''naive'''), ((9, 1_5), tokenizer_r.mask_token), ((1_6, 2_1), '''allen'''), ((2_1, 2_3), '''##nl'''), ((2_3, 2_4), '''##p'''), ((2_5, 3_3), '''sentence'''), ((3_3, 3_4), '''.'''), ((0, 0), tokenizer_r.sep_token), ] ) self.assertEqual( [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) ) self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] ) def A ( self ): """simple docstring""" __magic_name__ :Dict = ['''的''', '''人''', '''有'''] __magic_name__ :Any = ''''''.join(__lowerCAmelCase ) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): __magic_name__ :Optional[Any] = True __magic_name__ :Optional[int] = self.tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase ) __magic_name__ :Tuple = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase ) __magic_name__ :Dict = tokenizer_p.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ) __magic_name__ :List[str] = tokenizer_r.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ) __magic_name__ :Dict = tokenizer_r.convert_ids_to_tokens(__lowerCAmelCase ) __magic_name__ :Union[str, Any] = tokenizer_p.convert_ids_to_tokens(__lowerCAmelCase ) # it is expected that each Chinese character is not preceded by "##" self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase ) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase ) __magic_name__ :List[str] = False __magic_name__ :Tuple = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase ) __magic_name__ :List[str] = self.tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase ) __magic_name__ :Optional[Any] = tokenizer_r.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ) __magic_name__ :Union[str, Any] = tokenizer_p.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ) __magic_name__ :List[str] = tokenizer_r.convert_ids_to_tokens(__lowerCAmelCase ) __magic_name__ :Optional[int] = tokenizer_p.convert_ids_to_tokens(__lowerCAmelCase ) # it is expected that only the first Chinese character is not preceded by "##". __magic_name__ :Dict = [ F'''##{token}''' if idx != 0 else token for idx, token in enumerate(__lowerCAmelCase ) ] self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase ) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
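The `WordpieceTokenizer` assertions in the test class above rely on greedy longest-match-first subword splitting. A minimal sketch of that algorithm (not the transformers implementation):

def wordpiece(word, vocab, unk="[UNK]"):
    """Greedy longest-match-first wordpiece split of a single word."""
    pieces, start = [], 0
    while start < len(word):
        end = len(word)
        cur = None
        while start < end:
            piece = word[start:end]
            if start > 0:
                piece = "##" + piece  # continuation pieces carry the '##' prefix
            if piece in vocab:
                cur = piece
                break
            end -= 1
        if cur is None:
            return [unk]  # any unmatchable span marks the whole word unknown
        pieces.append(cur)
        start = end
    return pieces


vocab = {"un", "##want", "##ed", "runn", "##ing"}
print(wordpiece("unwanted", vocab))   # ['un', '##want', '##ed']
print(wordpiece("unwantedX", vocab))  # ['[UNK]']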
0
0
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import (
    BertTokenizer,
    ViltConfig,
    ViltForImageAndTextRetrieval,
    ViltForImagesAndTextClassification,
    ViltForMaskedLM,
    ViltForQuestionAnswering,
    ViltImageProcessor,
    ViltProcessor,
)
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys(config, vqa_model=False, nlvr_model=False, irtr_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"transformer.blocks.{i}.norm1.weight", f"vilt.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"transformer.blocks.{i}.norm1.bias", f"vilt.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append(
            (f"transformer.blocks.{i}.attn.proj.weight", f"vilt.encoder.layer.{i}.attention.output.dense.weight")
        )
        rename_keys.append(
            (f"transformer.blocks.{i}.attn.proj.bias", f"vilt.encoder.layer.{i}.attention.output.dense.bias")
        )
        rename_keys.append((f"transformer.blocks.{i}.norm2.weight", f"vilt.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"transformer.blocks.{i}.norm2.bias", f"vilt.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append(
            (f"transformer.blocks.{i}.mlp.fc1.weight", f"vilt.encoder.layer.{i}.intermediate.dense.weight")
        )
        rename_keys.append((f"transformer.blocks.{i}.mlp.fc1.bias", f"vilt.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"transformer.blocks.{i}.mlp.fc2.weight", f"vilt.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"transformer.blocks.{i}.mlp.fc2.bias", f"vilt.encoder.layer.{i}.output.dense.bias"))

    # embeddings
    rename_keys.extend(
        [
            # text embeddings
            ("text_embeddings.word_embeddings.weight", "vilt.embeddings.text_embeddings.word_embeddings.weight"),
            (
                "text_embeddings.position_embeddings.weight",
                "vilt.embeddings.text_embeddings.position_embeddings.weight",
            ),
            ("text_embeddings.position_ids", "vilt.embeddings.text_embeddings.position_ids"),
            (
                "text_embeddings.token_type_embeddings.weight",
                "vilt.embeddings.text_embeddings.token_type_embeddings.weight",
            ),
            ("text_embeddings.LayerNorm.weight", "vilt.embeddings.text_embeddings.LayerNorm.weight"),
            ("text_embeddings.LayerNorm.bias", "vilt.embeddings.text_embeddings.LayerNorm.bias"),
            # patch embeddings
            ("transformer.cls_token", "vilt.embeddings.cls_token"),
            ("transformer.patch_embed.proj.weight", "vilt.embeddings.patch_embeddings.projection.weight"),
            ("transformer.patch_embed.proj.bias", "vilt.embeddings.patch_embeddings.projection.bias"),
            ("transformer.pos_embed", "vilt.embeddings.position_embeddings"),
            # token type embeddings
            ("token_type_embeddings.weight", "vilt.embeddings.token_type_embeddings.weight"),
        ]
    )

    # final layernorm + pooler
    rename_keys.extend(
        [
            ("transformer.norm.weight", "vilt.layernorm.weight"),
            ("transformer.norm.bias", "vilt.layernorm.bias"),
            ("pooler.dense.weight", "vilt.pooler.dense.weight"),
            ("pooler.dense.bias", "vilt.pooler.dense.bias"),
        ]
    )

    # classifier head(s)
    if vqa_model:
        # classification head
        rename_keys.extend(
            [
                ("vqa_classifier.0.weight", "classifier.0.weight"),
                ("vqa_classifier.0.bias", "classifier.0.bias"),
                ("vqa_classifier.1.weight", "classifier.1.weight"),
                ("vqa_classifier.1.bias", "classifier.1.bias"),
                ("vqa_classifier.3.weight", "classifier.3.weight"),
                ("vqa_classifier.3.bias", "classifier.3.bias"),
            ]
        )
    elif nlvr_model:
        # classification head
        rename_keys.extend(
            [
                ("nlvr2_classifier.0.weight", "classifier.0.weight"),
                ("nlvr2_classifier.0.bias", "classifier.0.bias"),
                ("nlvr2_classifier.1.weight", "classifier.1.weight"),
                ("nlvr2_classifier.1.bias", "classifier.1.bias"),
                ("nlvr2_classifier.3.weight", "classifier.3.weight"),
                ("nlvr2_classifier.3.bias", "classifier.3.bias"),
            ]
        )
    else:
        pass

    return rename_keys


def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        prefix = "vilt."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


@torch.no_grad()
def convert_vilt_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    """Copy/paste/tweak the original ViLT weights into our ViLT structure."""
    config = ViltConfig(image_size=384, patch_size=32, tie_word_embeddings=False)
    mlm_model = False
    vqa_model = False
    nlvr_model = False
    irtr_model = False
    if "vqa" in checkpoint_url:
        vqa_model = True
        config.num_labels = 3129
        repo_id = "huggingface/label-files"
        filename = "vqa2-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        model = ViltForQuestionAnswering(config)
    elif "nlvr" in checkpoint_url:
        nlvr_model = True
        config.num_labels = 2
        config.id2label = {0: "False", 1: "True"}
        config.label2id = {v: k for k, v in config.id2label.items()}
        config.modality_type_vocab_size = 3
        model = ViltForImagesAndTextClassification(config)
    elif "irtr" in checkpoint_url:
        irtr_model = True
        model = ViltForImageAndTextRetrieval(config)
    elif "mlm_itm" in checkpoint_url:
        mlm_model = True
        model = ViltForMaskedLM(config)
    else:
        raise ValueError("Unknown model type")

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]
    rename_keys = create_rename_keys(config, vqa_model, nlvr_model, irtr_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config)
    if mlm_model or irtr_model:
        ignore_keys = ["itm_score.fc.weight", "itm_score.fc.bias"]
        for k in ignore_keys:
            state_dict.pop(k, None)

    # load state dict into HuggingFace model
    model.eval()
    if mlm_model:
        missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
        assert missing_keys == ["mlm_score.decoder.bias"]
    else:
        model.load_state_dict(state_dict)

    # define processor
    image_processor = ViltImageProcessor(size=384)
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    processor = ViltProcessor(image_processor, tokenizer)

    # forward pass on example inputs (image + text)
    if nlvr_model:
        image1 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        image2 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        text = (
            "The left image contains twice the number of dogs as the right image, and at least two dogs in total are"
            " standing."
        )
        encoding_1 = processor(image1, text, return_tensors="pt")
        encoding_2 = processor(image2, text, return_tensors="pt")
        outputs = model(
            input_ids=encoding_1.input_ids,
            pixel_values=encoding_1.pixel_values,
            pixel_values_2=encoding_2.pixel_values,
        )
    else:
        image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
        if mlm_model:
            text = "a bunch of [MASK] laying on a [MASK]."
        else:
            text = "How many cats are there?"
        encoding = processor(image, text, return_tensors="pt")
        outputs = model(**encoding)

    # verify outputs
    if mlm_model:
        expected_shape = torch.Size([1, 11, 30522])
        expected_slice = torch.tensor([-12.5061, -12.5123, -12.5174])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, 0, :3], expected_slice, atol=1e-4)

        # verify masked token prediction equals "cats"
        predicted_id = outputs.logits[0, 4, :].argmax(-1).item()
        assert tokenizer.decode([predicted_id]) == "cats"
    elif vqa_model:
        expected_shape = torch.Size([1, 3129])
        expected_slice = torch.tensor([-15.9495, -18.1472, -10.3041])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)

        # verify vqa prediction equals "2"
        predicted_idx = outputs.logits.argmax(-1).item()
        assert model.config.id2label[predicted_idx] == "2"
    elif nlvr_model:
        expected_shape = torch.Size([1, 2])
        expected_slice = torch.tensor([-2.8721, 2.1291])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model and processor to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt",
        type=str,
        help="URL of the checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
230
import logging
import os

import quant_trainer
import torch
from torch.utils.data import DataLoader

from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput


logger = logging.getLogger(__name__)

if is_torch_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm
    import torch_xla.debug.metrics as met


class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, quant_trainer_args=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
        self.quant_trainer_args = quant_trainer_args
        self.calib_num = 128  # default number of calibration samples

    def get_calib_dataloader(self, calib_dataset=None):
        if calib_dataset is None and self.calib_dataset is None:
            raise ValueError("Trainer: calibration requires an calib_dataset.")
        calib_dataset = calib_dataset if calib_dataset is not None else self.calib_dataset

        calib_dataset = self._remove_unused_columns(calib_dataset, description="Calibration")

        return DataLoader(
            calib_dataset,
            batch_size=self.args.eval_batch_size,
            collate_fn=self.data_collator,
            drop_last=self.args.dataloader_drop_last,
            num_workers=self.args.dataloader_num_workers,
            pin_memory=self.args.dataloader_pin_memory,
            shuffle=True,
        )

    def calibrate(self, calib_dataset=None):
        calib_dataset = self.train_dataset if calib_dataset is None else calib_dataset
        calib_dataloader = self.get_calib_dataloader(calib_dataset)

        model = self.model
        quant_trainer.configure_model(model, self.quant_trainer_args, calib=True)
        model.eval()
        quant_trainer.enable_calibration(model)

        logger.info("***** Running calibration *****")
        logger.info(f"  Num examples = {self.calib_num}")
        logger.info(f"  Batch size = {calib_dataloader.batch_size}")

        for step, inputs in enumerate(calib_dataloader):
            # Prediction step
            loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only=True)
            if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
                break

        quant_trainer.finish_calibration(model, self.quant_trainer_args)
        self.model = model

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
            )
        finally:
            self.compute_metrics = compute_metrics

        if self.post_process_function is not None and self.compute_metrics is not None:
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)

            self.log(metrics)
        else:
            metrics = {}

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
            )
        finally:
            self.compute_metrics = compute_metrics

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)

        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)

    def save_onnx(self, output_dir="./"):
        eval_dataset = self.eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)

        batch = next(iter(eval_dataloader))

        # saving device - to make it consistent
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        # convert to tuple
        input_tuple = tuple(v.to(device) for k, v in batch.items())

        logger.info("Converting model to be onnx compatible")
        from pytorch_quantization.nn import TensorQuantizer

        TensorQuantizer.use_fb_fake_quant = True

        model = self.model.to(device)
        model.eval()
        model.float()

        model_to_save = model.module if hasattr(model, "module") else model
        quant_trainer.configure_model(model_to_save, self.quant_trainer_args)

        output_model_file = os.path.join(output_dir, "model.onnx")
        logger.info(f"exporting model to {output_model_file}")

        axes = {0: "batch_size", 1: "seq_len"}
        torch.onnx.export(
            model_to_save,
            input_tuple,
            output_model_file,
            export_params=True,
            opset_version=13,
            do_constant_folding=True,
            input_names=["input_ids", "attention_mask", "token_type_ids"],
            output_names=["output_start_logits", "output_end_logits"],
            dynamic_axes={
                "input_ids": axes,
                "attention_mask": axes,
                "token_type_ids": axes,
                "output_start_logits": axes,
                "output_end_logits": axes,
            },
            verbose=True,
        )
        logger.info("onnx export finished")
0
0
"""simple docstring""" import argparse import os import jax as jnp import numpy as onp import torch import torch.nn as nn from music_spectrogram_diffusion import inference from tax import checkpoints from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder a__ : Optional[int] = """base_with_context""" def A__ ( __lowerCamelCase, __lowerCamelCase ): """simple docstring""" _lowerCAmelCase = nn.Parameter(torch.FloatTensor(weights['token_embedder']['embedding'] ) ) _lowerCAmelCase = nn.Parameter( torch.FloatTensor(weights['Embed_0']['embedding'] ), requires_grad=__lowerCamelCase ) for lyr_num, lyr in enumerate(model.encoders ): _lowerCAmelCase = weights[F'''layers_{lyr_num}'''] _lowerCAmelCase = nn.Parameter( torch.FloatTensor(ly_weight['pre_attention_layer_norm']['scale'] ) ) _lowerCAmelCase = ly_weight['''attention'''] _lowerCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) ) _lowerCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) ) _lowerCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) ) _lowerCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) ) _lowerCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) ) _lowerCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) ) _lowerCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) ) _lowerCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) ) _lowerCAmelCase = nn.Parameter(torch.FloatTensor(weights['encoder_norm']['scale'] ) ) return model def A__ ( __lowerCamelCase, __lowerCamelCase ): """simple docstring""" _lowerCAmelCase = nn.Parameter(torch.FloatTensor(weights['input_proj']['kernel'].T ) ) _lowerCAmelCase = nn.Parameter( torch.FloatTensor(weights['Embed_0']['embedding'] ), requires_grad=__lowerCamelCase ) for lyr_num, lyr in enumerate(model.encoders ): _lowerCAmelCase = weights[F'''layers_{lyr_num}'''] _lowerCAmelCase = ly_weight['''attention'''] _lowerCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) ) _lowerCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) ) _lowerCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) ) _lowerCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) ) _lowerCAmelCase = nn.Parameter( torch.FloatTensor(ly_weight['pre_attention_layer_norm']['scale'] ) ) _lowerCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) ) _lowerCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) ) _lowerCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) ) _lowerCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) ) _lowerCAmelCase = nn.Parameter(torch.FloatTensor(weights['encoder_norm']['scale'] ) ) return model def A__ ( __lowerCamelCase, __lowerCamelCase ): """simple docstring""" _lowerCAmelCase = nn.Parameter(torch.FloatTensor(weights['time_emb_dense0']['kernel'].T ) ) _lowerCAmelCase = nn.Parameter(torch.FloatTensor(weights['time_emb_dense1']['kernel'].T ) ) _lowerCAmelCase = nn.Parameter( torch.FloatTensor(weights['Embed_0']['embedding'] ), requires_grad=__lowerCamelCase ) _lowerCAmelCase = nn.Parameter( 
torch.FloatTensor(weights['continuous_inputs_projection']['kernel'].T ) ) for lyr_num, lyr in enumerate(model.decoders ): _lowerCAmelCase = weights[F'''layers_{lyr_num}'''] _lowerCAmelCase = nn.Parameter( torch.FloatTensor(ly_weight['pre_self_attention_layer_norm']['scale'] ) ) _lowerCAmelCase = nn.Parameter( torch.FloatTensor(ly_weight['FiLMLayer_0']['DenseGeneral_0']['kernel'].T ) ) _lowerCAmelCase = ly_weight['''self_attention'''] _lowerCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) ) _lowerCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) ) _lowerCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) ) _lowerCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) ) _lowerCAmelCase = ly_weight['''MultiHeadDotProductAttention_0'''] _lowerCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) ) _lowerCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) ) _lowerCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) ) _lowerCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) ) _lowerCAmelCase = nn.Parameter( torch.FloatTensor(ly_weight['pre_cross_attention_layer_norm']['scale'] ) ) _lowerCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) ) _lowerCAmelCase = nn.Parameter( torch.FloatTensor(ly_weight['FiLMLayer_1']['DenseGeneral_0']['kernel'].T ) ) _lowerCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) ) _lowerCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) ) _lowerCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) ) _lowerCAmelCase = nn.Parameter(torch.FloatTensor(weights['decoder_norm']['scale'] ) ) _lowerCAmelCase = nn.Parameter(torch.FloatTensor(weights['spec_out_dense']['kernel'].T ) ) return model def A__ ( __lowerCamelCase ): """simple docstring""" _lowerCAmelCase = checkpoints.load_tax_checkpoint(args.checkpoint_path ) _lowerCAmelCase = jnp.tree_util.tree_map(onp.array, __lowerCamelCase ) _lowerCAmelCase = [ '''from __gin__ import dynamic_registration''', '''from music_spectrogram_diffusion.models.diffusion import diffusion_utils''', '''diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0''', '''diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()''', ] _lowerCAmelCase = os.path.join(args.checkpoint_path, '..', 'config.gin' ) _lowerCAmelCase = inference.parse_training_gin_file(__lowerCamelCase, __lowerCamelCase ) _lowerCAmelCase = inference.InferenceModel(args.checkpoint_path, __lowerCamelCase ) _lowerCAmelCase = DDPMScheduler(beta_schedule='squaredcos_cap_v2', variance_type='fixed_large' ) _lowerCAmelCase = SpectrogramNotesEncoder( max_length=synth_model.sequence_length['inputs'], vocab_size=synth_model.model.module.config.vocab_size, d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj='gated-gelu', ) _lowerCAmelCase = SpectrogramContEncoder( input_dims=synth_model.audio_codec.n_dims, targets_context_length=synth_model.sequence_length['targets_context'], 
d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj='gated-gelu', ) _lowerCAmelCase = TaFilmDecoder( input_dims=synth_model.audio_codec.n_dims, targets_length=synth_model.sequence_length['targets_context'], max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time, d_model=synth_model.model.module.config.emb_dim, num_layers=synth_model.model.module.config.num_decoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, dropout_rate=synth_model.model.module.config.dropout_rate, ) _lowerCAmelCase = load_notes_encoder(ta_checkpoint['target']['token_encoder'], __lowerCamelCase ) _lowerCAmelCase = load_continuous_encoder(ta_checkpoint['target']['continuous_encoder'], __lowerCamelCase ) _lowerCAmelCase = load_decoder(ta_checkpoint['target']['decoder'], __lowerCamelCase ) _lowerCAmelCase = OnnxRuntimeModel.from_pretrained('kashif/soundstream_mel_decoder' ) _lowerCAmelCase = SpectrogramDiffusionPipeline( notes_encoder=__lowerCamelCase, continuous_encoder=__lowerCamelCase, decoder=__lowerCamelCase, scheduler=__lowerCamelCase, melgan=__lowerCamelCase, ) if args.save: pipe.save_pretrained(args.output_path ) if __name__ == "__main__": a__ : str = argparse.ArgumentParser() parser.add_argument("""--output_path""", default=None, type=str, required=True, help="""Path to the converted model.""") parser.add_argument( """--save""", default=True, type=bool, required=False, help="""Whether to save the converted model or not.""" ) parser.add_argument( """--checkpoint_path""", default=f'{MODEL}/checkpoint_500000', type=str, required=False, help="""Path to the original jax model checkpoint.""", ) a__ : Dict = parser.parse_args() main(args)
589
def base16_encode(data: bytes) -> str:
    """Encode bytes as an uppercase base16 (hex) string."""
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    """Decode an uppercase base16 (hex) string back into bytes."""
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid: Data does not have an even number of hex digits."
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid: Data is not uppercase hex or it contains invalid characters."
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
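A quick round-trip check for the two helpers above (names as in the cleaned-up cell; the sample bytes are illustrative):

    assert base16_encode(b"Hello World!") == "48656C6C6F20576F726C6421"
    assert base16_decode("48656C6C6F20576F726C6421") == b"Hello World!"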
0
0
from typing import TYPE_CHECKING

from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable


_import_structure = {
    "configuration_gpt_neox_japanese": ["GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXJapaneseConfig"],
    "tokenization_gpt_neox_japanese": ["GPTNeoXJapaneseTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_neox_japanese"] = [
        "GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoXJapaneseForCausalLM",
        "GPTNeoXJapaneseLayer",
        "GPTNeoXJapaneseModel",
        "GPTNeoXJapanesePreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
    from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neox_japanese import (
            GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoXJapaneseForCausalLM,
            GPTNeoXJapaneseLayer,
            GPTNeoXJapaneseModel,
            GPTNeoXJapanesePreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
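For context, a minimal sketch of what the lazy structure above buys — the submodule import is deferred until an attribute is first touched (the import path is assumed for illustration):

    import transformers.models.gpt_neox_japanese as gptnj  # cheap: torch is not imported yet

    config = gptnj.GPTNeoXJapaneseConfig()  # first attribute access triggers the real import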
315
import pytest
import requests

from datasets.utils.file_utils import http_head

from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline


@pytest.mark.integration
def test_offline_with_timeout():
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request("GET", "https://huggingface.co")
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request("GET", "https://huggingface.co", timeout=1.0)


@pytest.mark.integration
def test_offline_with_connection_error():
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request("GET", "https://huggingface.co")


def test_offline_with_datasets_offline_mode_enabled():
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(ConnectionError):
            http_head("https://huggingface.co")
0
0
from math import isqrt


def calculate_prime_numbers(max_number: int) -> list:
    """Return all primes strictly below max_number, using a sieve of Eratosthenes."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(max_number: int = 10**8) -> int:
    """Count the semiprimes (products of exactly two primes) below max_number."""
    prime_numbers = calculate_prime_numbers(max_number // 2)
    semiprimes_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left <= right:
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1
    return semiprimes_count


if __name__ == "__main__":
    print(f"{solution() = }")
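A hand-checked example for the two-pointer count above: the semiprimes below 30 are 4, 6, 9, 10, 14, 15, 21, 22, 25 and 26, so:

    assert solution(30) == 10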
209
import math
from collections.abc import Iterator
from itertools import takewhile


def is_prime(number: int) -> bool:
    """Return True if the given number is prime."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator() -> Iterator[int]:
    """Yield the primes in increasing order."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(n: int = 2_000_000) -> int:
    """Return the sum of all primes below n (Project Euler problem 10)."""
    return sum(takewhile(lambda x: x < n, prime_generator()))


if __name__ == "__main__":
    print(f"{solution() = }")
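A small sanity check, plus the known Project Euler 10 answer for reference:

    assert solution(10) == 17  # 2 + 3 + 5 + 7
    # solution() with the default bound of 2_000_000 returns 142913828922 (slow with this naive generator)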
0
0
import logging
import os

import quant_trainer
import torch
from torch.utils.data import DataLoader

from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput


logger = logging.getLogger(__name__)

if is_torch_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm
    import torch_xla.debug.metrics as met


class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, quant_trainer_args=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
        self.quant_trainer_args = quant_trainer_args
        self.calib_num = 128  # default number of calibration samples

    def get_calib_dataloader(self, calib_dataset=None):
        if calib_dataset is None and self.calib_dataset is None:
            raise ValueError("Trainer: calibration requires an calib_dataset.")
        calib_dataset = calib_dataset if calib_dataset is not None else self.calib_dataset

        calib_dataset = self._remove_unused_columns(calib_dataset, description="Calibration")

        return DataLoader(
            calib_dataset,
            batch_size=self.args.eval_batch_size,
            collate_fn=self.data_collator,
            drop_last=self.args.dataloader_drop_last,
            num_workers=self.args.dataloader_num_workers,
            pin_memory=self.args.dataloader_pin_memory,
            shuffle=True,
        )

    def calibrate(self, calib_dataset=None):
        calib_dataset = self.train_dataset if calib_dataset is None else calib_dataset
        calib_dataloader = self.get_calib_dataloader(calib_dataset)

        model = self.model
        quant_trainer.configure_model(model, self.quant_trainer_args, calib=True)
        model.eval()
        quant_trainer.enable_calibration(model)

        logger.info("***** Running calibration *****")
        logger.info(f"  Num examples = {self.calib_num}")
        logger.info(f"  Batch size = {calib_dataloader.batch_size}")

        for step, inputs in enumerate(calib_dataloader):
            # Prediction step
            loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only=True)
            if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
                break

        quant_trainer.finish_calibration(model, self.quant_trainer_args)
        self.model = model

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
            )
        finally:
            self.compute_metrics = compute_metrics

        if self.post_process_function is not None and self.compute_metrics is not None:
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)

            self.log(metrics)
        else:
            metrics = {}

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
            )
        finally:
            self.compute_metrics = compute_metrics

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)

        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)

    def save_onnx(self, output_dir="./"):
        eval_dataset = self.eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)

        batch = next(iter(eval_dataloader))

        # saving device - to make it consistent
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        # convert to tuple
        input_tuple = tuple(v.to(device) for k, v in batch.items())

        logger.info("Converting model to be onnx compatible")
        from pytorch_quantization.nn import TensorQuantizer

        TensorQuantizer.use_fb_fake_quant = True

        model = self.model.to(device)
        model.eval()
        model.float()

        model_to_save = model.module if hasattr(model, "module") else model
        quant_trainer.configure_model(model_to_save, self.quant_trainer_args)

        output_model_file = os.path.join(output_dir, "model.onnx")
        logger.info(f"exporting model to {output_model_file}")

        axes = {0: "batch_size", 1: "seq_len"}
        torch.onnx.export(
            model_to_save,
            input_tuple,
            output_model_file,
            export_params=True,
            opset_version=13,
            do_constant_folding=True,
            input_names=["input_ids", "attention_mask", "token_type_ids"],
            output_names=["output_start_logits", "output_end_logits"],
            dynamic_axes={
                "input_ids": axes,
                "attention_mask": axes,
                "token_type_ids": axes,
                "output_start_logits": axes,
                "output_end_logits": axes,
            },
            verbose=True,
        )
        logger.info("onnx export finished")
699
import unittest

import numpy as np

from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
    expand_dims,
    flatten_dict,
    is_flax_available,
    is_tf_available,
    is_torch_available,
    reshape,
    squeeze,
    transpose,
)


if is_flax_available():
    import jax.numpy as jnp

if is_tf_available():
    import tensorflow as tf

if is_torch_available():
    import torch


class GenericTester(unittest.TestCase):
    def test_flatten_dict(self):
        input_dict = {
            "task_specific_params": {
                "summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4},
                "summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4},
                "summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6},
            }
        }
        expected_dict = {
            "task_specific_params.summarization.length_penalty": 1.0,
            "task_specific_params.summarization.max_length": 128,
            "task_specific_params.summarization.min_length": 12,
            "task_specific_params.summarization.num_beams": 4,
            "task_specific_params.summarization_cnn.length_penalty": 2.0,
            "task_specific_params.summarization_cnn.max_length": 142,
            "task_specific_params.summarization_cnn.min_length": 56,
            "task_specific_params.summarization_cnn.num_beams": 4,
            "task_specific_params.summarization_xsum.length_penalty": 1.0,
            "task_specific_params.summarization_xsum.max_length": 62,
            "task_specific_params.summarization_xsum.min_length": 11,
            "task_specific_params.summarization_xsum.num_beams": 6,
        }

        self.assertEqual(flatten_dict(input_dict), expected_dict)

    def test_transpose_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(transpose(x), x.transpose()))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), x.transpose((1, 2, 0))))

    @require_torch
    def test_transpose_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_tf
    def test_transpose_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_flax
    def test_transpose_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x), np.asarray(transpose(t))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), np.asarray(transpose(t, axes=(1, 2, 0)))))

    def test_reshape_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.reshape(x, (4, 3))))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.reshape(x, (12, 5))))

    @require_torch
    def test_reshape_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_tf
    def test_reshape_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_flax
    def test_reshape_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.asarray(reshape(t, (4, 3)))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.asarray(reshape(t, (12, 5)))))

    def test_squeeze_numpy(self):
        x = np.random.randn(1, 3, 4)
        self.assertTrue(np.allclose(squeeze(x), np.squeeze(x)))

        x = np.random.randn(1, 4, 1, 5)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.squeeze(x, axis=2)))

    @require_torch
    def test_squeeze_torch(self):
        x = np.random.randn(1, 3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_tf
    def test_squeeze_tf(self):
        x = np.random.randn(1, 3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_flax
    def test_squeeze_flax(self):
        x = np.random.randn(1, 3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x), np.asarray(squeeze(t))))

        x = np.random.randn(1, 4, 1, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.asarray(squeeze(t, axis=2))))

    def test_expand_dims_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.expand_dims(x, axis=1)))

    @require_torch
    def test_expand_dims_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_tf
    def test_expand_dims_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_flax
    def test_expand_dims_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.asarray(expand_dims(t, axis=1))))
0
0
def bin_to_octal(bin_string: str) -> str:
    """Convert a binary string to its octal representation."""
    if not all(char in "01" for char in bin_string):
        raise ValueError("Non-binary value was passed to the function")
    if not bin_string:
        raise ValueError("Empty string was passed to the function")
    oct_string = ""
    while len(bin_string) % 3 != 0:
        bin_string = "0" + bin_string
    bin_string_in_3_list = [
        bin_string[index : index + 3] for index in range(len(bin_string)) if index % 3 == 0
    ]
    for bin_group in bin_string_in_3_list:
        oct_val = 0
        for index, val in enumerate(bin_group):
            oct_val += int(2 ** (2 - index) * int(val))
        oct_string += str(oct_val)
    return oct_string


if __name__ == "__main__":
    from doctest import testmod

    testmod()
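Two worked examples for the converter above, easy to verify by hand:

    assert bin_to_octal("1111") == "17"    # 0b1111 == 0o17 == 15
    assert bin_to_octal("101010") == "52"  # 0b101010 == 0o52 == 42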
592
from pathlib import PurePosixPath
from typing import Optional

import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo

from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url


class HfFileSystem(AbstractFileSystem):
    root_marker = ""
    protocol = "hf-legacy"  # "hf://" is reserved for hffs

    def __init__(
        self,
        repo_info: Optional[DatasetInfo] = None,
        token: Optional[str] = None,
        **kwargs,
    ):
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None

    def _get_dirs(self):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                self.dir_cache.update(
                    {
                        str(d): {"name": str(d), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    }
                )

    def _open(self, path: str, mode: str = "rb", **kwargs):
        if not isinstance(self.repo_info, DatasetInfo):
            raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}")
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
        return fsspec.open(
            url,
            mode=mode,
            headers=get_authentication_headers_for_url(url, use_auth_token=self.token),
            client_kwargs={"trust_env": True},
        ).open()

    def info(self, path, **kwargs):
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)

    def ls(self, path, detail=False, **kwargs):
        self._get_dirs()
        path = PurePosixPath(path.strip("/"))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("/"))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out)
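A hedged usage sketch for the class above; the repo id is illustrative and assumes a public dataset whose metadata (with populated siblings) can be fetched:

    from huggingface_hub import HfApi

    info = HfApi().dataset_info("squad")  # DatasetInfo carrying the list of repo files
    fs = HfFileSystem(repo_info=info)
    print(fs.ls(""))  # top-level files of the repo, e.g. ["README.md", ...]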
0
0
import json
import os
import re
import sys
import urllib.request

import requests
from bs4 import BeautifulSoup

headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
    " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"
}


def download_images_from_google_query(query: str = "dhaka", max_images: int = 5) -> int:
    """Search Google Images for the query and download up to max_images results to disk."""
    max_images = min(max_images, 50)  # Prevent abuse!
    params = {
        "q": query,
        "tbm": "isch",
        "hl": "en",
        "ijn": "0",
    }

    html = requests.get("https://www.google.com/search", params=params, headers=headers)
    soup = BeautifulSoup(html.text, "html.parser")
    matched_images_data = "".join(
        re.findall(r"AF_initDataCallback\(([^<]+)\);", str(soup.select("script")))
    )

    matched_images_data_fix = json.dumps(matched_images_data)
    matched_images_data_json = json.loads(matched_images_data_fix)

    matched_google_image_data = re.findall(
        r"\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",",
        matched_images_data_json,
    )
    if not matched_google_image_data:
        return 0

    removed_matched_google_images_thumbnails = re.sub(
        r"\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]",
        "",
        str(matched_google_image_data),
    )

    matched_google_full_resolution_images = re.findall(
        r"(?:'|,),\[\"(https:|http.*?)\",\d+,\d+\]",
        removed_matched_google_images_thumbnails,
    )
    for index, fixed_full_res_image in enumerate(matched_google_full_resolution_images):
        if index >= max_images:
            return index
        original_size_img_not_fixed = bytes(fixed_full_res_image, "ascii").decode("unicode-escape")
        original_size_img = bytes(original_size_img_not_fixed, "ascii").decode("unicode-escape")
        opener = urllib.request.build_opener()
        opener.addheaders = [
            (
                "User-Agent",
                "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
                " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582",
            )
        ]
        urllib.request.install_opener(opener)
        path_name = f"query_{query.replace(' ', '_')}"
        if not os.path.exists(path_name):
            os.makedirs(path_name)
        urllib.request.urlretrieve(  # noqa: S310
            original_size_img, f"{path_name}/original_size_img_{index}.jpg"
        )
    return index


if __name__ == "__main__":
    try:
        image_count = download_images_from_google_query(sys.argv[1])
        print(f"{image_count} images were downloaded to disk.")
    except IndexError:
        print("Please provide a search term.")
        raise
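A minimal direct call, bypassing the CLI wrapper (the query string is illustrative; files land in query_<term>/, and the scrape depends on Google's current page markup):

    count = download_images_from_google_query("mountains", max_images=3)
    print(f"saved {count} images")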
398
import pyarrow.parquet as pq
import pytest

from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size

from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases


def _check_parquet_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_parquet_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_parquet_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path, features=features, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_parquet_split(split, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, split=split).read()
    _check_parquet_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_parquet_path_type(path_type, parquet_path, tmp_path):
    if issubclass(path_type, str):
        path = parquet_path
    elif issubclass(path_type, list):
        path = [parquet_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)


def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_parquet_datasetdict_reader_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(
            {"train": parquet_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_parquet_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_parquet_datasetdict_reader_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader({"train": parquet_path}, features=features, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_parquet_datasetdict_reader_split(split, parquet_path, tmp_path):
    if split:
        path = {split: parquet_path}
    else:
        split = "train"
        path = {"train": parquet_path, "test": parquet_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())


def test_parquet_write(dataset, tmp_path):
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / "foo.parquet")
    output_table = pf.read()
    assert dataset.data.table == output_table


def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path):
    image_path = str(shared_datadir / "test_image_rgb.jpg")
    data = {"image": [image_path]}
    features = Features({"image": Image()})
    dataset = Dataset.from_dict(data, features=features)
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0

    reloaded_dataset = Dataset.from_parquet(str(tmp_path / "foo.parquet"))
    assert dataset.features == reloaded_dataset.features

    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / "foo.parquet"), streaming=True).read()
    assert dataset.features == reloaded_iterable_dataset.features


@pytest.mark.parametrize(
    "feature, expected",
    [
        (Features({"foo": Value("int32")}), None),
        (Features({"image": Image(), "foo": Value("int32")}), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
        (Features({"nested": Sequence(Audio())}), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
    ],
)
def test_get_writer_batch_size(feature, expected):
    assert get_writer_batch_size(feature) == expected
0
0
def min_path_sum(grid: list) -> int:
    """Return the minimum path sum from the top-left to the bottom-right of the grid, moving only right or down."""
    if not grid or not grid[0]:
        raise TypeError("The grid does not contain the appropriate information")

    for cell_n in range(1, len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]

    for row_n in range(1, len(grid)):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row, row_above)
        row_above = grid[row_n]

    return grid[-1][-1]


def fill_row(current_row: list, row_above: list) -> list:
    current_row[0] += row_above[0]
    for cell_n in range(1, len(current_row)):
        current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n])
    return current_row


if __name__ == "__main__":
    import doctest

    doctest.testmod()
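A classic worked example for the routine above — the cheapest route through this grid is 1 -> 3 -> 1 -> 1 -> 1, for a total of 7:

    assert min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]) == 7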
197
def multiplicative_persistence(num: int) -> int:
    """Return how many times the digits of num must be multiplied together before a single digit remains."""
    if not isinstance(num, int):
        raise ValueError("multiplicative_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("multiplicative_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]
        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]
        num_string = str(total)
        steps += 1
    return steps


def additive_persistence(num: int) -> int:
    """Return how many times the digits of num must be summed before a single digit remains."""
    if not isinstance(num, int):
        raise ValueError("additive_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("additive_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]
        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]
        num_string = str(total)
        steps += 1
    return steps


if __name__ == "__main__":
    import doctest

    doctest.testmod()
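Two worked values for the helpers above:

    assert multiplicative_persistence(39) == 3  # 39 -> 27 -> 14 -> 4
    assert additive_persistence(199) == 3       # 199 -> 19 -> 10 -> 1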
0
0
from __future__ import annotations import copy import inspect import json import math import os import tempfile import unittest from importlib import import_module import numpy as np from transformers import ViTMAEConfig from transformers.file_utils import cached_property, is_tf_available, is_vision_available from transformers.testing_utils import require_tf, require_vision, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFViTMAEForPreTraining, TFViTMAEModel if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class A__ : def __init__( self , __magic_name__ , __magic_name__=1_3 , __magic_name__=3_0 , __magic_name__=2 , __magic_name__=3 , __magic_name__=True , __magic_name__=True , __magic_name__=3_2 , __magic_name__=2 , __magic_name__=4 , __magic_name__=3_7 , __magic_name__="gelu" , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=1_0 , __magic_name__=0.02 , __magic_name__=3 , __magic_name__=0.6 , __magic_name__=None , ): lowerCamelCase : Tuple = parent lowerCamelCase : List[str] = batch_size lowerCamelCase : Any = image_size lowerCamelCase : List[str] = patch_size lowerCamelCase : Optional[int] = num_channels lowerCamelCase : int = is_training lowerCamelCase : Tuple = use_labels lowerCamelCase : List[Any] = hidden_size lowerCamelCase : Dict = num_hidden_layers lowerCamelCase : Union[str, Any] = num_attention_heads lowerCamelCase : Any = intermediate_size lowerCamelCase : List[Any] = hidden_act lowerCamelCase : Tuple = hidden_dropout_prob lowerCamelCase : str = attention_probs_dropout_prob lowerCamelCase : int = type_sequence_label_size lowerCamelCase : Dict = initializer_range lowerCamelCase : Optional[Any] = mask_ratio lowerCamelCase : Any = scope # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above # (we add 1 for the [CLS] token) lowerCamelCase : int = (image_size // patch_size) ** 2 lowerCamelCase : List[Any] = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) ) def UpperCamelCase__ ( self ): lowerCamelCase : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase : List[str] = None if self.use_labels: lowerCamelCase : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase : Dict = self.get_config() return config, pixel_values, labels def UpperCamelCase__ ( self ): return ViTMAEConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , ) def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ ): lowerCamelCase : Union[str, Any] = TFViTMAEModel(config=__lowerCAmelCase ) lowerCamelCase : Union[str, Any] = model(__lowerCAmelCase , 
training=__lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ ): lowerCamelCase : Any = TFViTMAEForPreTraining(__lowerCAmelCase ) lowerCamelCase : List[str] = model(__lowerCAmelCase , training=__lowerCAmelCase ) # expected sequence length = num_patches lowerCamelCase : List[str] = (self.image_size // self.patch_size) ** 2 lowerCamelCase : str = self.patch_size**2 * self.num_channels self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) ) # test greyscale images lowerCamelCase : Optional[Any] = 1 lowerCamelCase : Union[str, Any] = TFViTMAEForPreTraining(__lowerCAmelCase ) lowerCamelCase : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCamelCase : Union[str, Any] = model(__lowerCAmelCase , training=__lowerCAmelCase ) lowerCamelCase : List[str] = self.patch_size**2 self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) ) def UpperCamelCase__ ( self ): lowerCamelCase : List[str] = self.prepare_config_and_inputs() (lowerCamelCase) : Tuple = config_and_inputs lowerCamelCase : Optional[int] = {'''pixel_values''': pixel_values} return config, inputs_dict @require_tf class A__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase): _UpperCAmelCase : Tuple = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else () _UpperCAmelCase : Optional[Any] = {"""feature-extraction""": TFViTMAEModel} if is_tf_available() else {} _UpperCAmelCase : Dict = False _UpperCAmelCase : Union[str, Any] = False _UpperCAmelCase : Union[str, Any] = False _UpperCAmelCase : Tuple = False def UpperCamelCase__ ( self ): lowerCamelCase : List[str] = TFViTMAEModelTester(self ) lowerCamelCase : Optional[Any] = ConfigTester(self , config_class=__lowerCAmelCase , has_text_modality=__lowerCAmelCase , hidden_size=3_7 ) def UpperCamelCase__ ( self ): self.config_tester.run_common_tests() @unittest.skip(reason="""ViTMAE does not use inputs_embeds""" ) def UpperCamelCase__ ( self ): pass def UpperCamelCase__ ( self ): lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase : Dict = model_class(__lowerCAmelCase ) self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) ) lowerCamelCase : Dict = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__lowerCAmelCase , tf.keras.layers.Layer ) ) def UpperCamelCase__ ( self ): lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase : Optional[int] = model_class(__lowerCAmelCase ) lowerCamelCase : Dict = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase : Optional[Any] = [*signature.parameters.keys()] lowerCamelCase : Union[str, Any] = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , __lowerCAmelCase ) def UpperCamelCase__ ( self ): lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCAmelCase ) def UpperCamelCase__ ( self ): lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*__lowerCAmelCase ) def UpperCamelCase__ ( self ): np.random.seed(2 ) 
lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase : Any = int((config.image_size // config.patch_size) ** 2 ) lowerCamelCase : str = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: lowerCamelCase : int = model_class(__lowerCAmelCase ) lowerCamelCase : str = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) lowerCamelCase : str = model(__lowerCAmelCase , noise=__lowerCAmelCase ) lowerCamelCase : str = copy.deepcopy(self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) ) lowerCamelCase : Optional[Any] = model(**__lowerCAmelCase , noise=__lowerCAmelCase ) lowerCamelCase : Any = outputs_dict[0].numpy() lowerCamelCase : Any = outputs_keywords[0].numpy() self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1e-6 ) def UpperCamelCase__ ( self ): np.random.seed(2 ) lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase : str = int((config.image_size // config.patch_size) ** 2 ) lowerCamelCase : Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) def prepare_numpy_arrays(__magic_name__ ): lowerCamelCase : Dict = {} for k, v in inputs_dict.items(): if tf.is_tensor(__lowerCAmelCase ): lowerCamelCase : Optional[int] = v.numpy() else: lowerCamelCase : Union[str, Any] = np.array(__lowerCAmelCase ) return inputs_np_dict for model_class in self.all_model_classes: lowerCamelCase : Union[str, Any] = model_class(__lowerCAmelCase ) lowerCamelCase : Optional[Any] = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) lowerCamelCase : Any = prepare_numpy_arrays(__lowerCAmelCase ) lowerCamelCase : Dict = model(__lowerCAmelCase , noise=__lowerCAmelCase ) lowerCamelCase : Dict = model(**__lowerCAmelCase , noise=__lowerCAmelCase ) self.assert_outputs_same(__lowerCAmelCase , __lowerCAmelCase ) def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ ): np.random.seed(2 ) lowerCamelCase : Tuple = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 ) lowerCamelCase : Optional[int] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) lowerCamelCase : List[str] = tf.constant(__lowerCAmelCase ) # Add `noise` argument. # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument lowerCamelCase : List[Any] = tf_noise super().check_pt_tf_models(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) def UpperCamelCase__ ( self ): np.random.seed(2 ) lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase : int = { module_member for model_class in self.all_model_classes for module in (import_module(model_class.__module__ ),) for module_member_name in dir(__lowerCAmelCase ) if module_member_name.endswith("""MainLayer""" ) # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`. 
and module_member_name[: -len("""MainLayer""" )] == model_class.__name__[: -len("""Model""" )] for module_member in (getattr(__lowerCAmelCase , __lowerCAmelCase ),) if isinstance(__lowerCAmelCase , __lowerCAmelCase ) and tf.keras.layers.Layer in module_member.__bases__ and getattr(__lowerCAmelCase , """_keras_serializable""" , __lowerCAmelCase ) } lowerCamelCase : Dict = int((config.image_size // config.patch_size) ** 2 ) lowerCamelCase : Optional[int] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) lowerCamelCase : Dict = tf.convert_to_tensor(__lowerCAmelCase ) inputs_dict.update({"""noise""": noise} ) for main_layer_class in tf_main_layer_classes: lowerCamelCase : str = main_layer_class(__lowerCAmelCase ) lowerCamelCase : Optional[Any] = { name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items() } lowerCamelCase : List[str] = tf.keras.Model(__lowerCAmelCase , outputs=main_layer(__lowerCAmelCase ) ) lowerCamelCase : Tuple = model(__lowerCAmelCase ) with tempfile.TemporaryDirectory() as tmpdirname: lowerCamelCase : List[Any] = os.path.join(__lowerCAmelCase , """keras_model.h5""" ) model.save(__lowerCAmelCase ) lowerCamelCase : Dict = tf.keras.models.load_model( __lowerCAmelCase , custom_objects={main_layer_class.__name__: main_layer_class} ) assert isinstance(__lowerCAmelCase , tf.keras.Model ) lowerCamelCase : int = model(__lowerCAmelCase ) self.assert_outputs_same(__lowerCAmelCase , __lowerCAmelCase ) @slow def UpperCamelCase__ ( self ): np.random.seed(2 ) lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase : Dict = int((config.image_size // config.patch_size) ** 2 ) lowerCamelCase : List[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: lowerCamelCase : Tuple = model_class(__lowerCAmelCase ) lowerCamelCase : str = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) lowerCamelCase : List[Any] = model(__lowerCAmelCase , noise=__lowerCAmelCase ) if model_class.__name__ == "TFViTMAEModel": lowerCamelCase : Dict = outputs.last_hidden_state.numpy() lowerCamelCase : Any = 0 else: lowerCamelCase : Dict = outputs.logits.numpy() lowerCamelCase : Union[str, Any] = 0 with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(__lowerCAmelCase , saved_model=__lowerCAmelCase ) lowerCamelCase : List[str] = model_class.from_pretrained(__lowerCAmelCase ) lowerCamelCase : int = model(__lowerCAmelCase , noise=__lowerCAmelCase ) if model_class.__name__ == "TFViTMAEModel": lowerCamelCase : Optional[int] = after_outputs['''last_hidden_state'''].numpy() lowerCamelCase : str = 0 else: lowerCamelCase : str = after_outputs['''logits'''].numpy() lowerCamelCase : Optional[Any] = 0 lowerCamelCase : int = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(__lowerCAmelCase , 1e-5 ) def UpperCamelCase__ ( self ): np.random.seed(2 ) lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase : List[str] = int((config.image_size // config.patch_size) ** 2 ) lowerCamelCase : List[str] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: lowerCamelCase : Union[str, Any] = model_class(__lowerCAmelCase ) lowerCamelCase : Union[str, Any] = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) lowerCamelCase : Tuple = model(__lowerCAmelCase , noise=__lowerCAmelCase ) lowerCamelCase : Tuple = model.get_config() # make 
sure that returned config is jsonifiable, which is required by keras json.dumps(__lowerCAmelCase ) lowerCamelCase : List[Any] = model_class.from_config(model.get_config() ) # make sure it also accepts a normal config lowerCamelCase : Union[str, Any] = model_class.from_config(model.config ) lowerCamelCase : Dict = new_model(__lowerCAmelCase ) # Build model new_model.set_weights(model.get_weights() ) lowerCamelCase : Any = new_model(__lowerCAmelCase , noise=__lowerCAmelCase ) self.assert_outputs_same(__lowerCAmelCase , __lowerCAmelCase ) @unittest.skip( reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.""" ) def UpperCamelCase__ ( self ): pass @unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""" ) def UpperCamelCase__ ( self ): pass @slow def UpperCamelCase__ ( self ): lowerCamelCase : Any = TFViTMAEModel.from_pretrained("""google/vit-base-patch16-224""" ) self.assertIsNotNone(__lowerCAmelCase ) def _a ( ): lowerCamelCase : Tuple = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_tf @require_vision class A__ ( unittest.TestCase): @cached_property def UpperCamelCase__ ( self ): return ViTImageProcessor.from_pretrained("""facebook/vit-mae-base""" ) if is_vision_available() else None @slow def UpperCamelCase__ ( self ): np.random.seed(2 ) lowerCamelCase : str = TFViTMAEForPreTraining.from_pretrained("""facebook/vit-mae-base""" ) lowerCamelCase : Tuple = self.default_image_processor lowerCamelCase : Dict = prepare_img() lowerCamelCase : Dict = image_processor(images=__lowerCAmelCase , return_tensors="""tf""" ) # prepare a noise vector that will be also used for testing the TF model # (this way we can ensure that the PT and TF models operate on the same inputs) lowerCamelCase : Tuple = ViTMAEConfig() lowerCamelCase : Union[str, Any] = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 ) lowerCamelCase : List[str] = np.random.uniform(size=(1, num_patches) ) # forward pass lowerCamelCase : str = model(**__lowerCAmelCase , noise=__lowerCAmelCase ) # verify the logits lowerCamelCase : List[Any] = tf.convert_to_tensor([1, 1_9_6, 7_6_8] ) self.assertEqual(outputs.logits.shape , __lowerCAmelCase ) lowerCamelCase : int = tf.convert_to_tensor( [[-0.0_548, -1.7_023, -0.9_325], [0.3_721, -0.5_670, -0.2_233], [0.8_235, -1.3_878, -0.3_524]] ) tf.debugging.assert_near(outputs.logits[0, :3, :3] , __lowerCAmelCase , atol=1e-4 )
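The tests above make ViTMAE's random masking reproducible by passing an explicit noise array; a minimal inference-time sketch of the same trick (the checkpoint name and the 1x3x224x224 input shape are assumptions):

import numpy as np
import tensorflow as tf
from transformers import TFViTMAEForPreTraining

model = TFViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base")
num_patches = (model.config.image_size // model.config.patch_size) ** 2
pixel_values = tf.random.uniform((1, 3, 224, 224))
# Fixing the noise fixes the mask and ids_restore, so two calls agree.
noise = tf.constant(np.random.uniform(size=(1, num_patches)), dtype=tf.float32)
out_a = model(pixel_values, noise=noise)
out_b = model(pixel_values, noise=noise)
tf.debugging.assert_near(out_a.logits, out_b.logits)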
681
import math import os import re import sys import unittest from pathlib import Path from typing import Tuple from unittest.mock import patch from parameterized import parameterized from transformers.testing_utils import ( CaptureStderr, ExtendSysPath, TestCasePlus, execute_subprocess_async, get_gpu_count, get_torch_dist_unique_port, require_apex, require_bitsandbytes, require_fairscale, require_torch, require_torch_gpu, require_torch_multi_gpu, require_torch_non_multi_gpu, slow, ) from transformers.trainer_callback import TrainerState from transformers.trainer_utils import set_seed SCREAMING_SNAKE_CASE__ : List[Any] = os.path.abspath(os.path.dirname(__file__)) with ExtendSysPath(f"{bindir}/../../examples/pytorch/translation"): from run_translation import main # noqa set_seed(42) SCREAMING_SNAKE_CASE__ : Optional[Any] = """sshleifer/student_marian_en_ro_6_1""" SCREAMING_SNAKE_CASE__ : List[Any] = """sshleifer/tiny-mbart""" @require_torch class lowerCamelCase_ ( lowerCamelCase ): def A ( self , __lowerCAmelCase=False , __lowerCAmelCase=None , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , ): """simple docstring""" __magic_name__ :List[Any] = self.run_trainer( eval_steps=1 , max_len=1_2 , model_name=__lowerCAmelCase , num_train_epochs=1 , distributed=__lowerCAmelCase , extra_args_str=__lowerCAmelCase , predict_with_generate=__lowerCAmelCase , do_train=__lowerCAmelCase , do_eval=__lowerCAmelCase , do_predict=__lowerCAmelCase , ) __magic_name__ :Any = TrainerState.load_from_json(os.path.join(__lowerCAmelCase , '''trainer_state.json''' ) ).log_history if not do_eval: return __magic_name__ :Any = [log for log in logs if '''eval_loss''' in log.keys()] __magic_name__ :str = eval_metrics[0] if predict_with_generate: assert "eval_bleu" in first_step_stats __magic_name__ :Tuple = eval_metrics[-1] assert isinstance(last_step_stats['''eval_bleu'''] , __lowerCAmelCase ) assert not math.isnan(float(last_step_stats['''eval_loss'''] ) ), "eval_loss must not be `nan`" @require_torch_non_multi_gpu def A ( self ): """simple docstring""" self.run_seqaseq_quick() @require_torch_multi_gpu def A ( self ): """simple docstring""" self.run_seqaseq_quick(distributed=__lowerCAmelCase ) @require_torch_multi_gpu def A ( self ): """simple docstring""" self.run_seqaseq_quick(distributed=__lowerCAmelCase ) @unittest.skip('''Requires an update of the env running those tests''' ) @require_torch_multi_gpu @require_fairscale def A ( self ): """simple docstring""" self.run_seqaseq_quick(distributed=__lowerCAmelCase , extra_args_str='''--sharded_ddp simple''' ) @unittest.skip('''Requires an update of the env running those tests''' ) @require_torch_multi_gpu @require_fairscale def A ( self ): """simple docstring""" self.run_seqaseq_quick(distributed=__lowerCAmelCase , extra_args_str='''--sharded_ddp simple --fp16''' ) @unittest.skip('''Requires an update of the env running those tests''' ) @require_torch_multi_gpu @require_fairscale def A ( self ): """simple docstring""" self.run_seqaseq_quick(distributed=__lowerCAmelCase , extra_args_str='''--sharded_ddp zero_dp_2''' , predict_with_generate=__lowerCAmelCase ) @unittest.skip('''Requires an update of the env running those tests''' ) @require_torch_multi_gpu @require_fairscale def A ( self ): """simple docstring""" self.run_seqaseq_quick( distributed=__lowerCAmelCase , extra_args_str='''--sharded_ddp zero_dp_2 --fp16''' , predict_with_generate=__lowerCAmelCase ) @require_apex @require_torch_gpu def A ( self ): """simple docstring""" # 
XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same # program and it breaks other tests that run from the same pytest worker, therefore until this is # sorted out it must be run only in an external program, that is distributed=True in this # test and only under one or more gpus - if we want cpu will need to make a special test # # specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via # 2nd main() call it botches the future eval. # self.run_seqaseq_quick(distributed=__lowerCAmelCase , extra_args_str='''--fp16 --fp16_backend=apex''' ) # test 2nd time - was getting eval_loss': nan' # to reproduce the problem set distributed=False self.run_seqaseq_quick(distributed=__lowerCAmelCase , extra_args_str='''--fp16 --fp16_backend=apex''' ) @parameterized.expand(['''base''', '''low''', '''high''', '''mixed'''] ) @require_torch_multi_gpu def A ( self , __lowerCAmelCase ): """simple docstring""" # as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout __magic_name__ :Any = { # test with the default log_level - should be info and thus log info once '''base''': {'''extra_args_str''': '''''', '''n_matches''': 1}, # test with low log_level and log_level_replica - should be noisy on all processes # now the info string should appear twice on 2 processes '''low''': {'''extra_args_str''': '''--log_level debug --log_level_replica debug''', '''n_matches''': 2}, # test with high log_level and low log_level_replica # now the info string should appear once only on the replica '''high''': {'''extra_args_str''': '''--log_level error --log_level_replica debug''', '''n_matches''': 1}, # test with high log_level and log_level_replica - should be quiet on all processes '''mixed''': {'''extra_args_str''': '''--log_level error --log_level_replica error''', '''n_matches''': 0}, } __magic_name__ :Optional[Any] = experiments[experiment_id] __magic_name__ :List[Any] = {'''distributed''': True, '''predict_with_generate''': False, '''do_eval''': False, '''do_predict''': False} __magic_name__ :Optional[int] = '''Running training''' with CaptureStderr() as cl: self.run_seqaseq_quick(**__lowerCAmelCase , extra_args_str=data['''extra_args_str'''] ) __magic_name__ :int = len(re.findall(__lowerCAmelCase , cl.err ) ) self.assertEqual(__lowerCAmelCase , data['''n_matches'''] ) @slow def A ( self ): """simple docstring""" __magic_name__ :List[str] = self.run_trainer( eval_steps=2 , max_len=1_2_8 , model_name=__lowerCAmelCase , learning_rate=3E-4 , num_train_epochs=1_0 , distributed=__lowerCAmelCase , ) # Check metrics __magic_name__ :Optional[int] = TrainerState.load_from_json(os.path.join(__lowerCAmelCase , '''trainer_state.json''' ) ).log_history __magic_name__ :List[str] = [log for log in logs if '''eval_loss''' in log.keys()] __magic_name__ :Any = eval_metrics[0] __magic_name__ :int = eval_metrics[-1] assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing" assert isinstance(last_step_stats['''eval_bleu'''] , __lowerCAmelCase ) # test if do_predict saves generations and metrics __magic_name__ :List[Any] = os.listdir(__lowerCAmelCase ) __magic_name__ :List[str] = {os.path.basename(__lowerCAmelCase ) for p in contents} assert "generated_predictions.txt" in contents assert "predict_results.json" in contents @slow @require_bitsandbytes def A ( self ): """simple docstring""" from transformers.training_args import OptimizerNames def train_and_return_metrics(__lowerCAmelCase ) -> Tuple[int, float]: __magic_name__ 
:str = '''--skip_memory_metrics 0''' __magic_name__ :Dict = self.run_trainer( max_len=1_2_8 , model_name=__lowerCAmelCase , learning_rate=3E-4 , num_train_epochs=1 , optim=__lowerCAmelCase , distributed=__lowerCAmelCase , extra_args_str=__lowerCAmelCase , do_eval=__lowerCAmelCase , do_predict=__lowerCAmelCase , n_gpus_to_use=1 , ) # Check metrics __magic_name__ :Optional[Any] = TrainerState.load_from_json(Path(__lowerCAmelCase , '''trainer_state.json''' ) ).log_history __magic_name__ :int = int(logs[0]['''train_mem_gpu_peaked_delta'''] / 2**2_0 ) __magic_name__ :Optional[Any] = int(logs[0]['''train_mem_gpu_alloc_delta'''] / 2**2_0 ) __magic_name__ :Any = logs[0]['''train_loss'''] return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss __magic_name__ , __magic_name__ , __magic_name__ :int = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value ) __magic_name__ , __magic_name__ , __magic_name__ :Tuple = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value ) __magic_name__ :Tuple = gpu_alloc_mem_orig - gpu_alloc_mem_bnb __magic_name__ :Tuple = gpu_peak_mem_orig + gpu_alloc_mem_orig __magic_name__ :List[Any] = gpu_peak_mem_bnb + gpu_alloc_mem_bnb __magic_name__ :Optional[int] = gpu_total_mem_orig - gpu_total_mem_bnb # sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which # doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized # in 2 bytes and the diff in optim memory usage is derived as so: # # - normal 25*8=~200MB (8 bytes per param) # - bnb 25*2= ~50MB (2 bytes per param) # # Thus we should expect ~150MB total memory saved. # # Peak memory should be the same - the total should be different by about that same margin # # After leaving a small margin to accommodate for differences between gpus let's check # that we have at least 120MB in savings __magic_name__ :Optional[Any] = 1_2_0 # uncomment the following if this test starts failing - requires py38 for a new print feature # gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb # print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB") # print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB") # print(f"{gpu_alloc_mem_diff=}MB") # print(f"{gpu_peak_mem_diff=}MB") # print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB") # print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB") self.assertGreater( __lowerCAmelCase , __lowerCAmelCase , '''should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got''' F''' a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and''' F''' gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB''' , ) self.assertGreater( __lowerCAmelCase , __lowerCAmelCase , '''should use ~150MB less total gpu memory with BNB, compared to without it for this model but got''' F''' a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and''' F''' gpu_total_mem_bnb={gpu_total_mem_bnb}MB''' , ) self.assertEqual( __lowerCAmelCase , __lowerCAmelCase , F'''loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}''' ) def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = 3E-3 , __lowerCAmelCase = "adafactor" , __lowerCAmelCase = False , __lowerCAmelCase = None , __lowerCAmelCase = 0 , __lowerCAmelCase = True , __lowerCAmelCase = True , __lowerCAmelCase = True , __lowerCAmelCase = True , __lowerCAmelCase = None , ): """simple docstring""" 
__magic_name__ :int = self.test_file_dir / '''../fixtures/tests_samples/wmt_en_ro''' __magic_name__ :Dict = self.get_auto_remove_tmp_dir() __magic_name__ :Tuple = F''' --model_name_or_path {model_name} --train_file {data_dir}/train.json --validation_file {data_dir}/val.json --test_file {data_dir}/test.json --output_dir {output_dir} --overwrite_output_dir --max_train_samples 8 --max_source_length {max_len} --max_target_length {max_len} --do_train --num_train_epochs {str(__lowerCAmelCase )} --per_device_train_batch_size 4 --learning_rate {learning_rate} --warmup_steps 8 --logging_steps 0 --logging_strategy no --save_steps {str(__lowerCAmelCase )} --group_by_length --label_smoothing_factor 0.1 --target_lang ro_RO --source_lang en_XX '''.split() __magic_name__ :str = F''' --do_eval --per_device_eval_batch_size 4 --max_eval_samples 8 --val_max_target_length {max_len} --evaluation_strategy steps --eval_steps {str(__lowerCAmelCase )} '''.split() __magic_name__ :Dict = ''' --do_predict '''.split() __magic_name__ :Optional[int] = [] if do_train: args += args_train if do_eval: args += args_eval if do_predict: args += args_predict if predict_with_generate: args += "--predict_with_generate".split() if do_train: if optim == "adafactor": args += "--adafactor".split() else: args += F'''--optim {optim}'''.split() if extra_args_str is not None: args += extra_args_str.split() if distributed: if n_gpus_to_use is None: __magic_name__ :List[Any] = get_gpu_count() __magic_name__ :Tuple = get_torch_dist_unique_port() __magic_name__ :Union[str, Any] = F''' -m torch.distributed.run --nproc_per_node={n_gpus_to_use} --master_port={master_port} {self.examples_dir_str}/pytorch/translation/run_translation.py '''.split() __magic_name__ :Any = [sys.executable] + distributed_args + args # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die execute_subprocess_async(__lowerCAmelCase , env=self.get_env() ) else: __magic_name__ :List[Any] = ['''run_translation.py'''] + args with patch.object(__lowerCAmelCase , '''argv''' , __lowerCAmelCase ): main() return output_dir
0
0
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_squeezebert import SqueezeBertTokenizer __snake_case = logging.get_logger(__name__) __snake_case = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''} __snake_case = { '''vocab_file''': { '''squeezebert/squeezebert-uncased''': ( '''https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt''' ), '''squeezebert/squeezebert-mnli''': '''https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt''', '''squeezebert/squeezebert-mnli-headless''': ( '''https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''squeezebert/squeezebert-uncased''': ( '''https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json''' ), '''squeezebert/squeezebert-mnli''': ( '''https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json''' ), '''squeezebert/squeezebert-mnli-headless''': ( '''https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json''' ), }, } __snake_case = { '''squeezebert/squeezebert-uncased''': 5_1_2, '''squeezebert/squeezebert-mnli''': 5_1_2, '''squeezebert/squeezebert-mnli-headless''': 5_1_2, } __snake_case = { '''squeezebert/squeezebert-uncased''': {'''do_lower_case''': True}, '''squeezebert/squeezebert-mnli''': {'''do_lower_case''': True}, '''squeezebert/squeezebert-mnli-headless''': {'''do_lower_case''': True}, } class __lowerCamelCase (_a ): _lowercase = VOCAB_FILES_NAMES _lowercase = PRETRAINED_VOCAB_FILES_MAP _lowercase = PRETRAINED_INIT_CONFIGURATION _lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowercase = SqueezeBertTokenizer def __init__( self: Dict,A_: int=None,A_: int=None,A_: List[Any]=True,A_: Any="[UNK]",A_: List[Any]="[SEP]",A_: int="[PAD]",A_: Optional[Any]="[CLS]",A_: List[str]="[MASK]",A_: int=True,A_: Dict=None,**A_: int,): '''simple docstring''' super().__init__( A_,tokenizer_file=A_,do_lower_case=A_,unk_token=A_,sep_token=A_,pad_token=A_,cls_token=A_,mask_token=A_,tokenize_chinese_chars=A_,strip_accents=A_,**A_,) __UpperCamelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('lowercase',A_ ) != do_lower_case or normalizer_state.get('strip_accents',A_ ) != strip_accents or normalizer_state.get('handle_chinese_chars',A_ ) != tokenize_chinese_chars ): __UpperCamelCase = getattr(A_,normalizer_state.pop('type' ) ) __UpperCamelCase = do_lower_case __UpperCamelCase = strip_accents __UpperCamelCase = tokenize_chinese_chars __UpperCamelCase = normalizer_class(**A_ ) __UpperCamelCase = do_lower_case def snake_case_ ( self: Dict,A_: Tuple,A_: Union[str, Any]=None ): '''simple docstring''' __UpperCamelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def snake_case_ ( self: Any,A_: List[int],A_: Optional[List[int]] = None ): '''simple docstring''' __UpperCamelCase = [self.sep_token_id] __UpperCamelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def snake_case_ ( self: Union[str, Any],A_: str,A_: Optional[str] = None ): '''simple docstring''' __UpperCamelCase = self._tokenizer.model.save(A_,name=A_ ) return tuple(A_ )
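The fast tokenizer class above is name-mangled; under its upstream name SqueezeBertTokenizerFast, typical usage looks like this (exact ids depend on the vocab):

tok = SqueezeBertTokenizerFast.from_pretrained("squeezebert/squeezebert-uncased")
enc = tok("Hello world", "second segment")
# token_type_ids are 0 over "[CLS] first [SEP]" and 1 over "second [SEP]",
# matching create_token_type_ids_from_sequences above.
print(enc["input_ids"], enc["token_type_ids"])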
1
import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import XLMRobertaTokenizerFast from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class __lowerCamelCase (_a , unittest.TestCase ): _lowercase = KandinskyInpaintPipeline _lowercase = ["""prompt""", """image_embeds""", """negative_image_embeds""", """image""", """mask_image"""] _lowercase = [ """prompt""", """negative_prompt""", """image_embeds""", """negative_image_embeds""", """image""", """mask_image""", ] _lowercase = [ """generator""", """height""", """width""", """latents""", """guidance_scale""", """negative_prompt""", """num_inference_steps""", """return_dict""", """guidance_scale""", """num_images_per_prompt""", """output_type""", """return_dict""", ] _lowercase = False @property def snake_case_ ( self: int ): '''simple docstring''' return 32 @property def snake_case_ ( self: str ): '''simple docstring''' return 32 @property def snake_case_ ( self: Tuple ): '''simple docstring''' return self.time_input_dim @property def snake_case_ ( self: Union[str, Any] ): '''simple docstring''' return self.time_input_dim * 4 @property def snake_case_ ( self: Optional[int] ): '''simple docstring''' return 100 @property def snake_case_ ( self: str ): '''simple docstring''' __UpperCamelCase = XLMRobertaTokenizerFast.from_pretrained('YiYiXu/tiny-random-mclip-base' ) return tokenizer @property def snake_case_ ( self: Any ): '''simple docstring''' torch.manual_seed(0 ) __UpperCamelCase = MCLIPConfig( numDims=self.cross_attention_dim,transformerDimensions=self.text_embedder_hidden_size,hidden_size=self.text_embedder_hidden_size,intermediate_size=37,num_attention_heads=4,num_hidden_layers=5,vocab_size=1005,) __UpperCamelCase = MultilingualCLIP(A_ ) __UpperCamelCase = text_encoder.eval() return text_encoder @property def snake_case_ ( self: Any ): '''simple docstring''' torch.manual_seed(0 ) __UpperCamelCase = { 'in_channels': 9, # Out channels is double in channels because predicts mean and variance 'out_channels': 8, 'addition_embed_type': 'text_image', 'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'), 'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'), 'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn', 'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2), 'layers_per_block': 1, 'encoder_hid_dim': self.text_embedder_hidden_size, 'encoder_hid_dim_type': 'text_image_proj', 'cross_attention_dim': self.cross_attention_dim, 'attention_head_dim': 4, 'resnet_time_scale_shift': 'scale_shift', 'class_embed_type': None, } __UpperCamelCase = UNetaDConditionModel(**A_ ) return model @property def snake_case_ ( self: str ): '''simple docstring''' return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property 
def snake_case_ ( self: str ): '''simple docstring''' torch.manual_seed(0 ) __UpperCamelCase = VQModel(**self.dummy_movq_kwargs ) return model def snake_case_ ( self: Dict ): '''simple docstring''' __UpperCamelCase = self.dummy_text_encoder __UpperCamelCase = self.dummy_tokenizer __UpperCamelCase = self.dummy_unet __UpperCamelCase = self.dummy_movq __UpperCamelCase = DDIMScheduler( num_train_timesteps=1000,beta_schedule='linear',beta_start=0.0_0_0_8_5,beta_end=0.0_1_2,clip_sample=A_,set_alpha_to_one=A_,steps_offset=1,prediction_type='epsilon',thresholding=A_,) __UpperCamelCase = { 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'unet': unet, 'scheduler': scheduler, 'movq': movq, } return components def snake_case_ ( self: Tuple,A_: Optional[int],A_: Dict=0 ): '''simple docstring''' __UpperCamelCase = floats_tensor((1, self.cross_attention_dim),rng=random.Random(A_ ) ).to(A_ ) __UpperCamelCase = floats_tensor((1, self.cross_attention_dim),rng=random.Random(seed + 1 ) ).to(A_ ) # create init_image __UpperCamelCase = floats_tensor((1, 3, 64, 64),rng=random.Random(A_ ) ).to(A_ ) __UpperCamelCase = image.cpu().permute(0,2,3,1 )[0] __UpperCamelCase = Image.fromarray(np.uinta(A_ ) ).convert('RGB' ).resize((256, 256) ) # create mask __UpperCamelCase = np.ones((64, 64),dtype=np.floataa ) __UpperCamelCase = 0 if str(A_ ).startswith('mps' ): __UpperCamelCase = torch.manual_seed(A_ ) else: __UpperCamelCase = torch.Generator(device=A_ ).manual_seed(A_ ) __UpperCamelCase = { 'prompt': 'horse', 'image': init_image, 'mask_image': mask, 'image_embeds': image_embeds, 'negative_image_embeds': negative_image_embeds, 'generator': generator, 'height': 64, 'width': 64, 'num_inference_steps': 2, 'guidance_scale': 4.0, 'output_type': 'np', } return inputs def snake_case_ ( self: Any ): '''simple docstring''' __UpperCamelCase = 'cpu' __UpperCamelCase = self.get_dummy_components() __UpperCamelCase = self.pipeline_class(**A_ ) __UpperCamelCase = pipe.to(A_ ) pipe.set_progress_bar_config(disable=A_ ) __UpperCamelCase = pipe(**self.get_dummy_inputs(A_ ) ) __UpperCamelCase = output.images __UpperCamelCase = pipe( **self.get_dummy_inputs(A_ ),return_dict=A_,)[0] __UpperCamelCase = image[0, -3:, -3:, -1] __UpperCamelCase = image_from_tuple[0, -3:, -3:, -1] print(F'''image.shape {image.shape}''' ) assert image.shape == (1, 64, 64, 3) __UpperCamelCase = np.array( [0.8_3_2_6_9_1_9, 0.7_3_7_9_0_4_6_7, 0.2_0_9_1_8_5_8_1, 0.9_3_0_9_6_1_2, 0.5_5_1_1_7_9_1, 0.4_3_7_1_3_3_2_8, 0.5_5_1_3_3_2_1, 0.4_9_9_2_2_9_3_4, 0.5_9_4_9_7_7_8_6] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 ), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}''' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 ), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}''' def snake_case_ ( self: Optional[Any] ): '''simple docstring''' super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) @slow @require_torch_gpu class __lowerCamelCase (unittest.TestCase ): def snake_case_ ( self: Tuple ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def snake_case_ ( self: Any ): '''simple docstring''' __UpperCamelCase = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy' ) __UpperCamelCase = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' ) __UpperCamelCase 
= np.ones((768, 768),dtype=np.floataa ) __UpperCamelCase = 0 __UpperCamelCase = 'a hat' __UpperCamelCase = KandinskyPriorPipeline.from_pretrained( 'kandinsky-community/kandinsky-2-1-prior',torch_dtype=torch.floataa ) pipe_prior.to(A_ ) __UpperCamelCase = KandinskyInpaintPipeline.from_pretrained( 'kandinsky-community/kandinsky-2-1-inpaint',torch_dtype=torch.floataa ) __UpperCamelCase = pipeline.to(A_ ) pipeline.set_progress_bar_config(disable=A_ ) __UpperCamelCase = torch.Generator(device='cpu' ).manual_seed(0 ) __UpperCamelCase, __UpperCamelCase = pipe_prior( A_,generator=A_,num_inference_steps=5,negative_prompt='',).to_tuple() __UpperCamelCase = pipeline( A_,image=A_,mask_image=A_,image_embeds=A_,negative_image_embeds=A_,generator=A_,num_inference_steps=100,height=768,width=768,output_type='np',) __UpperCamelCase = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(A_,A_ )
1
1
import numpy as np from nltk.translate import meteor_score import datasets from datasets.config import importlib_metadata, version __snake_case = version.parse(importlib_metadata.version('''nltk''')) if NLTK_VERSION >= version.Version('''3.6.4'''): from nltk import word_tokenize __snake_case = '''\ @inproceedings{banarjee2005, title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments}, author = {Banerjee, Satanjeev and Lavie, Alon}, booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization}, month = jun, year = {2005}, address = {Ann Arbor, Michigan}, publisher = {Association for Computational Linguistics}, url = {https://www.aclweb.org/anthology/W05-0909}, pages = {65--72}, } ''' __snake_case = '''\ METEOR, an automatic metric for machine translation evaluation that is based on a generalized concept of unigram matching between the machine-produced translation and human-produced reference translations. Unigrams can be matched based on their surface forms, stemmed forms, and meanings; furthermore, METEOR can be easily extended to include more advanced matching strategies. Once all generalized unigram matches between the two strings have been found, METEOR computes a score for this matching using a combination of unigram-precision, unigram-recall, and a measure of fragmentation that is designed to directly capture how well-ordered the matched words in the machine translation are in relation to the reference. METEOR gets an R correlation value of 0.347 with human evaluation on the Arabic data and 0.331 on the Chinese data. This is shown to be an improvement on using simply unigram-precision, unigram-recall and their harmonic F1 combination. ''' __snake_case = ''' Computes METEOR score of translated segments against one or more references. Args: predictions: list of predictions to score. Each prediction should be a string with tokens separated by spaces. references: list of reference for each prediction. Each reference should be a string with tokens separated by spaces. alpha: Parameter for controlling relative weights of precision and recall. default: 0.9 beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3 gamma: Relative weight assigned to fragmentation penalty. default: 0.5 Returns: \'meteor\': meteor score. 
Examples: >>> meteor = datasets.load_metric(\'meteor\') >>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"] >>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"] >>> results = meteor.compute(predictions=predictions, references=references) >>> print(round(results["meteor"], 4)) 0.6944 ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __lowerCamelCase (datasets.Metric ): def snake_case_ ( self: List[str] ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION,citation=_CITATION,inputs_description=_KWARGS_DESCRIPTION,features=datasets.Features( { 'predictions': datasets.Value('string',id='sequence' ), 'references': datasets.Value('string',id='sequence' ), } ),codebase_urls=['https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'],reference_urls=[ 'https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score', 'https://en.wikipedia.org/wiki/METEOR', ],) def snake_case_ ( self: int,A_: str ): '''simple docstring''' import nltk nltk.download('wordnet' ) if NLTK_VERSION >= version.Version('3.6.5' ): nltk.download('punkt' ) if NLTK_VERSION >= version.Version('3.6.6' ): nltk.download('omw-1.4' ) def snake_case_ ( self: str,A_: Any,A_: Optional[Any],A_: Any=0.9,A_: int=3,A_: Optional[Any]=0.5 ): '''simple docstring''' if NLTK_VERSION >= version.Version('3.6.5' ): __UpperCamelCase = [ meteor_score.single_meteor_score( word_tokenize(A_ ),word_tokenize(A_ ),alpha=A_,beta=A_,gamma=A_ ) for ref, pred in zip(A_,A_ ) ] else: __UpperCamelCase = [ meteor_score.single_meteor_score(A_,A_,alpha=A_,beta=A_,gamma=A_ ) for ref, pred in zip(A_,A_ ) ] return {"meteor": np.mean(A_ )}
1
from typing import Any class __lowerCamelCase : def __init__( self: int,A_: Any ): '''simple docstring''' __UpperCamelCase = data __UpperCamelCase = None def __repr__( self: Any ): '''simple docstring''' return F'''Node({self.data})''' class __lowerCamelCase : def __init__( self: Union[str, Any] ): '''simple docstring''' __UpperCamelCase = None def __iter__( self: int ): '''simple docstring''' __UpperCamelCase = self.head while node: yield node.data __UpperCamelCase = node.next def __len__( self: List[str] ): '''simple docstring''' return sum(1 for _ in self ) def __repr__( self: Any ): '''simple docstring''' return "->".join([str(A_ ) for item in self] ) def __getitem__( self: int,A_: int ): '''simple docstring''' if not 0 <= index < len(self ): raise ValueError('list index out of range.' ) for i, node in enumerate(self ): if i == index: return node return None def __setitem__( self: int,A_: int,A_: Any ): '''simple docstring''' if not 0 <= index < len(self ): raise ValueError('list index out of range.' ) __UpperCamelCase = self.head for _ in range(A_ ): __UpperCamelCase = current.next __UpperCamelCase = data def snake_case_ ( self: Union[str, Any],A_: Any ): '''simple docstring''' self.insert_nth(len(self ),A_ ) def snake_case_ ( self: List[Any],A_: Any ): '''simple docstring''' self.insert_nth(0,A_ ) def snake_case_ ( self: Optional[Any],A_: int,A_: Any ): '''simple docstring''' if not 0 <= index <= len(self ): raise IndexError('list index out of range' ) __UpperCamelCase = Node(A_ ) if self.head is None: __UpperCamelCase = new_node elif index == 0: __UpperCamelCase = self.head # link new_node to head __UpperCamelCase = new_node else: __UpperCamelCase = self.head for _ in range(index - 1 ): __UpperCamelCase = temp.next __UpperCamelCase = temp.next __UpperCamelCase = new_node def snake_case_ ( self: str ): # print every node data '''simple docstring''' print(self ) def snake_case_ ( self: int ): '''simple docstring''' return self.delete_nth(0 ) def snake_case_ ( self: str ): # delete from tail '''simple docstring''' return self.delete_nth(len(self ) - 1 ) def snake_case_ ( self: Any,A_: int = 0 ): '''simple docstring''' if not 0 <= index <= len(self ) - 1: # test if index is valid raise IndexError('List index out of range.' ) __UpperCamelCase = self.head # default first node if index == 0: __UpperCamelCase = self.head.next else: __UpperCamelCase = self.head for _ in range(index - 1 ): __UpperCamelCase = temp.next __UpperCamelCase = temp.next __UpperCamelCase = temp.next.next return delete_node.data def snake_case_ ( self: Any ): '''simple docstring''' return self.head is None def snake_case_ ( self: Optional[int] ): '''simple docstring''' __UpperCamelCase = None __UpperCamelCase = self.head while current: # Store the current node's next node. __UpperCamelCase = current.next # Make the current node's next point backwards __UpperCamelCase = prev # Make the previous node be the current node __UpperCamelCase = current # Make the current node the next node (to progress iteration) __UpperCamelCase = next_node # Return prev in order to put the head at the end __UpperCamelCase = prev def _A ( ) -> None: """simple docstring""" __UpperCamelCase = LinkedList() assert linked_list.is_empty() is True assert str(_lowercase ) == "" try: linked_list.delete_head() raise AssertionError # This should not happen. except IndexError: assert True # This should happen. try: linked_list.delete_tail() raise AssertionError # This should not happen. except IndexError: assert True # This should happen. 
for i in range(10 ): assert len(_lowercase ) == i linked_list.insert_nth(_lowercase , i + 1 ) assert str(_lowercase ) == "->".join(str(_lowercase ) for i in range(1 , 11 ) ) linked_list.insert_head(0 ) linked_list.insert_tail(11 ) assert str(_lowercase ) == "->".join(str(_lowercase ) for i in range(0 , 12 ) ) assert linked_list.delete_head() == 0 assert linked_list.delete_nth(9 ) == 10 assert linked_list.delete_tail() == 11 assert len(_lowercase ) == 9 assert str(_lowercase ) == "->".join(str(_lowercase ) for i in range(1 , 10 ) ) assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True for i in range(0 , 9 ): __UpperCamelCase = -i assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True linked_list.reverse() assert str(_lowercase ) == "->".join(str(_lowercase ) for i in range(-8 , 1 ) ) def _A ( ) -> None: """simple docstring""" __UpperCamelCase = [ -9, 1_00, Node(77_34_51_12 ), 'dlrow olleH', 7, 55_55, 0, -1_92.5_55_55, 'Hello, world!', 77.9, Node(10 ), None, None, 12.20, ] __UpperCamelCase = LinkedList() for i in test_input: linked_list.insert_tail(_lowercase ) # Check if it's empty or not assert linked_list.is_empty() is False assert ( str(_lowercase ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->" "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2" ) # Delete the head __UpperCamelCase = linked_list.delete_head() assert result == -9 assert ( str(_lowercase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None->None->12.2" ) # Delete the tail __UpperCamelCase = linked_list.delete_tail() assert result == 12.2 assert ( str(_lowercase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None->None" ) # Delete a node in specific location in linked list __UpperCamelCase = linked_list.delete_nth(10 ) assert result is None assert ( str(_lowercase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None" ) # Add a Node instance to its head linked_list.insert_head(Node('Hello again, world!' 
) ) assert ( str(_lowercase ) == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->" "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None" ) # Add None to its tail linked_list.insert_tail(_lowercase ) assert ( str(_lowercase ) == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->" "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None" ) # Reverse the linked list linked_list.reverse() assert ( str(_lowercase ) == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->" "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)" ) def _A ( ) -> List[str]: """simple docstring""" from doctest import testmod testmod() __UpperCamelCase = LinkedList() linked_list.insert_head(input('Inserting 1st at head ' ).strip() ) linked_list.insert_head(input('Inserting 2nd at head ' ).strip() ) print('\nPrint list:' ) linked_list.print_list() linked_list.insert_tail(input('\nInserting 1st at tail ' ).strip() ) linked_list.insert_tail(input('Inserting 2nd at tail ' ).strip() ) print('\nPrint list:' ) linked_list.print_list() print('\nDelete head' ) linked_list.delete_head() print('Delete tail' ) linked_list.delete_tail() print('\nPrint list:' ) linked_list.print_list() print('\nReverse linked list' ) linked_list.reverse() print('\nPrint list:' ) linked_list.print_list() print('\nString representation of linked list:' ) print(_lowercase ) print('\nReading/changing Node data using indexing:' ) print(f'''Element at Position 1: {linked_list[1]}''' ) __UpperCamelCase = input('Enter New Value: ' ).strip() print('New list:' ) print(_lowercase ) print(f'''length of linked_list is : {len(_lowercase )}''' ) if __name__ == "__main__": main()
1
1
def _A ( _lowercase , _lowercase ) -> int: """simple docstring""" return (pointa[0] - pointa[0]) ** 2 + (pointa[1] - pointa[1]) ** 2 def _A ( _lowercase , _lowercase=0 ) -> Dict: """simple docstring""" return sorted(_lowercase , key=lambda _lowercase : x[column] ) def _A ( _lowercase , _lowercase , _lowercase=float('inf' ) ) -> List[Any]: """simple docstring""" for i in range(points_counts - 1 ): for j in range(i + 1 , _lowercase ): __UpperCamelCase = euclidean_distance_sqr(points[i] , points[j] ) if current_dis < min_dis: __UpperCamelCase = current_dis return min_dis def _A ( _lowercase , _lowercase , _lowercase=float('inf' ) ) -> Tuple: """simple docstring""" for i in range(min(6 , points_counts - 1 ) , _lowercase ): for j in range(max(0 , i - 6 ) , _lowercase ): __UpperCamelCase = euclidean_distance_sqr(points[i] , points[j] ) if current_dis < min_dis: __UpperCamelCase = current_dis return min_dis def _A ( _lowercase , _lowercase , _lowercase ) -> Optional[Any]: """simple docstring""" if points_counts <= 3: return dis_between_closest_pair(_lowercase , _lowercase ) # recursion __UpperCamelCase = points_counts // 2 __UpperCamelCase = closest_pair_of_points_sqr( _lowercase , points_sorted_on_y[:mid] , _lowercase ) __UpperCamelCase = closest_pair_of_points_sqr( _lowercase , points_sorted_on_y[mid:] , points_counts - mid ) __UpperCamelCase = min(_lowercase , _lowercase ) __UpperCamelCase = [] for point in points_sorted_on_x: if abs(point[0] - points_sorted_on_x[mid][0] ) < closest_pair_dis: cross_strip.append(_lowercase ) __UpperCamelCase = dis_between_closest_in_strip( _lowercase , len(_lowercase ) , _lowercase ) return min(_lowercase , _lowercase ) def _A ( _lowercase , _lowercase ) -> Optional[int]: """simple docstring""" __UpperCamelCase = column_based_sort(_lowercase , column=0 ) __UpperCamelCase = column_based_sort(_lowercase , column=1 ) return ( closest_pair_of_points_sqr( _lowercase , _lowercase , _lowercase ) ) ** 0.5 if __name__ == "__main__": __snake_case = [(2, 3), (1_2, 3_0), (4_0, 5_0), (5, 1), (1_2, 1_0), (3, 4)] print('''Distance:''', closest_pair_of_points(points, len(points)))
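The function definitions above are name-mangled, but the __main__ block still calls them by their original names; with those names restored, the divide-and-conquer search cuts the O(n^2) pairwise scan down to O(n log n) by only comparing points inside a strip around the split line. For the sample points, the closest pair is (2, 3) and (3, 4):

points = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
print(closest_pair_of_points(points, len(points)))  # ~1.4142, i.e. sqrt(2)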
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) __snake_case = {'''configuration_unispeech''': ['''UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''UniSpeechConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case = [ '''UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST''', '''UniSpeechForCTC''', '''UniSpeechForPreTraining''', '''UniSpeechForSequenceClassification''', '''UniSpeechModel''', '''UniSpeechPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_unispeech import ( UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST, UniSpeechForCTC, UniSpeechForPreTraining, UniSpeechForSequenceClassification, UniSpeechModel, UniSpeechPreTrainedModel, ) else: import sys __snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
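The _LazyModule indirection above defers the heavy torch-dependent imports until an attribute is first touched; a simplified sketch of that pattern (not the real transformers implementation):

import importlib
import types

class LazyModule(types.ModuleType):
    # Map submodule name -> exported names; resolve on first attribute access.
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f"{self.__name__}.{submodule}")
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")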
1
1
import argparse

import requests
import torch

# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image

from transformers import (
    AutoTokenizer,
    Blip2Config,
    Blip2ForConditionalGeneration,
    Blip2Processor,
    Blip2VisionConfig,
    BlipImageProcessor,
    OPTConfig,
    T5Config,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD


def load_demo_image():
    url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return image


def create_rename_keys(config):
    rename_keys = []
    # fmt: off
    # vision encoder
    rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding"))
    rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding"))
    rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight"))
    rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias"))
    rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight"))
    rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias"))

    for i in range(config.vision_config.num_hidden_layers):
        rename_keys.append((f"visual_encoder.blocks.{i}.norm1.weight", f"vision_model.encoder.layers.{i}.layer_norm1.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm1.bias", f"vision_model.encoder.layers.{i}.layer_norm1.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm2.weight", f"vision_model.encoder.layers.{i}.layer_norm2.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm2.bias", f"vision_model.encoder.layers.{i}.layer_norm2.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.qkv.weight", f"vision_model.encoder.layers.{i}.self_attn.qkv.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.weight", f"vision_model.encoder.layers.{i}.self_attn.projection.weight",))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.bias", f"vision_model.encoder.layers.{i}.self_attn.projection.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.weight", f"vision_model.encoder.layers.{i}.mlp.fc1.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.bias", f"vision_model.encoder.layers.{i}.mlp.fc1.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.weight", f"vision_model.encoder.layers.{i}.mlp.fc2.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.bias", f"vision_model.encoder.layers.{i}.mlp.fc2.bias"))

    # QFormer
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.layernorm.weight"))
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.layernorm.bias"))
    # fmt: on
    return rename_keys


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")

        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias


def get_blip2_config(model_name, eos_token_id):
    image_size = 364 if "coco" in model_name else 224
    vision_config = Blip2VisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-2.7b", eos_token_id=eos_token_id).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-6.7b", eos_token_id=eos_token_id).to_dict()
    elif "t5-xl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()

    config = Blip2Config(vision_config=vision_config, text_config=text_config)

    return config, image_size


@torch.no_grad()
def convert_blip2_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    tokenizer = (
        AutoTokenizer.from_pretrained("facebook/opt-2.7b")
        if "opt" in model_name
        else AutoTokenizer.from_pretrained("google/flan-t5-xl")
    )
    eos_token_id = tokenizer("\n", add_special_tokens=False).input_ids[0]
    config, image_size = get_blip2_config(model_name, eos_token_id=eos_token_id)

    hf_model = Blip2ForConditionalGeneration(config).eval()

    model_name_to_original = {
        "blip2-opt-2.7b": ("blip2_opt", "pretrain_opt2.7b"),
        "blip2-opt-6.7b": ("blip2_opt", "pretrain_opt6.7b"),
        "blip2-opt-2.7b-coco": ("blip2_opt", "caption_coco_opt2.7b"),
        "blip2-opt-6.7b-coco": ("blip2_opt", "caption_coco_opt6.7b"),
        "blip2-flan-t5-xl": ("blip2_t5", "pretrain_flant5xl"),
        "blip2-flan-t5-xl-coco": ("blip2_t5", "caption_coco_flant5xl"),
        "blip2-flan-t5-xxl": ("blip2_t5", "pretrain_flant5xxl"),
    }

    name, type = model_name_to_original[model_name]

    # load original model
    print("Loading original model...")
    device = "cuda" if torch.cuda.is_available() else "cpu"
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name, model_type=type, is_eval=True, device=device
    )
    original_model.eval()
    print("Done!")

    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("Qformer.bert"):
            key = key.replace("Qformer.bert", "qformer")
        if "attention.self" in key:
            key = key.replace("self", "attention")
        if "opt_proj" in key:
            key = key.replace("opt_proj", "language_projection")
        if "t5_proj" in key:
            key = key.replace("t5_proj", "language_projection")
        if key.startswith("opt"):
            key = key.replace("opt", "language")
        if key.startswith("t5"):
            key = key.replace("t5", "language")
        state_dict[key] = val

    # read in qv biases
    read_in_q_v_bias(state_dict, config)

    missing_keys, unexpected_keys = hf_model.load_state_dict(state_dict, strict=False)
    assert len(missing_keys) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]

    image = load_demo_image()
    original_pixel_values = vis_processors["eval"](image).unsqueeze(0).to(device)
    input_ids = tokenizer(["\n"], return_tensors="pt").input_ids.to(device)

    # create processor
    image_processor = BlipImageProcessor(
        size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD
    )
    processor = Blip2Processor(image_processor=image_processor, tokenizer=tokenizer)
    pixel_values = processor(images=image, return_tensors="pt").pixel_values.to(device)

    # make sure processor creates exact same pixel values
    assert torch.allclose(pixel_values, original_pixel_values)

    original_model.to(device)
    hf_model.to(device)
    with torch.no_grad():
        if "opt" in model_name:
            original_logits = original_model({"image": original_pixel_values, "text_input": [""]}).logits
            logits = hf_model(pixel_values, input_ids).logits
        else:
            original_logits = original_model(
                {"image": original_pixel_values, "text_input": ["\n"], "text_output": ["\n"]}
            ).logits
            labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(pixel_values, input_ids, labels=labels).logits

    assert original_logits.shape == logits.shape
    print("First values of original logits:", original_logits[0, :3, :3])
    print("First values of HF logits:", logits[0, :3, :3])

    # assert values
    if model_name == "blip2-flan-t5-xl":
        expected_slice_logits = torch.tensor(
            [[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]], device=device
        )
        assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    elif model_name == "blip2-flan-t5-xl-coco":
        expected_slice_logits = torch.tensor(
            [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]], device=device
        )
    else:
        # cast to same type
        target_dtype = logits.dtype
        assert torch.allclose(original_logits.to(target_dtype), logits, atol=1e-2)
    print("Looks ok!")

    print("Generating a caption...")
    prompt = ""
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(device)

    original_outputs = original_model.generate({"image": original_pixel_values})
    outputs = hf_model.generate(
        pixel_values,
        input_ids,
        do_sample=False,
        num_beams=5,
        max_length=30,
        min_length=1,
        top_p=0.9,
        repetition_penalty=1.0,
        length_penalty=1.0,
        temperature=1,
    )
    print("Original generation:", original_outputs)
    prompt_length = input_ids.shape[1]
    output_text = processor.batch_decode(outputs[:, prompt_length:], skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print("HF generation:", output_text)

    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        processor.push_to_hub(f"nielsr/{model_name}")
        hf_model.push_to_hub(f"nielsr/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
        "blip2-opt-2.7b",
        "blip2-opt-6.7b",
        "blip2-opt-2.7b-coco",
        "blip2-opt-6.7b-coco",
        "blip2-flan-t5-xl",
        "blip2-flan-t5-xl-coco",
        "blip2-flan-t5-xxl",
    ]
    parser.add_argument(
        "--model_name",
        default="blip2-opt-2.7b",
        choices=choices,
        type=str,
        help="Path to hf config.json of model to convert",
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub after converting",
    )

    args = parser.parse_args()

    convert_blip2_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
encode_dict = {
    "a": "AAAAA",
    "b": "AAAAB",
    "c": "AAABA",
    "d": "AAABB",
    "e": "AABAA",
    "f": "AABAB",
    "g": "AABBA",
    "h": "AABBB",
    "i": "ABAAA",
    "j": "BBBAA",
    "k": "ABAAB",
    "l": "ABABA",
    "m": "ABABB",
    "n": "ABBAA",
    "o": "ABBAB",
    "p": "ABBBA",
    "q": "ABBBB",
    "r": "BAAAA",
    "s": "BAAAB",
    "t": "BAABA",
    "u": "BAABB",
    "v": "BBBAB",
    "w": "BABAA",
    "x": "BABAB",
    "y": "BABBA",
    "z": "BABBB",
    " ": " ",
}
decode_dict = {value: key for key, value in encode_dict.items()}


def encode(word: str) -> str:
    """Encodes a word with the (modified) Baconian cipher defined by encode_dict."""
    encoded = ""
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception("encode() accepts only letters of the alphabet and spaces")
    return encoded


def decode(coded: str) -> str:
    """Decodes a string of 'A'/'B' groups (and spaces) back into plain text."""
    if set(coded) - {"A", "B", " "} != set():
        raise Exception("decode() accepts only 'A', 'B' and spaces")
    decoded = ""
    for word in coded.split():
        # consume the code word five characters at a time
        while len(word) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()


if __name__ == "__main__":
    from doctest import testmod

    testmod()
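# A minimal round-trip check for encode()/decode() above; the expected string follows
# directly from encode_dict ("h" -> "AABBB", "e" -> "AABAA", "l" -> "ABABA", "o" -> "ABBAB").
if __name__ == "__main__":
    assert encode("hello") == "AABBBAABAAABABAABABAABBAB"
    assert decode(encode("hello world")) == "hello world"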
from __future__ import annotations

import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow


if is_tf_available():
    import numpy as np
    import tensorflow as tf

    from transformers import TFCamembertModel


@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !"

        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()

        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
from collections.abc import Generator
from math import sin


def to_little_endian(string_32: bytes) -> bytes:
    """Converts the given 32-char bit string from big-endian to little-endian word order."""
    if len(string_32) != 32:
        raise ValueError("Input must be of length 32")

    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
    return little_endian


def reformat_hex(i: int) -> bytes:
    """Formats a non-negative int as 8 little-endian hex characters."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    hex_rep = format(i, "08x")[-8:]
    little_endian_hex = b""
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8")
    return little_endian_hex


def preprocess(message: bytes) -> bytes:
    """Converts the message to a bit string and pads it to a multiple of 512 bits."""
    bit_string = b""
    for char in message:
        bit_string += format(char, "08b").encode("utf-8")
    start_len = format(len(bit_string), "064b").encode("utf-8")

    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])

    return bit_string


def get_block_words(bit_string: bytes) -> Generator[list[int], None, None]:
    """Splits the bit string into 512-bit blocks of sixteen 32-bit little-endian words."""
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")

    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words


def not_32(i: int) -> int:
    """Bitwise NOT over 32 bits."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    i_str = format(i, "032b")
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)


def sum_32(a: int, b: int) -> int:
    """Addition modulo 2**32."""
    return (a + b) % 2**32


def left_rotate_32(i: int, shift: int) -> int:
    """Left-rotates a 32-bit value by `shift` bits."""
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32


def md5_me(message: bytes) -> bytes:
    """Computes the MD5 digest of the message and returns it as 32 hex characters."""
    bit_string = preprocess(message)

    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    a0 = 0x67452301
    b0 = 0xEFCDAB89
    c0 = 0x98BADCFE
    d0 = 0x10325476

    shift_amounts = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]

    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d)  # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c)  # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_32(d))
                g = (7 * i) % 16

            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_32(b, left_rotate_32(f, shift_amounts[i]))

        # Add hashed chunk to running total
        a0 = sum_32(a0, a)
        b0 = sum_32(b0, b)
        c0 = sum_32(c0, c)
        d0 = sum_32(d0, d)

    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
    return digest


if __name__ == "__main__":
    import doctest

    doctest.testmod()
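# A quick sanity check (a sketch, not part of the original module): the function above
# implements standard MD5, so its hex digest should agree with hashlib's.
if __name__ == "__main__":
    import hashlib

    message = b"The quick brown fox jumps over the lazy dog"
    assert md5_me(message) == hashlib.md5(message).hexdigest().encode("utf-8")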
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer


VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt",
    },
    "tokenizer_file": {
        "unc-nlp/lxmert-base-uncased": (
            "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "unc-nlp/lxmert-base-uncased": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "unc-nlp/lxmert-base-uncased": {"do_lower_case": True},
}


class LxmertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LxmertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            # rebuild the normalizer so it reflects the requested init options
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
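# Usage sketch (a hypothetical session; only the checkpoint name comes from the maps above):
#
#   tokenizer = LxmertTokenizerFast.from_pretrained("unc-nlp/lxmert-base-uncased")
#   enc = tokenizer("a picture of a cat", return_tensors="pt")
#   # enc carries input_ids / token_type_ids / attention_mask with the
#   # [CLS] ... [SEP] layout built by build_inputs_with_special_tokens above.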
from __future__ import annotations

import time
from math import sqrt

# 1 for manhattan, 0 for euclidean
HEURISTIC = 0

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right

TPosition = tuple[int, int]


class Node:
    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: int,
        parent: Node | None,
    ):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self) -> float:
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx) + abs(dy)
        else:
            return sqrt(dy**2 + dx**2)

    def __lt__(self, other: Node) -> bool:
        return self.f_cost < other.f_cost


class AStar:
    def __init__(self, start: TPosition, goal: TPosition):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)

        self.open_nodes = [self.start]
        self.closed_nodes = []

        self.reached = False

    def search(self) -> list[TPosition]:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        return [self.start.pos]

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> list[TPosition]:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


class BidirectionalAStar:
    def __init__(self, start: TPosition, goal: TPosition):
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self) -> list[TPosition]:
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)

            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)

            # each direction aims at the other frontier's current node
            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node

            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }

            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue

                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(astar.open_nodes.index(child_node))

                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)

        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> list[TPosition]:
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path


if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(f"AStar execution time = {end_time:f} seconds")

    bd_start_time = time.time()
    bidir_astar = BidirectionalAStar(init, goal)
    bd_end_time = time.time() - bd_start_time
    print(f"BidirectionalAStar execution time = {bd_end_time:f} seconds")
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple

from transformers import AddedToken, BatchEncoding, ByT5Tokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available

from ...test_tokenization_common import TokenizerTesterMixin


if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"


class ByT5TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ByT5Tokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = ByT5Tokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def t5_base_tokenizer(self):
        return ByT5Tokenizer.from_pretrained("google/byt5-small")

    def get_tokenizer(self, **kwargs) -> ByT5Tokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        # collect every id the tokenizer can decode into a printable token
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))

        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids

    def test_eos_treatment(self):
        tokenizer = self.t5_base_tokenizer
        batch_with_eos_added = tokenizer(["hi</s>", "I went to the gym</s>", "</s>"])
        batch_without_eos_added = tokenizer(["hi", "I went to the gym", ""])
        self.assertListEqual(batch_with_eos_added["input_ids"], batch_without_eos_added["input_ids"])

    def test_multibytes_char(self):
        tokenizer = self.t5_base_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "Unicode €.</s>")

        encoded = tokenizer("e è é ê ë")
        encoded_ids = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "e è é ê ë</s>")

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "e è é ê ë</s>")

    def test_prepare_batch_integration(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)

        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 37), batch.input_ids.shape)
        self.assertEqual((2, 37), batch.attention_mask.shape)

    def test_empty_target_text(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("decoder_input_ids", batch)
        self.assertNotIn("decoder_attention_mask", batch)

    def test_max_length_integration(self):
        tokenizer = self.t5_base_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK
        )
        self.assertEqual(32, targets["input_ids"].shape[1])

    def test_eos_in_input(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization. </s>"]
        tgt_text = ["Summary of the text. </s>"]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
        expected_tgt_tokens = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
        # fmt: on
        batch = tokenizer(src_text, text_target=tgt_text)

        self.assertEqual(expected_src_tokens, batch["input_ids"][0])
        self.assertEqual(expected_tgt_tokens, batch["labels"][0])

    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token")
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)

    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]

                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir,
                )
                self.assertIn(
                    "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens
                )
                # self.assertIn("an_additional_special_token", tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    ["an_additional_special_token"],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])
                    ),
                )

                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir,
                    additional_special_tokens=new_added_tokens,
                )

                self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
                self.assertEqual(
                    ["a_new_additional_special_token"],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])
                    ),
                )

    def test_decode_single_bytes(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                tokenizer = tokenizer_class.from_pretrained(tmp_dir)

                self.assertTrue(tokenizer.decode([255]) == "")

    # tokenizer can be instantiated without any pretrained files, so no need for pretrained tokenizer list
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have vocabulary
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized since ids depend on whole input string
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist so unnecessary to test
    def test_conversion_reversible(self):
        pass

    def test_convert_tokens_to_string_format(self):
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "x", "t", "</s>"]
                string = tokenizer.convert_tokens_to_string(tokens)

                self.assertIsInstance(string, str)

    # We need a different implementation of the test of the same name from the common test framework,
    # because this tokenizer has no vocab
    def test_tokenizers_common_ids_setters(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                attributes_list = [
                    "bos_token",
                    "eos_token",
                    "unk_token",
                    "sep_token",
                    "pad_token",
                    "cls_token",
                    "mask_token",
                ]

                token_id_to_test_setters = 0
                token_to_test_setters = tokenizer.convert_ids_to_tokens(
                    token_id_to_test_setters, skip_special_tokens=False
                )

                for attr in attributes_list:
                    setattr(tokenizer, attr + "_id", None)
                    self.assertEqual(getattr(tokenizer, attr), None)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), None)

                    setattr(tokenizer, attr + "_id", token_id_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr), token_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), token_id_to_test_setters)

                setattr(tokenizer, "additional_special_tokens_ids", [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [])

                setattr(tokenizer, "additional_special_tokens_ids", [token_id_to_test_setters])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [token_to_test_setters])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [token_id_to_test_setters])
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path

from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError

from transformers import AutoFeatureExtractor, Wav2Vec2FeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test


sys.path.append(str(Path(__file__).parent.parent / "utils"))

from test_module.custom_feature_extraction import CustomFeatureExtractor  # noqa E402


SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("fixtures")


class FeatureExtractorUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        _ = Wav2Vec2FeatureExtractor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json"
        )


@is_staging_test
class FeatureExtractorPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-feature-extractor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-feature-extractor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-feature-extractor")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        feature_extractor.push_to_hub("test-feature-extractor", use_auth_token=self._token)

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-feature-extractor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id="test-feature-extractor", push_to_hub=True, use_auth_token=self._token
            )

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

    def test_push_to_hub_in_organization(self):
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        feature_extractor.push_to_hub("valid_org/test-feature-extractor", use_auth_token=self._token)

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("valid_org/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-feature-extractor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id="valid_org/test-feature-extractor-org", push_to_hub=True, use_auth_token=self._token
            )

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("valid_org/test-feature-extractor-org")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

    def test_push_to_hub_dynamic_feature_extractor(self):
        CustomFeatureExtractor.register_for_auto_class()
        feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)

        feature_extractor.push_to_hub("test-dynamic-feature-extractor", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            feature_extractor.auto_map,
            {"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor"},
        )

        new_feature_extractor = AutoFeatureExtractor.from_pretrained(
            f"{USER}/test-dynamic-feature-extractor", trust_remote_code=True
        )
        # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
        self.assertEqual(new_feature_extractor.__class__.__name__, "CustomFeatureExtractor")
import gc
import importlib.metadata
import tempfile
import unittest

from packaging import version

from transformers import (
    AutoModel,
    AutoModelForCausalLM,
    AutoModelForSeq2SeqLM,
    AutoModelForSequenceClassification,
    AutoTokenizer,
    BitsAndBytesConfig,
    pipeline,
)
from transformers.testing_utils import (
    is_torch_available,
    require_accelerate,
    require_bitsandbytes,
    require_torch,
    require_torch_gpu,
    require_torch_multi_gpu,
    slow,
)


def get_some_linear_layer(model):
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_4h_to_h


if is_torch_available():
    import torch
    import torch.nn as nn

    class LoRALayer(nn.Module):
        """Wraps a linear layer with a LoRA-like adapter - used for testing purposes only."""

        def __init__(self, module: nn.Module, rank: int):
            super().__init__()
            self.module = module
            self.adapter = nn.Sequential(
                nn.Linear(module.in_features, rank, bias=False),
                nn.Linear(rank, module.out_features, bias=False),
            )
            small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
            nn.init.normal_(self.adapter[0].weight, std=small_std)
            nn.init.zeros_(self.adapter[1].weight)
            self.adapter.to(module.weight.device)

        def forward(self, input, *args, **kwargs):
            return self.module(input, *args, **kwargs) + self.adapter(input)


@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Base4bitTest(unittest.TestCase):
    # We keep the constants inside the init function and model loading inside setUp function

    # We need to test on relatively large models (aka >1b parameters otherwise the quantization may not work as expected)
    # Therefore here we use only bloom-1b7 to test our module
    model_name = "bigscience/bloom-1b7"

    # Constant values
    EXPECTED_RELATIVE_DIFFERENCE = 2.109659552692574
    input_text = "Hello my name is"
    EXPECTED_OUTPUTS = set()
    EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I")
    EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n")
    EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University")
    MAX_NEW_TOKENS = 10

    def setUp(self):
        # Models and tokenizer
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)


class Bnb4BitTest(Base4bitTest):
    def setUp(self):
        super().setUp()

        # Models and tokenizer
        self.model_fp16 = AutoModelForCausalLM.from_pretrained(
            self.model_name, torch_dtype=torch.float16, device_map="auto"
        )
        self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")

    def tearDown(self):
        del self.model_fp16
        del self.model_4bit

        gc.collect()
        torch.cuda.empty_cache()

    def test_quantization_config_json_serialization(self):
        config = self.model_4bit.config
        self.assertTrue(hasattr(config, "quantization_config"))

        _ = config.to_dict()
        _ = config.to_diff_dict()
        _ = config.to_json_string()

    def test_memory_footprint(self):
        from bitsandbytes.nn import Params4bit

        mem_fp16 = self.model_fp16.get_memory_footprint()
        mem_4bit = self.model_4bit.get_memory_footprint()

        self.assertAlmostEqual(mem_fp16 / mem_4bit, self.EXPECTED_RELATIVE_DIFFERENCE)
        linear = get_some_linear_layer(self.model_4bit)
        self.assertTrue(linear.weight.__class__ == Params4bit)

    def test_linear_are_4bit(self):
        from transformers import T5PreTrainedModel

        self.model_fp16.get_memory_footprint()
        self.model_4bit.get_memory_footprint()

        for name, module in self.model_4bit.named_modules():
            if isinstance(module, torch.nn.Linear):
                if name not in ["lm_head"] + T5PreTrainedModel._keep_in_fp32_modules:
                    # 4-bit parameters are packed in uint8 variables
                    self.assertTrue(module.weight.dtype == torch.uint8)

    def test_generate_quality(self):
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = self.model_4bit.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)

        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)

    def test_generate_quality_config(self):
        quantization_config = BitsAndBytesConfig()
        quantization_config.load_in_4bit = True

        model_4bit_from_config = AutoModelForCausalLM.from_pretrained(
            self.model_name, quantization_config=quantization_config, device_map="auto"
        )

        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = model_4bit_from_config.generate(
            input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10
        )

        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)

    def test_raise_on_save_pretrained(self):
        with self.assertRaises(NotImplementedError), tempfile.TemporaryDirectory() as tmpdirname:
            self.model_4bit.save_pretrained(tmpdirname)

    def test_raise_if_config_and_load_in_4bit(self):
        quantization_config = BitsAndBytesConfig()

        with self.assertRaises(ValueError):
            _ = AutoModelForCausalLM.from_pretrained(
                self.model_name,
                quantization_config=quantization_config,
                load_in_4bit=True,
                device_map="auto",
                bnb_4bit_quant_type="nf4",
            )

    def test_device_and_dtype_assignment(self):
        with self.assertRaises(ValueError):
            # Tries with `str`
            self.model_4bit.to("cpu")

        with self.assertRaises(ValueError):
            # Tries with a `dtype``
            self.model_4bit.to(torch.float16)

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_4bit.to(torch.device("cuda:0"))

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_4bit.float()

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_4bit.half()

        # Test if we did not break anything
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")

        self.model_fp16 = self.model_fp16.to(torch.float32)
        _ = self.model_fp16.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)

        # Check this does not throw an error
        _ = self.model_fp16.to("cpu")

        # Check this does not throw an error
        _ = self.model_fp16.half()

        # Check this does not throw an error
        _ = self.model_fp16.float()

    def test_fp32_4bit_conversion(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("t5-small", load_in_4bit=True, device_map="auto")
        self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.float32)


@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Bnb4BitT5Test(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls.model_name = "t5-small"
        cls.dense_act_model_name = "google/flan-t5-small"  # flan-t5 uses dense-act instead of dense-relu-dense
        cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name)
        cls.input_text = "Translate in German: Hello, my dog is cute"

    def tearDown(self):
        gc.collect()
        torch.cuda.empty_cache()

    def test_inference_without_keep_in_fp32(self):
        from transformers import T5ForConditionalGeneration

        modules = T5ForConditionalGeneration._keep_in_fp32_modules
        T5ForConditionalGeneration._keep_in_fp32_modules = None

        # test with `t5-small`
        model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)
        T5ForConditionalGeneration._keep_in_fp32_modules = modules

    def test_inference_with_keep_in_fp32(self):
        import bitsandbytes as bnb

        from transformers import T5ForConditionalGeneration

        # test with `t5-small`
        model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")

        # there was a bug with decoders - this test checks that it is fixed
        self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q, bnb.nn.Linear4bit))

        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)


class Classes4BitModelTest(Base4bitTest):
    def setUp(self):
        super().setUp()
        # model_name
        self.model_name = "bigscience/bloom-560m"
        self.seq_to_seq_name = "t5-small"

        # Different types of model
        self.base_model = AutoModel.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        # Sequence classification model
        self.sequence_model = AutoModelForSequenceClassification.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="auto"
        )
        # CausalLM model
        self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        # Seq2seq model
        self.seq_to_seq_model = AutoModelForSeq2SeqLM.from_pretrained(
            self.seq_to_seq_name, load_in_4bit=True, device_map="auto"
        )

    def tearDown(self):
        del self.base_model
        del self.sequence_model
        del self.model_4bit
        del self.seq_to_seq_model

        gc.collect()
        torch.cuda.empty_cache()

    def test_correct_head_class(self):
        from bitsandbytes.nn import Params4bit

        self.assertTrue(self.base_model.h[-1].mlp.dense_4h_to_h.weight.__class__ == Params4bit)

        # Other heads should be nn.Parameter
        self.assertTrue(self.model_4bit.lm_head.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter)


class Pipeline4BitTest(Base4bitTest):
    def setUp(self):
        super().setUp()

    def tearDown(self):
        del self.pipe

        gc.collect()
        torch.cuda.empty_cache()

    def test_pipeline(self):
        self.pipe = pipeline(
            "text-generation",
            model=self.model_name,
            model_kwargs={"device_map": "auto", "load_in_4bit": True, "torch_dtype": torch.float16},
            max_new_tokens=self.MAX_NEW_TOKENS,
        )

        # Real second forward pass
        pipeline_output = self.pipe(self.input_text)
        self.assertIn(pipeline_output[0]["generated_text"], self.EXPECTED_OUTPUTS)


@require_torch_multi_gpu
class Bnb4bitTestMultiGpu(Base4bitTest):
    def setUp(self):
        super().setUp()

    def test_multi_gpu_loading(self):
        model_parallel = AutoModelForCausalLM.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="balanced"
        )

        # Check correct device map
        self.assertEqual(set(model_parallel.hf_device_map.values()), {0, 1})

        # Check that inference pass works on the model
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")

        # Second real batch
        output_parallel = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)
        self.assertIn(self.tokenizer.decode(output_parallel[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)


class Bnb4BitTestTraining(Base4bitTest):
    def setUp(self):
        self.model_name = "facebook/opt-350m"
        super().setUp()

    def test_training(self):
        if version.parse(importlib.metadata.version("bitsandbytes")) < version.parse("0.37.0"):
            return

        # Step 1: freeze all parameters
        model = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True)

        self.assertEqual(set(model.hf_device_map.values()), {torch.cuda.current_device()})

        for param in model.parameters():
            param.requires_grad = False  # freeze the model - train adapters later
            if param.ndim == 1:
                # cast the small parameters (e.g. layernorm) to fp32 for stability
                param.data = param.data.to(torch.float32)

        # Step 2: add adapters
        for _, module in model.named_modules():
            if "OPTAttention" in repr(type(module)):
                module.q_proj = LoRALayer(module.q_proj, rank=16)
                module.k_proj = LoRALayer(module.k_proj, rank=16)
                module.v_proj = LoRALayer(module.v_proj, rank=16)

        # Step 3: dummy batch
        batch = self.tokenizer("Test batch ", return_tensors="pt").to(0)

        # Step 4: Check if the gradient is not None
        with torch.cuda.amp.autocast():
            out = model.forward(**batch)
            out.logits.norm().backward()

        for module in model.modules():
            if isinstance(module, LoRALayer):
                self.assertTrue(module.adapter[1].weight.grad is not None)
                self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0)
            elif isinstance(module, nn.Embedding):
                self.assertTrue(module.weight.grad is None)


class Bnb4BitGPT2Test(Bnb4BitTest):
    model_name = "gpt2-xl"
    EXPECTED_RELATIVE_DIFFERENCE = 3.3191854854152187
import argparse
import json
import os

import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler


MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator, batch_size=16, model_name_or_path="bert-base-cased"):
    """Creates train and eval dataloaders for GLUE MRPC."""
    tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels', which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


def training_function(config, args):
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name_or_path = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name_or_path)

    # Instantiate the model (we build the model here so that the seed also controls new weight initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path, return_dict=True)

    # Instantiate optimizer: fall back to a dummy optimizer when the DeepSpeed config already defines one
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler: fall back to a dummy scheduler when the DeepSpeed config already defines one
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=0,
            num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything. There is no specific order to remember; we just need to unpack the objects in the same
    # order we gave them to the prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    metric = evaluate.load("glue", "mrpc")
    best_performance = 0
    performance_metric = {}
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            # It is slightly faster to call this once than multiple times
            predictions, references = accelerator.gather((predictions, batch["labels"]))
            # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(eval_dataloader) - 1:
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
        performance_metric[f"epoch-{epoch}"] = eval_metric["accuracy"]
        if best_performance < eval_metric["accuracy"]:
            best_performance = eval_metric["accuracy"]

    if args.performance_lower_bound is not None:
        assert (
            args.performance_lower_bound <= best_performance
        ), f"Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "all_results.json"), "w") as f:
            json.dump(performance_metric, f)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--performance_lower_bound",
        type=float,
        default=None,
        help="Optional lower bound for the performance metric. If set, the training will throw an error when the performance metric drops below this value.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=3,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
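# --- Illustrative only: a minimal sketch of driving the script above programmatically
# instead of via the CLI. `argparse.Namespace` stands in for the parsed arguments; the
# attribute names below mirror the flags defined in main() and are the only ones
# training_function() reads.
import argparse as _argparse

_demo_args = _argparse.Namespace(
    model_name_or_path="bert-base-cased",
    output_dir=".",
    performance_lower_bound=None,
)
_demo_config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
# training_function(_demo_config, _demo_args)  # uncomment to run; downloads GLUE MRPC and a BERT checkpoint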
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


_import_structure = {
    "configuration_groupvit": [
        "GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "GroupViTConfig",
        "GroupViTOnnxConfig",
        "GroupViTTextConfig",
        "GroupViTVisionConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_groupvit"] = [
        "GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GroupViTModel",
        "GroupViTPreTrainedModel",
        "GroupViTTextModel",
        "GroupViTVisionModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_groupvit"] = [
        "TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFGroupViTModel",
        "TFGroupViTPreTrainedModel",
        "TFGroupViTTextModel",
        "TFGroupViTVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_groupvit import (
        GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        GroupViTConfig,
        GroupViTOnnxConfig,
        GroupViTTextConfig,
        GroupViTVisionConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_groupvit import (
            GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GroupViTModel,
            GroupViTPreTrainedModel,
            GroupViTTextModel,
            GroupViTVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_groupvit import (
            TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFGroupViTModel,
            TFGroupViTPreTrainedModel,
            TFGroupViTTextModel,
            TFGroupViTVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available


if is_datasets_available():
    import datasets


class Seq2seqTrainerTester(TestCasePlus):
    @slow
    @require_torch
    def test_finetune_bert2bert(self):
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny", "prajjwal1/bert-tiny")
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128

        train_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="train[:1%]")
        val_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="validation[:1%]")

        train_dataset = train_dataset.select(range(32))
        val_dataset = val_dataset.select(range(16))

        batch_size = 4

        def _map_to_encoder_decoder_inputs(batch):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["article"], padding="max_length", truncation=True, max_length=512)
            outputs = tokenizer(batch["highlights"], padding="max_length", truncation=True, max_length=128)
            batch["input_ids"] = inputs.input_ids
            batch["attention_mask"] = inputs.attention_mask

            batch["decoder_input_ids"] = outputs.input_ids
            batch["labels"] = outputs.input_ids.copy()
            batch["labels"] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            batch["decoder_attention_mask"] = outputs.attention_mask

            assert all(len(x) == 512 for x in inputs.input_ids)
            assert all(len(x) == 128 for x in outputs.input_ids)

            return batch

        def _compute_metrics(pred):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions

            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
            label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)

            accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str)

            return {"accuracy": accuracy}

        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        train_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        val_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        output_dir = self.get_auto_remove_tmp_dir()

        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir,
            per_device_train_batch_size=batch_size,
            per_device_eval_batch_size=batch_size,
            predict_with_generate=True,
            evaluation_strategy="steps",
            do_train=True,
            do_eval=True,
            warmup_steps=0,
            eval_steps=2,
            logging_steps=2,
        )

        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bert2bert,
            args=training_args,
            compute_metrics=_compute_metrics,
            train_dataset=train_dataset,
            eval_dataset=val_dataset,
            tokenizer=tokenizer,
        )

        # start training
        trainer.train()
from dataclasses import dataclass
from typing import Optional, Tuple

import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput


@dataclass
class TransformationModelOutput(ModelOutput):
    projection_state: Optional[torch.FloatTensor] = None
    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None


class RobertaSeriesConfig(XLMRobertaConfig):
    def __init__(
        self,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        project_dim=512,
        pooler_fn="cls",
        learn_encoder=False,
        use_attention_mask=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.project_dim = project_dim
        self.pooler_fn = pooler_fn
        self.learn_encoder = learn_encoder
        self.use_attention_mask = use_attention_mask


class RobertaSeriesModelWithTransformation(RobertaPreTrainedModel):
    _keys_to_ignore_on_load_unexpected = [r"pooler", r"logit_scale"]
    _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
    base_model_prefix = "roberta"
    config_class = RobertaSeriesConfig

    def __init__(self, config):
        super().__init__(config)
        self.roberta = XLMRobertaModel(config)
        self.transformation = nn.Linear(config.hidden_size, config.project_dim)
        self.has_pre_transformation = getattr(config, "has_pre_transformation", False)
        if self.has_pre_transformation:
            self.transformation_pre = nn.Linear(config.hidden_size, config.project_dim)
            self.pre_LN = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.post_init()

    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.base_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=True if self.has_pre_transformation else output_hidden_states,
            return_dict=return_dict,
        )

        if self.has_pre_transformation:
            sequence_output = outputs["hidden_states"][-2]
            sequence_output = self.pre_LN(sequence_output)
            projection_state = self.transformation_pre(sequence_output)
            return TransformationModelOutput(
                projection_state=projection_state,
                last_hidden_state=outputs.last_hidden_state,
                hidden_states=outputs.hidden_states,
                attentions=outputs.attentions,
            )
        else:
            projection_state = self.transformation(outputs.last_hidden_state)
            return TransformationModelOutput(
                projection_state=projection_state,
                last_hidden_state=outputs.last_hidden_state,
                hidden_states=outputs.hidden_states,
                attentions=outputs.attentions,
            )
def solution(n: int = 100) -> int:
    """Return the difference between the square of the sum and the sum of the
    squares of the first n natural numbers (Project Euler problem 6)."""
    sum_of_squares = 0
    sum_of_ints = 0
    for i in range(1, n + 1):
        sum_of_squares += i**2
        sum_of_ints += i
    return sum_of_ints**2 - sum_of_squares


if __name__ == "__main__":
    print(f"{solution() = }")
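# Cross-check (added for illustration, not part of the original file): the same
# quantity has a closed form, (n(n+1)/2)^2 - n(n+1)(2n+1)/6, which avoids the loop.
def solution_closed_form(n: int = 100) -> int:
    square_of_sum = (n * (n + 1) // 2) ** 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return square_of_sum - sum_of_squares


assert solution_closed_form(100) == solution(100) == 25164150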
import pytest

from datasets import inspect_metric, list_metrics, load_metric


@pytest.fixture
def mock_emitted_deprecation_warnings(monkeypatch):
    monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings", set())


# Used by list_metrics
@pytest.fixture
def mock_hfh(monkeypatch):
    class MetricMock:
        def __init__(self, metric_id):
            self.id = metric_id

    class HfhMock:
        _metrics = [MetricMock(metric_id) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]

        def list_metrics(self):
            return self._metrics

    monkeypatch.setattr("datasets.inspect.huggingface_hub", HfhMock())


@pytest.mark.parametrize(
    "func, args", [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))]
)
def test_metric_deprecation_warning(func, args, mock_emitted_deprecation_warnings, mock_hfh, tmp_path):
    if "tmp_path" in args:
        args = tuple(arg if arg != "tmp_path" else tmp_path for arg in args)
    # The exact warning class was elided in the source; FutureWarning matches how
    # `datasets` deprecates the metrics API in favor of `evaluate`.
    with pytest.warns(FutureWarning, match="https://huggingface.co/docs/evaluate"):
        func(*args)
def euclidean_distance_sqr(point1, point2) -> float:
    return (point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2


def column_based_sort(array, column=0):
    return sorted(array, key=lambda x: x[column])


def dis_between_closest_pair(points, points_counts, min_dis=float("inf")):
    """Brute force over the first points_counts points; O(n^2)."""
    for i in range(points_counts - 1):
        for j in range(i + 1, points_counts):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def dis_between_closest_in_strip(points, points_counts, min_dis=float("inf")):
    """Closest pair inside the strip; each point needs at most 6 neighbour checks."""
    for i in range(min(6, points_counts - 1), points_counts):
        for j in range(max(0, i - 6), i):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts):
    # base case: small inputs are handled by brute force
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x, points_counts)

    # recursion on the left and right halves
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y[:mid], mid)
    closest_in_right = closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y[mid:], points_counts - mid)
    closest_pair_dis = min(closest_in_left, closest_in_right)

    # collect points within closest_pair_dis of the dividing vertical line
    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0]) < closest_pair_dis:
            cross_strip.append(point)
    closest_in_strip = dis_between_closest_in_strip(cross_strip, len(cross_strip), closest_pair_dis)
    return min(closest_pair_dis, closest_in_strip)


def closest_pair_of_points(points, points_counts):
    points_sorted_on_x = column_based_sort(points, column=0)
    points_sorted_on_y = column_based_sort(points, column=1)
    return (closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts)) ** 0.5


if __name__ == "__main__":
    points = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
    print("Distance:", closest_pair_of_points(points, len(points)))
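# Sanity check (added for illustration, not part of the original file): the O(n^2)
# brute force over all pairs must agree with the divide-and-conquer result.
def closest_pair_brute_force(points):
    return min(
        euclidean_distance_sqr(p, q) for i, p in enumerate(points) for q in points[i + 1 :]
    ) ** 0.5


if __name__ == "__main__":
    assert abs(closest_pair_brute_force(points) - closest_pair_of_points(points, len(points))) < 1e-9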
from __future__ import annotations

from collections import namedtuple


def electric_power(voltage: float, current: float, power: float) -> tuple:
    """Calculate whichever of power, voltage or current is given as 0,
    from the other two fundamental electrical quantities (P = V * I)."""
    result = namedtuple("result", "name value")
    if (voltage, current, power).count(0) != 1:
        raise ValueError("Only one argument must be 0")
    elif power < 0:
        raise ValueError("Power cannot be negative in any electrical/electronics system")
    elif voltage == 0:
        return result("voltage", power / current)
    elif current == 0:
        return result("current", power / voltage)
    elif power == 0:
        return result("power", float(round(abs(voltage * current), 2)))
    else:
        raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
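# Illustrative usage (added, not part of the original file):
#   electric_power(voltage=0, current=2, power=5)  -> result(name='voltage', value=2.5)
#   electric_power(voltage=2, current=2, power=0)  -> result(name='power', value=4.0)
assert electric_power(voltage=0, current=2, power=5).value == 2.5
assert electric_power(voltage=2, current=2, power=0).value == 4.0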
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/config.json",
    "bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/config.json",
    "bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/config.json",
    "bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/config.json",
    "bert-base-multilingual-uncased": "https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json",
    "bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json",
    "bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/config.json",
    "bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/config.json",
    "bert-large-uncased-whole-word-masking": (
        "https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"
    ),
    "bert-large-cased-whole-word-masking": (
        "https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"
    ),
    "bert-large-uncased-whole-word-masking-finetuned-squad": (
        "https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"
    ),
    "bert-large-cased-whole-word-masking-finetuned-squad": (
        "https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"
    ),
    "bert-base-cased-finetuned-mrpc": "https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json",
    "bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json",
    "bert-base-german-dbmdz-uncased": "https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json",
    "cl-tohoku/bert-base-japanese": "https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json",
    "cl-tohoku/bert-base-japanese-whole-word-masking": (
        "https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"
    ),
    "cl-tohoku/bert-base-japanese-char": (
        "https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"
    ),
    "cl-tohoku/bert-base-japanese-char-whole-word-masking": (
        "https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"
    ),
    "TurkuNLP/bert-base-finnish-cased-v1": (
        "https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"
    ),
    "TurkuNLP/bert-base-finnish-uncased-v1": (
        "https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"
    ),
    "wietsedv/bert-base-dutch-cased": "https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json",
    # See all BERT models at https://huggingface.co/models?filter=bert
}


class BertConfig(PretrainedConfig):
    model_type = "bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class BertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


# "SimpleImageProcessor" is a stand-in: the original model-specific class name is
# not recoverable from this snippet.
class SimpleImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        # Resize so that the shortest edge of the image matches size["shortest_edge"].
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
def excel_title_to_column(column_title: str) -> int:
    """Convert a spreadsheet column title (e.g. "A", "AB") to its column number,
    treating the title as a base-26 numeral with digits A..Z."""
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0

    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1

    return answer


if __name__ == "__main__":
    from doctest import testmod

    testmod()
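# Quick self-checks (added for illustration, not part of the original file):
# "AB" is 2 + 1 * 26 = 28, i.e. the 28th spreadsheet column.
assert excel_title_to_column("A") == 1
assert excel_title_to_column("AB") == 28
assert excel_title_to_column("ZZ") == 702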
from __future__ import annotations


def mean(nums: list) -> float:
    """Return the arithmetic mean of a non-empty list of numbers."""
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
import argparse

import requests
import torch

# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image

from transformers import (
    AutoTokenizer,
    Blip2Config,
    Blip2ForConditionalGeneration,
    Blip2Processor,
    Blip2VisionConfig,
    BlipImageProcessor,
    OPTConfig,
    T5Config,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD


def load_demo_image():
    url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return image


# here we list all keys to be renamed (original name on the left, our name on the right)
def create_rename_keys(config):
    rename_keys = []
    # fmt: off

    # vision encoder
    rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding"))
    rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding"))
    rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight"))
    rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias"))
    rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight"))
    rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias"))

    for i in range(config.vision_config.num_hidden_layers):
        rename_keys.append((f"visual_encoder.blocks.{i}.norm1.weight", f"vision_model.encoder.layers.{i}.layer_norm1.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm1.bias", f"vision_model.encoder.layers.{i}.layer_norm1.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm2.weight", f"vision_model.encoder.layers.{i}.layer_norm2.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm2.bias", f"vision_model.encoder.layers.{i}.layer_norm2.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.qkv.weight", f"vision_model.encoder.layers.{i}.self_attn.qkv.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.weight", f"vision_model.encoder.layers.{i}.self_attn.projection.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.bias", f"vision_model.encoder.layers.{i}.self_attn.projection.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.weight", f"vision_model.encoder.layers.{i}.mlp.fc1.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.bias", f"vision_model.encoder.layers.{i}.mlp.fc1.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.weight", f"vision_model.encoder.layers.{i}.mlp.fc2.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.bias", f"vision_model.encoder.layers.{i}.mlp.fc2.bias"))

    # QFormer
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.layernorm.weight"))
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.layernorm.bias"))

    # fmt: on
    return rename_keys


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")

        # next, set bias in the state dict (k has no bias, hence the zeros in the middle)
        qkv_bias = torch.cat((q_bias, torch.zeros_like(q_bias, requires_grad=False), v_bias))
        state_dict[f"vision_model.encoder.layers.{i}.self_attn.qkv.bias"] = qkv_bias


def get_blip2_config(model_name, eos_token_id):
    image_size = 364 if "coco" in model_name else 224
    vision_config = Blip2VisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-2.7b", eos_token_id=eos_token_id).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-6.7b", eos_token_id=eos_token_id).to_dict()
    elif "t5-xl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()

    config = Blip2Config(vision_config=vision_config, text_config=text_config)

    return config, image_size


@torch.no_grad()
def convert_blip2_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    tokenizer = (
        AutoTokenizer.from_pretrained("facebook/opt-2.7b")
        if "opt" in model_name
        else AutoTokenizer.from_pretrained("google/flan-t5-xl")
    )
    eos_token_id = tokenizer("\n", add_special_tokens=False).input_ids[0]
    config, image_size = get_blip2_config(model_name, eos_token_id=eos_token_id)

    hf_model = Blip2ForConditionalGeneration(config).eval()

    model_name_to_original = {
        "blip2-opt-2.7b": ("blip2_opt", "pretrain_opt2.7b"),
        "blip2-opt-6.7b": ("blip2_opt", "pretrain_opt6.7b"),
        "blip2-opt-2.7b-coco": ("blip2_opt", "caption_coco_opt2.7b"),
        "blip2-opt-6.7b-coco": ("blip2_opt", "caption_coco_opt6.7b"),
        "blip2-flan-t5-xl": ("blip2_t5", "pretrain_flant5xl"),
        "blip2-flan-t5-xl-coco": ("blip2_t5", "caption_coco_flant5xl"),
        "blip2-flan-t5-xxl": ("blip2_t5", "pretrain_flant5xxl"),
    }

    name, model_type = model_name_to_original[model_name]

    # load original model
    print("Loading original model...")
    device = "cuda" if torch.cuda.is_available() else "cpu"
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name, model_type=model_type, is_eval=True, device=device
    )
    original_model.eval()
    print("Done!")

    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("Qformer.bert"):
            key = key.replace("Qformer.bert", "qformer")
        if "attention.self" in key:
            key = key.replace("self", "attention")
        if "opt_proj" in key:
            key = key.replace("opt_proj", "language_projection")
        if "t5_proj" in key:
            key = key.replace("t5_proj", "language_projection")
        if key.startswith("opt"):
            key = key.replace("opt", "language")
        if key.startswith("t5"):
            key = key.replace("t5", "language")
        state_dict[key] = val

    # read in qv biases
    read_in_q_v_bias(state_dict, config)

    missing_keys, unexpected_keys = hf_model.load_state_dict(state_dict, strict=False)
    assert len(missing_keys) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]

    image = load_demo_image()
    original_pixel_values = vis_processors["eval"](image).unsqueeze(0).to(device)
    input_ids = tokenizer(["\n"], return_tensors="pt").input_ids.to(device)

    # create processor
    image_processor = BlipImageProcessor(
        size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD
    )
    processor = Blip2Processor(image_processor=image_processor, tokenizer=tokenizer)
    pixel_values = processor(images=image, return_tensors="pt").pixel_values.to(device)

    # make sure processor creates exact same pixel values
    assert torch.allclose(pixel_values, original_pixel_values)

    original_model.to(device)
    hf_model.to(device)
    with torch.no_grad():
        if "opt" in model_name:
            original_logits = original_model({"image": original_pixel_values, "text_input": [""]}).logits
            logits = hf_model(pixel_values, input_ids).logits
        else:
            original_logits = original_model(
                {"image": original_pixel_values, "text_input": ["\n"], "text_output": ["\n"]}
            ).logits
            labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(pixel_values, input_ids, labels=labels).logits

    assert original_logits.shape == logits.shape
    print("First values of original logits:", original_logits[0, :3, :3])
    print("First values of HF logits:", logits[0, :3, :3])

    # assert values
    if model_name == "blip2-flan-t5-xl":
        expected_slice_logits = torch.tensor(
            [[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]], device=device
        )
        assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    elif model_name == "blip2-flan-t5-xl-coco":
        expected_slice_logits = torch.tensor(
            [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]], device=device
        )
    else:
        # cast to same type
        target_dtype = logits.dtype
        assert torch.allclose(original_logits.to(target_dtype), logits, atol=1e-2)
    print("Looks ok!")

    print("Generating a caption...")
    prompt = ""
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(device)

    original_outputs = original_model.generate({"image": original_pixel_values})
    outputs = hf_model.generate(
        pixel_values,
        input_ids,
        do_sample=False,
        num_beams=5,
        max_length=30,
        min_length=1,
        top_p=0.9,
        repetition_penalty=1.0,
        length_penalty=1.0,
        temperature=1,
    )
    print("Original generation:", original_outputs)
    prompt_length = input_ids.shape[1]
    output_text = processor.batch_decode(outputs[:, prompt_length:], skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print("HF generation:", output_text)

    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        processor.push_to_hub(f"nielsr/{model_name}")
        hf_model.push_to_hub(f"nielsr/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
        "blip2-opt-2.7b",
        "blip2-opt-6.7b",
        "blip2-opt-2.7b-coco",
        "blip2-opt-6.7b-coco",
        "blip2-flan-t5-xl",
        "blip2-flan-t5-xl-coco",
        "blip2-flan-t5-xxl",
    ]
    parser.add_argument(
        "--model_name",
        default="blip2-opt-2.7b",
        choices=choices,
        type=str,
        help="Path to hf config.json of model to convert",
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub after converting",
    )

    args = parser.parse_args()

    convert_blip2_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_convbert": ["CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvBertConfig", "ConvBertOnnxConfig"],
    "tokenization_convbert": ["ConvBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_convbert_fast"] = ["ConvBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_convbert"] = [
        "CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConvBertForMaskedLM",
        "ConvBertForMultipleChoice",
        "ConvBertForQuestionAnswering",
        "ConvBertForSequenceClassification",
        "ConvBertForTokenClassification",
        "ConvBertLayer",
        "ConvBertModel",
        "ConvBertPreTrainedModel",
        "load_tf_weights_in_convbert",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_convbert"] = [
        "TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFConvBertForMaskedLM",
        "TFConvBertForMultipleChoice",
        "TFConvBertForQuestionAnswering",
        "TFConvBertForSequenceClassification",
        "TFConvBertForTokenClassification",
        "TFConvBertLayer",
        "TFConvBertModel",
        "TFConvBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
    from .tokenization_convbert import ConvBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_convbert_fast import ConvBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_convbert import (
            CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConvBertForMaskedLM,
            ConvBertForMultipleChoice,
            ConvBertForQuestionAnswering,
            ConvBertForSequenceClassification,
            ConvBertForTokenClassification,
            ConvBertLayer,
            ConvBertModel,
            ConvBertPreTrainedModel,
            load_tf_weights_in_convbert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_convbert import (
            TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFConvBertForMaskedLM,
            TFConvBertForMultipleChoice,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertLayer,
            TFConvBertModel,
            TFConvBertPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple

import numpy as np
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask

import transformers
from transformers import (
    AutoConfig,
    AutoModelForTokenClassification,
    AutoTokenizer,
    DataCollatorWithPadding,
    EvalPrediction,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    set_seed,
)
from transformers.trainer_utils import is_main_process


logger = logging.getLogger(__name__)


@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    task_type: Optional[str] = field(
        default="NER", metadata={"help": "Task type to fine tune in training (e.g. NER, POS, etc)"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."}
    )
    labels: Optional[str] = field(
        default=None,
        metadata={"help": "Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."},
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )


def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    module = import_module("tasks")
    try:
        token_classification_task_clazz = getattr(module, model_args.task_type)
        token_classification_task: TokenClassificationTask = token_classification_task_clazz()
    except AttributeError:
        raise ValueError(
            f"Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
            f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}"
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Prepare CONLL-2003 task
    labels = token_classification_task.get_labels(data_args.labels)
    label_map: Dict[int, str] = dict(enumerate(labels))
    num_labels = len(labels)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        id2label=label_map,
        label2id={label: i for i, label in enumerate(labels)},
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast,
    )
    model = AutoModelForTokenClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # Get datasets
    train_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def align_predictions(predictions: np.ndarray, label_ids: np.ndarray) -> Tuple[List[int], List[int]]:
        preds = np.argmax(predictions, axis=2)

        batch_size, seq_len = preds.shape

        out_label_list = [[] for _ in range(batch_size)]
        preds_list = [[] for _ in range(batch_size)]

        for i in range(batch_size):
            for j in range(seq_len):
                if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
                    out_label_list[i].append(label_map[label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])

        return preds_list, out_label_list

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds_list, out_label_list = align_predictions(p.predictions, p.label_ids)
        return {
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_process_zero():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

            results.update(result)

    # Predict
    if training_args.do_predict:
        test_dataset = TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.test,
        )

        predictions, label_ids, metrics = trainer.predict(test_dataset)
        preds_list, _ = align_predictions(predictions, label_ids)

        output_test_results_file = os.path.join(training_args.output_dir, "test_results.txt")
        if trainer.is_world_process_zero():
            with open(output_test_results_file, "w") as writer:
                for key, value in metrics.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

        # Save predictions
        output_test_predictions_file = os.path.join(training_args.output_dir, "test_predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_test_predictions_file, "w") as writer:
                with open(os.path.join(data_args.data_dir, "test.txt"), "r") as f:
                    token_classification_task.write_predictions_to_file(writer, f, preds_list)

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
import math


def fx(x: float, a: float) -> float:
    # f(x) = x^2 - a; its root is sqrt(a)
    return math.pow(x, 2) - a


def fx_derivative(x: float) -> float:
    return 2 * x


def get_initial_point(a: float) -> float:
    # repeatedly square the starting point until it exceeds a,
    # so that Newton's method starts to the right of the root
    start = 2.0
    while start <= a:
        start = math.pow(start, 2)
    return start


def square_root_iterative(a: float, max_iter: int = 9999, tolerance: float = 0.00000000000001) -> float:
    """Approximate sqrt(a) with Newton's method applied to f(x) = x^2 - a."""
    if a < 0:
        raise ValueError("math domain error")

    value = get_initial_point(a)

    for _ in range(max_iter):
        prev_value = value
        value = value - fx(value, a) / fx_derivative(value)
        if abs(prev_value - value) < tolerance:
            return value

    return value


if __name__ == "__main__":
    from doctest import testmod

    testmod()
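# Quick self-checks (added for illustration, not part of the original file):
# Newton's method converges quadratically once it is close to the root.
assert abs(square_root_iterative(4) - 2.0) < 1e-9
assert abs(square_root_iterative(3.2) - math.sqrt(3.2)) < 1e-9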
#
# This is a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run, first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get hanging in `barrier` calls you have some network issues; you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes        # name
# #SBATCH --nodes=2                    # nodes
# #SBATCH --ntasks-per-node=1          # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10           # number of cores per tasks
# #SBATCH --gres=gpu:4                 # number of gpus
# #SBATCH --time 0:05:00               # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out           # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
#  --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
#  --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
#  torch-distributed-gpu-test.py'
#

import fcntl
import os
import socket

import torch
import torch.distributed as dist


def printflock(*msgs):
    """solves the multi-process interleaved print problem by locking this file"""
    with open(__file__, "r") as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)


local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
hostname = socket.gethostname()

gpu = f"[{hostname}-{local_rank}]"

try:
    # test distributed
    dist.init_process_group("nccl")
    dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
    dist.barrier()

    # test cuda is available and can allocate memory
    torch.cuda.is_available()
    torch.ones(1).cuda(local_rank)

    # global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()

    printflock(f"{gpu} is OK (global rank: {rank}/{world_size})")

    dist.barrier()
    if rank == 0:
        printflock(f"pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}")

except Exception:
    printflock(f"{gpu} is broken")
    raise
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path

from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError

from transformers import AutoFeatureExtractor, Wav2Vec2FeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test


sys.path.append(str(Path(__file__).parent.parent / "utils"))

from test_module.custom_feature_extraction import CustomFeatureExtractor  # noqa E402


SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("fixtures")


class FeatureExtractorUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        _ = Wav2Vec2FeatureExtractor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json"
        )


@is_staging_test
class FeatureExtractorPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-feature-extractor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-feature-extractor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-feature-extractor")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        feature_extractor.push_to_hub("test-feature-extractor", use_auth_token=self._token)

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-feature-extractor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id="test-feature-extractor", push_to_hub=True, use_auth_token=self._token
            )

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

    def test_push_to_hub_in_organization(self):
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        feature_extractor.push_to_hub("valid_org/test-feature-extractor", use_auth_token=self._token)

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("valid_org/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-feature-extractor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id="valid_org/test-feature-extractor-org", push_to_hub=True, use_auth_token=self._token
            )

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("valid_org/test-feature-extractor-org")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

    def test_push_to_hub_dynamic_feature_extractor(self):
        CustomFeatureExtractor.register_for_auto_class()
        feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)

        feature_extractor.push_to_hub("test-dynamic-feature-extractor", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            feature_extractor.auto_map,
            {"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor"},
        )

        new_feature_extractor = AutoFeatureExtractor.from_pretrained(
            f"{USER}/test-dynamic-feature-extractor", trust_remote_code=True
        )
        # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
        self.assertEqual(new_feature_extractor.__class__.__name__, "CustomFeatureExtractor")
import pytest

import datasets


# Import fixture modules as plugins
pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]


def pytest_collection_modifyitems(config, items):
    # Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
    for item in items:
        if any(marker in item.keywords for marker in ["integration", "unit"]):
            continue
        item.add_marker(pytest.mark.unit)


def pytest_configure(config):
    config.addinivalue_line("markers", "torchaudio_latest: mark test to run with torchaudio>=0.12")


@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
    test_hf_datasets_cache = test_hf_cache_home / "datasets"
    test_hf_metrics_cache = test_hf_cache_home / "metrics"
    test_hf_modules_cache = test_hf_cache_home / "modules"
    monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache))
    monkeypatch.setattr("datasets.config.HF_METRICS_CACHE", str(test_hf_metrics_cache))
    monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache))
    test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
    monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path))
    test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
    monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path))


@pytest.fixture(autouse=True, scope="session")
def disable_tqdm_output():
    datasets.disable_progress_bar()


@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
    # don't take tests into account when counting downloads
    monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)


@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
    # Required to suppress RemovedIn20Warning when features are not compatible with SQLAlchemy 2.0
    monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING", True)
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_time_series_transformer": [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TimeSeriesTransformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_time_series_transformer"] = [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimeSeriesTransformerForPrediction",
        "TimeSeriesTransformerModel",
        "TimeSeriesTransformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_time_series_transformer import (
        TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TimeSeriesTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_time_series_transformer import (
            TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimeSeriesTransformerForPrediction,
            TimeSeriesTransformerModel,
            TimeSeriesTransformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
1
import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, UNetaDConditionModel, VideoToVideoSDPipeline, ) from diffusers.utils import floats_tensor, is_xformers_available, skip_mps from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() @skip_mps class __lowerCamelCase (_a , unittest.TestCase ): _lowercase = VideoToVideoSDPipeline _lowercase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"""video"""} ) - {"""image""", """width""", """height"""} _lowercase = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"""video"""} ) - {"""image"""} _lowercase = PipelineTesterMixin.required_optional_params - {"""latents"""} _lowercase = False # No `output_type`. _lowercase = frozenset( [ """num_inference_steps""", """generator""", """latents""", """return_dict""", """callback""", """callback_steps""", ] ) def snake_case_ ( self: List[str] ): '''simple docstring''' torch.manual_seed(0 ) __UpperCamelCase = UNetaDConditionModel( block_out_channels=(32, 64, 64, 64),layers_per_block=2,sample_size=32,in_channels=4,out_channels=4,down_block_types=('CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'DownBlock3D'),up_block_types=('UpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D'),cross_attention_dim=32,attention_head_dim=4,) __UpperCamelCase = DDIMScheduler( beta_start=0.0_0_0_8_5,beta_end=0.0_1_2,beta_schedule='scaled_linear',clip_sample=A_,set_alpha_to_one=A_,) torch.manual_seed(0 ) __UpperCamelCase = AutoencoderKL( block_out_channels=[32, 64],in_channels=3,out_channels=3,down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'],up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'],latent_channels=4,sample_size=128,) torch.manual_seed(0 ) __UpperCamelCase = CLIPTextConfig( bos_token_id=0,eos_token_id=2,hidden_size=32,intermediate_size=37,layer_norm_eps=1E-05,num_attention_heads=4,num_hidden_layers=5,pad_token_id=1,vocab_size=1000,hidden_act='gelu',projection_dim=512,) __UpperCamelCase = CLIPTextModel(A_ ) __UpperCamelCase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) __UpperCamelCase = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, } return components def snake_case_ ( self: Union[str, Any],A_: Any,A_: Any=0 ): '''simple docstring''' __UpperCamelCase = floats_tensor((1, 3, 3, 32, 32),rng=random.Random(A_ ) ).to(A_ ) if str(A_ ).startswith('mps' ): __UpperCamelCase = torch.manual_seed(A_ ) else: __UpperCamelCase = torch.Generator(device=A_ ).manual_seed(A_ ) __UpperCamelCase = { 'prompt': 'A painting of a squirrel eating a burger', 'video': video, 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 6.0, 'output_type': 'pt', } return inputs def snake_case_ ( self: Union[str, Any] ): '''simple docstring''' __UpperCamelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator __UpperCamelCase = self.get_dummy_components() __UpperCamelCase = VideoToVideoSDPipeline(**A_ ) __UpperCamelCase = sd_pipe.to(A_ ) sd_pipe.set_progress_bar_config(disable=A_ ) __UpperCamelCase = self.get_dummy_inputs(A_ ) __UpperCamelCase = 'np' __UpperCamelCase = sd_pipe(**A_ ).frames __UpperCamelCase = frames[0][-3:, -3:, -1] 
assert frames[0].shape == (32, 32, 3) __UpperCamelCase = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available(),reason='XFormers attention is only available with CUDA and `xformers` installed',) def snake_case_ ( self: Any ): '''simple docstring''' self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=A_,expected_max_diff=5E-3 ) @unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' ) def snake_case_ ( self: str ): '''simple docstring''' pass @unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' ) def snake_case_ ( self: Optional[Any] ): '''simple docstring''' pass @unittest.skip(reason='`num_images_per_prompt` argument is not supported for this pipeline.' ) def snake_case_ ( self: int ): '''simple docstring''' pass def snake_case_ ( self: Any ): '''simple docstring''' return super().test_progress_bar() @slow @skip_mps class __lowerCamelCase (unittest.TestCase ): def snake_case_ ( self: Tuple ): '''simple docstring''' __UpperCamelCase = VideoToVideoSDPipeline.from_pretrained('cerspense/zeroscope_v2_XL',torch_dtype=torch.floataa ) pipe.enable_model_cpu_offload() # 10 frames __UpperCamelCase = torch.Generator(device='cpu' ).manual_seed(0 ) __UpperCamelCase = torch.randn((1, 10, 3, 1024, 576),generator=A_ ) __UpperCamelCase = video.to('cuda' ) __UpperCamelCase = 'Spiderman is surfing' __UpperCamelCase = pipe(A_,video=A_,generator=A_,num_inference_steps=3,output_type='pt' ).frames __UpperCamelCase = np.array([-1.0_4_5_8_9_8_4, -1.1_2_7_9_2_9_7, -0.9_6_6_3_0_8_6, -0.9_1_5_0_3_9_0_6, -0.7_5_0_9_7_6_5_6] ) assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1E-2
1
1
def sum_of_series(first_term: int, common_diff: int, num_of_terms: int) -> float:
    """
    Find the sum of the first n terms of an arithmetic progression.

    >>> sum_of_series(1, 1, 10)
    55.0
    """
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    # formula for sum of series
    return total


def main() -> None:
    print(sum_of_series(1, 1, 10))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
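A quick sanity check of the closed form S = (n / 2) * (2a + (n - 1)d) as restored above: with a = 1, d = 1, n = 10 it evaluates to 5 * 11 = 55, matching a direct sum.

# both checks pass; sum_of_series returns a float (55.0 == 55)
assert sum_of_series(1, 1, 10) == sum(range(1, 11))
assert sum_of_series(1, 10, 100) == 49_600.0  # (100 / 2) * (2 + 99 * 10)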
1
import argparse

from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection

from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')

    parser.add_argument(
        '--txt2img_unclip',
        default='kakaobrain/karlo-v1-alpha',
        type=str,
        required=False,
        help='The pretrained txt2img unclip.',
    )

    args = parser.parse_args()

    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained('openai/clip-vit-large-patch14')

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
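Assuming the script is saved as, say, `convert_unclip_to_image_variation.py` (no filename is given in the source), it would be invoked as `python convert_unclip_to_image_variation.py --dump_path ./karlo-image-variation`, with `--txt2img_unclip` optionally pointing at a different pretrained UnCLIP checkpoint. The conversion reuses every component of the text-to-image pipeline and swaps in a CLIP image encoder so the result conditions on images instead of prompts.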
1
1
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    'configuration_perceiver': ['PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PerceiverConfig', 'PerceiverOnnxConfig'],
    'tokenization_perceiver': ['PerceiverTokenizer'],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['feature_extraction_perceiver'] = ['PerceiverFeatureExtractor']
    _import_structure['image_processing_perceiver'] = ['PerceiverImageProcessor']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_perceiver'] = [
        'PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'PerceiverForImageClassificationConvProcessing',
        'PerceiverForImageClassificationFourier',
        'PerceiverForImageClassificationLearned',
        'PerceiverForMaskedLM',
        'PerceiverForMultimodalAutoencoding',
        'PerceiverForOpticalFlow',
        'PerceiverForSequenceClassification',
        'PerceiverLayer',
        'PerceiverModel',
        'PerceiverPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
    from .tokenization_perceiver import PerceiverTokenizer

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_perceiver import PerceiverFeatureExtractor
        from .image_processing_perceiver import PerceiverImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_perceiver import (
            PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
            PerceiverForImageClassificationConvProcessing,
            PerceiverForImageClassificationFourier,
            PerceiverForImageClassificationLearned,
            PerceiverForMaskedLM,
            PerceiverForMultimodalAutoencoding,
            PerceiverForOpticalFlow,
            PerceiverForSequenceClassification,
            PerceiverLayer,
            PerceiverModel,
            PerceiverPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
1
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    'configuration_autoformer': [
        'AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'AutoformerConfig',
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_autoformer'] = [
        'AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'AutoformerForPrediction',
        'AutoformerModel',
        'AutoformerPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_autoformer import (
        AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        AutoformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_autoformer import (
            AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            AutoformerForPrediction,
            AutoformerModel,
            AutoformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
1
1
def remove_digit(num: int) -> int:
    """
    Return the biggest number that can be formed
    by removing exactly one digit from the given integer.

    >>> remove_digit(152)
    52
    >>> remove_digit(6385)
    685
    """
    if not isinstance(num, int):
        raise TypeError('only integers accepted as input')
    else:
        num_str = str(abs(num))
        num_transpositions = [list(num_str) for char in range(len(num_str))]
        for index in range(len(num_str)):
            num_transpositions[index].pop(index)
        return max(int(''.join(list(transposition))) for transposition in num_transpositions)


if __name__ == "__main__":
    __import__('doctest').testmod()
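To see what the transposition list is doing, take num = 152: dropping the digit at index 0, 1, and 2 leaves the candidates 52, 12, and 15, so the maximum is 52. The same mechanics explain why the sign is irrelevant, since the function works on abs(num):

assert remove_digit(152) == 52    # candidates: 52, 12, 15
assert remove_digit(-152) == 52   # abs() drops the sign first
assert remove_digit(6385) == 685  # candidates: 385, 685, 635, 638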
1
import argparse import os import re # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_dummies.py __snake_case = '''src/diffusers''' # Matches is_xxx_available() __snake_case = re.compile(r'''is\_([a-z_]*)_available\(\)''') # Matches from xxx import bla __snake_case = re.compile(r'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''') __snake_case = ''' {0} = None ''' __snake_case = ''' class {0}(metaclass=DummyObject): _backends = {1} def __init__(self, *args, **kwargs): requires_backends(self, {1}) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, {1}) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, {1}) ''' __snake_case = ''' def {0}(*args, **kwargs): requires_backends({0}, {1}) ''' def _A ( _lowercase ) -> int: """simple docstring""" __UpperCamelCase = _re_backend.findall(_lowercase ) if len(_lowercase ) == 0: return None return "_and_".join(_lowercase ) def _A ( ) -> Tuple: """simple docstring""" with open(os.path.join(_lowercase , '__init__.py' ) , 'r' , encoding='utf-8' , newline='\n' ) as f: __UpperCamelCase = f.readlines() # Get to the point we do the actual imports for type checking __UpperCamelCase = 0 __UpperCamelCase = {} # Go through the end of the file while line_index < len(_lowercase ): # If the line contains is_backend_available, we grab all objects associated with the `else` block __UpperCamelCase = find_backend(lines[line_index] ) if backend is not None: while not lines[line_index].startswith('else:' ): line_index += 1 line_index += 1 __UpperCamelCase = [] # Until we unindent, add backend objects to the list while line_index < len(_lowercase ) and len(lines[line_index] ) > 1: __UpperCamelCase = lines[line_index] __UpperCamelCase = _re_single_line_import.search(_lowercase ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(', ' ) ) elif line.startswith(' ' * 8 ): objects.append(line[8:-2] ) line_index += 1 if len(_lowercase ) > 0: __UpperCamelCase = objects else: line_index += 1 return backend_specific_objects def _A ( _lowercase , _lowercase ) -> Union[str, Any]: """simple docstring""" if name.isupper(): return DUMMY_CONSTANT.format(_lowercase ) elif name.islower(): return DUMMY_FUNCTION.format(_lowercase , _lowercase ) else: return DUMMY_CLASS.format(_lowercase , _lowercase ) def _A ( _lowercase=None ) -> Optional[Any]: """simple docstring""" if backend_specific_objects is None: __UpperCamelCase = read_init() # For special correspondence backend to module name as used in the function requires_modulename __UpperCamelCase = {} for backend, objects in backend_specific_objects.items(): __UpperCamelCase = '[' + ', '.join(f'''"{b}"''' for b in backend.split('_and_' ) ) + ']' __UpperCamelCase = '# This file is autogenerated by the command `make fix-copies`, do not edit.\n' dummy_file += "from ..utils import DummyObject, requires_backends\n\n" dummy_file += "\n".join([create_dummy_object(_lowercase , _lowercase ) for o in objects] ) __UpperCamelCase = dummy_file return dummy_files def _A ( _lowercase=False ) -> List[str]: """simple docstring""" __UpperCamelCase = create_dummy_files() # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py __UpperCamelCase = {'torch': 'pt'} # Locate actual dummy modules and read their content. 
__UpperCamelCase = os.path.join(_lowercase , 'utils' ) __UpperCamelCase = { backend: os.path.join(_lowercase , f'''dummy_{short_names.get(_lowercase , _lowercase )}_objects.py''' ) for backend in dummy_files.keys() } __UpperCamelCase = {} for backend, file_path in dummy_file_paths.items(): if os.path.isfile(_lowercase ): with open(_lowercase , 'r' , encoding='utf-8' , newline='\n' ) as f: __UpperCamelCase = f.read() else: __UpperCamelCase = '' for backend in dummy_files.keys(): if dummy_files[backend] != actual_dummies[backend]: if overwrite: print( f'''Updating diffusers.utils.dummy_{short_names.get(_lowercase , _lowercase )}_objects.py as the main ''' '__init__ has new objects.' ) with open(dummy_file_paths[backend] , 'w' , encoding='utf-8' , newline='\n' ) as f: f.write(dummy_files[backend] ) else: raise ValueError( 'The main __init__ has objects that are not present in ' f'''diffusers.utils.dummy_{short_names.get(_lowercase , _lowercase )}_objects.py. Run `make fix-copies` ''' 'to fix this.' ) if __name__ == "__main__": __snake_case = argparse.ArgumentParser() parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''') __snake_case = parser.parse_args() check_dummies(args.fix_and_overwrite)
1
1
from typing import Any, Dict, List, Optional, Tuple, Union import torch from torch import nn from torch.utils.data import DistributedSampler, RandomSampler from transformers import PreTrainedModel, Trainer, logging from transformers.integrations import is_fairscale_available from transformers.models.fsmt.configuration_fsmt import FSMTConfig from transformers.optimization import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) from transformers.trainer_pt_utils import get_tpu_sampler from transformers.training_args import ParallelMode from transformers.utils import is_torch_tpu_available if is_fairscale_available(): from fairscale.optim import OSS __snake_case = logging.get_logger(__name__) __snake_case = { '''linear''': get_linear_schedule_with_warmup, '''cosine''': get_cosine_schedule_with_warmup, '''cosine_w_restarts''': get_cosine_with_hard_restarts_schedule_with_warmup, '''polynomial''': get_polynomial_decay_schedule_with_warmup, '''constant''': get_constant_schedule, '''constant_w_warmup''': get_constant_schedule_with_warmup, } class __lowerCamelCase (_a ): def __init__( self: Any,A_: List[str]=None,A_: Optional[Any]=None,*A_: Tuple,**A_: Optional[int] ): '''simple docstring''' super().__init__(*A_,**A_ ) if config is None: assert isinstance(self.model,A_ ), ( "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is" F''' {self.model.__class__}''' ) __UpperCamelCase = self.model.config else: __UpperCamelCase = config __UpperCamelCase = data_args __UpperCamelCase = self.config.tgt_vocab_size if isinstance(self.config,A_ ) else self.config.vocab_size if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss): assert self.config.pad_token_id is not None, ( "Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss" " calculation or doing label smoothing." ) if self.config.pad_token_id is None and self.config.eos_token_id is not None: logger.warning( F'''The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for''' ' padding..' 
) if self.args.label_smoothing == 0: __UpperCamelCase = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id ) else: # dynamically import label_smoothed_nll_loss from utils import label_smoothed_nll_loss __UpperCamelCase = label_smoothed_nll_loss def snake_case_ ( self: Dict,A_: int ): '''simple docstring''' if self.optimizer is None: __UpperCamelCase = ['bias', 'LayerNorm.weight'] __UpperCamelCase = [ { 'params': [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )], 'weight_decay': self.args.weight_decay, }, { 'params': [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )], 'weight_decay': 0.0, }, ] __UpperCamelCase = Adafactor if self.args.adafactor else AdamW if self.args.adafactor: __UpperCamelCase = Adafactor __UpperCamelCase = {'scale_parameter': False, 'relative_step': False} else: __UpperCamelCase = AdamW __UpperCamelCase = { 'betas': (self.args.adam_betaa, self.args.adam_betaa), 'eps': self.args.adam_epsilon, } __UpperCamelCase = self.args.learning_rate if self.sharded_ddp: __UpperCamelCase = OSS( params=A_,optim=A_,**A_,) else: __UpperCamelCase = optimizer_cls(A_,**A_ ) if self.lr_scheduler is None: __UpperCamelCase = self._get_lr_scheduler(A_ ) else: # ignoring --lr_scheduler logger.warning('scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.' ) def snake_case_ ( self: Any,A_: Optional[Any] ): '''simple docstring''' __UpperCamelCase = arg_to_scheduler[self.args.lr_scheduler] if self.args.lr_scheduler == "constant": __UpperCamelCase = schedule_func(self.optimizer ) elif self.args.lr_scheduler == "constant_w_warmup": __UpperCamelCase = schedule_func(self.optimizer,num_warmup_steps=self.args.warmup_steps ) else: __UpperCamelCase = schedule_func( self.optimizer,num_warmup_steps=self.args.warmup_steps,num_training_steps=A_ ) return scheduler def snake_case_ ( self: List[Any] ): '''simple docstring''' if isinstance(self.train_dataset,torch.utils.data.IterableDataset ): return None elif is_torch_tpu_available(): return get_tpu_sampler(self.train_dataset ) else: if self.args.sortish_sampler: self.train_dataset.make_sortish_sampler( self.args.per_device_train_batch_size,distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED),) return ( RandomSampler(self.train_dataset ) if self.args.local_rank == -1 else DistributedSampler(self.train_dataset ) ) def snake_case_ ( self: Optional[int],A_: Any,A_: Optional[Any],A_: Any ): '''simple docstring''' if self.args.label_smoothing == 0: if self.data_args is not None and self.data_args.ignore_pad_token_for_loss: # force training to ignore pad token __UpperCamelCase = model(**A_,use_cache=A_ )[0] __UpperCamelCase = self.loss_fn(logits.view(-1,logits.shape[-1] ),labels.view(-1 ) ) else: # compute usual loss via models __UpperCamelCase, __UpperCamelCase = model(**A_,labels=A_,use_cache=A_ )[:2] else: # compute label smoothed loss __UpperCamelCase = model(**A_,use_cache=A_ )[0] __UpperCamelCase = torch.nn.functional.log_softmax(A_,dim=-1 ) __UpperCamelCase, __UpperCamelCase = self.loss_fn(A_,A_,self.args.label_smoothing,ignore_index=self.config.pad_token_id ) return loss, logits def snake_case_ ( self: Tuple,A_: List[Any],A_: List[Any] ): '''simple docstring''' __UpperCamelCase = inputs.pop('labels' ) __UpperCamelCase, __UpperCamelCase = self._compute_loss(A_,A_,A_ ) return loss def snake_case_ ( self: Optional[int],A_: nn.Module,A_: Dict[str, Union[torch.Tensor, Any]],A_: bool,A_: Optional[List[str]] = None,): '''simple docstring''' __UpperCamelCase 
= self._prepare_inputs(A_ ) __UpperCamelCase = { 'max_length': self.data_args.val_max_target_length if self.data_args is not None else self.config.max_length, 'num_beams': self.data_args.eval_beams if self.data_args is not None else self.config.num_beams, } if self.args.predict_with_generate and not self.args.prediction_loss_only: __UpperCamelCase = self.model.generate( inputs['input_ids'],attention_mask=inputs['attention_mask'],**A_,) # in case the batch is shorter than max length, the output should be padded if generated_tokens.shape[-1] < gen_kwargs["max_length"]: __UpperCamelCase = self._pad_tensors_to_max_len(A_,gen_kwargs['max_length'] ) __UpperCamelCase = inputs.pop('labels' ) with torch.no_grad(): # compute loss on predict data __UpperCamelCase, __UpperCamelCase = self._compute_loss(A_,A_,A_ ) __UpperCamelCase = loss.mean().detach() if self.args.prediction_loss_only: return (loss, None, None) __UpperCamelCase = generated_tokens if self.args.predict_with_generate else logits if labels.shape[-1] < gen_kwargs["max_length"]: __UpperCamelCase = self._pad_tensors_to_max_len(A_,gen_kwargs['max_length'] ) return (loss, logits, labels) def snake_case_ ( self: List[Any],A_: Tuple,A_: Optional[Any] ): '''simple docstring''' __UpperCamelCase = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id if pad_token_id is None: raise ValueError( 'Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be' F''' padded to `max_length`={max_length}''' ) __UpperCamelCase = pad_token_id * torch.ones( (tensor.shape[0], max_length),dtype=tensor.dtype,device=tensor.device ) __UpperCamelCase = tensor return padded_tensor
1
import string


def decrypt(message: str) -> None:
    """
    Brute-force a Caesar cipher by printing the message
    decrypted with every one of the 26 possible keys.
    """
    for key in range(len(string.ascii_uppercase)):
        translated = ''
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(f'Decryption using Key #{key}: {translated}')


def main() -> None:
    message = input('Encrypted message: ')
    message = message.upper()
    decrypt(message)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
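Because a Caesar cipher has only 26 possible keys, printing every candidate is already a complete attack; the reader simply picks the line that reads as English. For example, 'HELLO' shifted by 3 encrypts to 'KHOOR', so in the output of the call below the Key #3 line recovers the plaintext:

decrypt('KHOOR')
# Decryption using Key #0: KHOOR
# Decryption using Key #1: JGNNQ
# Decryption using Key #2: IFMMP
# Decryption using Key #3: HELLO
# ... (keys 4-25 follow)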
1
1
import argparse import os import re import numpy as np import PIL import torch from timm import create_model from torch.optim.lr_scheduler import OneCycleLR from torch.utils.data import DataLoader, Dataset from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor from accelerate import Accelerator def _A ( _lowercase ) -> Union[str, Any]: """simple docstring""" __UpperCamelCase = fname.split(os.path.sep )[-1] return re.search(r'^(.*)_\d+\.jpg$' , _lowercase ).groups()[0] class __lowerCamelCase (_a ): def __init__( self: Optional[int],A_: Tuple,A_: Optional[Any]=None,A_: Dict=None ): '''simple docstring''' __UpperCamelCase = file_names __UpperCamelCase = image_transform __UpperCamelCase = label_to_id def __len__( self: Any ): '''simple docstring''' return len(self.file_names ) def __getitem__( self: Dict,A_: Dict ): '''simple docstring''' __UpperCamelCase = self.file_names[idx] __UpperCamelCase = PIL.Image.open(A_ ) __UpperCamelCase = raw_image.convert('RGB' ) if self.image_transform is not None: __UpperCamelCase = self.image_transform(A_ ) __UpperCamelCase = extract_label(A_ ) if self.label_to_id is not None: __UpperCamelCase = self.label_to_id[label] return {"image": image, "label": label} def _A ( _lowercase , _lowercase ) -> Tuple: """simple docstring""" if args.with_tracking: __UpperCamelCase = Accelerator( cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='all' , project_dir=args.project_dir ) else: __UpperCamelCase = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs __UpperCamelCase = config['lr'] __UpperCamelCase = int(config['num_epochs'] ) __UpperCamelCase = int(config['seed'] ) __UpperCamelCase = int(config['batch_size'] ) __UpperCamelCase = config['image_size'] if not isinstance(_lowercase , (list, tuple) ): __UpperCamelCase = (image_size, image_size) # Parse out whether we are saving every epoch or after a certain number of batches if hasattr(args.checkpointing_steps , 'isdigit' ): if args.checkpointing_steps == "epoch": __UpperCamelCase = args.checkpointing_steps elif args.checkpointing_steps.isdigit(): __UpperCamelCase = int(args.checkpointing_steps ) else: raise ValueError( f'''Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.''' ) else: __UpperCamelCase = None # We need to initialize the trackers we use, and also store our configuration if args.with_tracking: __UpperCamelCase = os.path.split(_lowercase )[-1].split('.' )[0] accelerator.init_trackers(_lowercase , _lowercase ) # Grab all the image filenames __UpperCamelCase = [os.path.join(args.data_dir , _lowercase ) for fname in os.listdir(args.data_dir ) if fname.endswith('.jpg' )] # Build the label correspondences __UpperCamelCase = [extract_label(_lowercase ) for fname in file_names] __UpperCamelCase = list(set(_lowercase ) ) id_to_label.sort() __UpperCamelCase = {lbl: i for i, lbl in enumerate(_lowercase )} # Set the seed before splitting the data. 
np.random.seed(_lowercase ) torch.manual_seed(_lowercase ) torch.cuda.manual_seed_all(_lowercase ) # Split our filenames between train and validation __UpperCamelCase = np.random.permutation(len(_lowercase ) ) __UpperCamelCase = int(0.8 * len(_lowercase ) ) __UpperCamelCase = random_perm[:cut] __UpperCamelCase = random_perm[cut:] # For training we use a simple RandomResizedCrop __UpperCamelCase = Compose([RandomResizedCrop(_lowercase , scale=(0.5, 1.0) ), ToTensor()] ) __UpperCamelCase = PetsDataset( [file_names[i] for i in train_split] , image_transform=_lowercase , label_to_id=_lowercase ) # For evaluation, we use a deterministic Resize __UpperCamelCase = Compose([Resize(_lowercase ), ToTensor()] ) __UpperCamelCase = PetsDataset([file_names[i] for i in eval_split] , image_transform=_lowercase , label_to_id=_lowercase ) # Instantiate dataloaders. __UpperCamelCase = DataLoader(_lowercase , shuffle=_lowercase , batch_size=_lowercase , num_workers=4 ) __UpperCamelCase = DataLoader(_lowercase , shuffle=_lowercase , batch_size=_lowercase , num_workers=4 ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) __UpperCamelCase = create_model('resnet50d' , pretrained=_lowercase , num_classes=len(_lowercase ) ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). __UpperCamelCase = model.to(accelerator.device ) # Freezing the base model for param in model.parameters(): __UpperCamelCase = False for param in model.get_classifier().parameters(): __UpperCamelCase = True # We normalize the batches of images to be a bit faster. __UpperCamelCase = torch.tensor(model.default_cfg['mean'] )[None, :, None, None].to(accelerator.device ) __UpperCamelCase = torch.tensor(model.default_cfg['std'] )[None, :, None, None].to(accelerator.device ) # Instantiate optimizer __UpperCamelCase = torch.optim.Adam(params=model.parameters() , lr=lr / 25 ) # Instantiate learning rate scheduler __UpperCamelCase = OneCycleLR(optimizer=_lowercase , max_lr=_lowercase , epochs=_lowercase , steps_per_epoch=len(_lowercase ) ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. 
__UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase = accelerator.prepare( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) # We need to keep track of how many total steps we have iterated over __UpperCamelCase = 0 # We also need to keep track of the starting epoch so files are named properly __UpperCamelCase = 0 # Potentially load in the weights and states from a previous save if args.resume_from_checkpoint: if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "": accelerator.print(f'''Resumed from checkpoint: {args.resume_from_checkpoint}''' ) accelerator.load_state(args.resume_from_checkpoint ) __UpperCamelCase = os.path.basename(args.resume_from_checkpoint ) else: # Get the most recent checkpoint __UpperCamelCase = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()] dirs.sort(key=os.path.getctime ) __UpperCamelCase = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last # Extract `epoch_{i}` or `step_{i}` __UpperCamelCase = os.path.splitext(_lowercase )[0] if "epoch" in training_difference: __UpperCamelCase = int(training_difference.replace('epoch_' , '' ) ) + 1 __UpperCamelCase = None else: __UpperCamelCase = int(training_difference.replace('step_' , '' ) ) __UpperCamelCase = resume_step // len(_lowercase ) resume_step -= starting_epoch * len(_lowercase ) # Now we train the model for epoch in range(_lowercase , _lowercase ): model.train() if args.with_tracking: __UpperCamelCase = 0 if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None: # We need to skip steps until we reach the resumed step __UpperCamelCase = accelerator.skip_first_batches(_lowercase , _lowercase ) overall_step += resume_step else: # After the first iteration though, we need to go back to the original dataloader __UpperCamelCase = train_dataloader for batch in active_dataloader: # We could avoid this line since we set the accelerator with `device_placement=True`. __UpperCamelCase = {k: v.to(accelerator.device ) for k, v in batch.items()} __UpperCamelCase = (batch['image'] - mean) / std __UpperCamelCase = model(_lowercase ) __UpperCamelCase = torch.nn.functional.cross_entropy(_lowercase , batch['label'] ) # We keep track of the loss at each epoch if args.with_tracking: total_loss += loss.detach().float() accelerator.backward(_lowercase ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 if isinstance(_lowercase , _lowercase ): __UpperCamelCase = f'''step_{overall_step}''' if overall_step % checkpointing_steps == 0: if args.output_dir is not None: __UpperCamelCase = os.path.join(args.output_dir , _lowercase ) accelerator.save_state(_lowercase ) model.eval() __UpperCamelCase = 0 __UpperCamelCase = 0 for step, batch in enumerate(_lowercase ): # We could avoid this line since we set the accelerator with `device_placement=True`. __UpperCamelCase = {k: v.to(accelerator.device ) for k, v in batch.items()} __UpperCamelCase = (batch['image'] - mean) / std with torch.no_grad(): __UpperCamelCase = model(_lowercase ) __UpperCamelCase = outputs.argmax(dim=-1 ) __UpperCamelCase, __UpperCamelCase = accelerator.gather_for_metrics((predictions, batch['label']) ) __UpperCamelCase = predictions == references num_elems += accurate_preds.shape[0] accurate += accurate_preds.long().sum() __UpperCamelCase = accurate.item() / num_elems # Use accelerator.print to print only on the main process. 
accelerator.print(f'''epoch {epoch}: {1_00 * eval_metric:.2f}''' ) if args.with_tracking: accelerator.log( { 'accuracy': 1_00 * eval_metric, 'train_loss': total_loss.item() / len(_lowercase ), 'epoch': epoch, } , step=_lowercase , ) if checkpointing_steps == "epoch": __UpperCamelCase = f'''epoch_{epoch}''' if args.output_dir is not None: __UpperCamelCase = os.path.join(args.output_dir , _lowercase ) accelerator.save_state(_lowercase ) if args.with_tracking: accelerator.end_training() def _A ( ) -> Dict: """simple docstring""" __UpperCamelCase = argparse.ArgumentParser(description='Simple example of training script.' ) parser.add_argument('--data_dir' , required=_lowercase , help='The data folder on disk.' ) parser.add_argument('--fp16' , action='store_true' , help='If passed, will use FP16 training.' ) parser.add_argument( '--mixed_precision' , type=_lowercase , default=_lowercase , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose' 'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.' 'and an Nvidia Ampere GPU.' , ) parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' ) parser.add_argument( '--checkpointing_steps' , type=_lowercase , default=_lowercase , help='Whether the various states should be saved at the end of every n steps, or \'epoch\' for each epoch.' , ) parser.add_argument( '--output_dir' , type=_lowercase , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , ) parser.add_argument( '--resume_from_checkpoint' , type=_lowercase , default=_lowercase , help='If the training should continue from a checkpoint folder.' , ) parser.add_argument( '--with_tracking' , action='store_true' , help='Whether to load in all available experiment trackers from the environment and use them for logging.' , ) parser.add_argument( '--project_dir' , type=_lowercase , default='logs' , help='Location on where to store experiment tracking logs` and relevent project information' , ) __UpperCamelCase = parser.parse_args() __UpperCamelCase = {'lr': 3e-2, 'num_epochs': 3, 'seed': 42, 'batch_size': 64, 'image_size': 2_24} training_function(_lowercase , _lowercase ) if __name__ == "__main__": main()
1
import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import XLMRobertaTokenizerFast from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class __lowerCamelCase (_a , unittest.TestCase ): _lowercase = KandinskyInpaintPipeline _lowercase = ["""prompt""", """image_embeds""", """negative_image_embeds""", """image""", """mask_image"""] _lowercase = [ """prompt""", """negative_prompt""", """image_embeds""", """negative_image_embeds""", """image""", """mask_image""", ] _lowercase = [ """generator""", """height""", """width""", """latents""", """guidance_scale""", """negative_prompt""", """num_inference_steps""", """return_dict""", """guidance_scale""", """num_images_per_prompt""", """output_type""", """return_dict""", ] _lowercase = False @property def snake_case_ ( self: int ): '''simple docstring''' return 32 @property def snake_case_ ( self: str ): '''simple docstring''' return 32 @property def snake_case_ ( self: Tuple ): '''simple docstring''' return self.time_input_dim @property def snake_case_ ( self: Union[str, Any] ): '''simple docstring''' return self.time_input_dim * 4 @property def snake_case_ ( self: Optional[int] ): '''simple docstring''' return 100 @property def snake_case_ ( self: str ): '''simple docstring''' __UpperCamelCase = XLMRobertaTokenizerFast.from_pretrained('YiYiXu/tiny-random-mclip-base' ) return tokenizer @property def snake_case_ ( self: Any ): '''simple docstring''' torch.manual_seed(0 ) __UpperCamelCase = MCLIPConfig( numDims=self.cross_attention_dim,transformerDimensions=self.text_embedder_hidden_size,hidden_size=self.text_embedder_hidden_size,intermediate_size=37,num_attention_heads=4,num_hidden_layers=5,vocab_size=1005,) __UpperCamelCase = MultilingualCLIP(A_ ) __UpperCamelCase = text_encoder.eval() return text_encoder @property def snake_case_ ( self: Any ): '''simple docstring''' torch.manual_seed(0 ) __UpperCamelCase = { 'in_channels': 9, # Out channels is double in channels because predicts mean and variance 'out_channels': 8, 'addition_embed_type': 'text_image', 'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'), 'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'), 'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn', 'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2), 'layers_per_block': 1, 'encoder_hid_dim': self.text_embedder_hidden_size, 'encoder_hid_dim_type': 'text_image_proj', 'cross_attention_dim': self.cross_attention_dim, 'attention_head_dim': 4, 'resnet_time_scale_shift': 'scale_shift', 'class_embed_type': None, } __UpperCamelCase = UNetaDConditionModel(**A_ ) return model @property def snake_case_ ( self: str ): '''simple docstring''' return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property 
def snake_case_ ( self: str ): '''simple docstring''' torch.manual_seed(0 ) __UpperCamelCase = VQModel(**self.dummy_movq_kwargs ) return model def snake_case_ ( self: Dict ): '''simple docstring''' __UpperCamelCase = self.dummy_text_encoder __UpperCamelCase = self.dummy_tokenizer __UpperCamelCase = self.dummy_unet __UpperCamelCase = self.dummy_movq __UpperCamelCase = DDIMScheduler( num_train_timesteps=1000,beta_schedule='linear',beta_start=0.0_0_0_8_5,beta_end=0.0_1_2,clip_sample=A_,set_alpha_to_one=A_,steps_offset=1,prediction_type='epsilon',thresholding=A_,) __UpperCamelCase = { 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'unet': unet, 'scheduler': scheduler, 'movq': movq, } return components def snake_case_ ( self: Tuple,A_: Optional[int],A_: Dict=0 ): '''simple docstring''' __UpperCamelCase = floats_tensor((1, self.cross_attention_dim),rng=random.Random(A_ ) ).to(A_ ) __UpperCamelCase = floats_tensor((1, self.cross_attention_dim),rng=random.Random(seed + 1 ) ).to(A_ ) # create init_image __UpperCamelCase = floats_tensor((1, 3, 64, 64),rng=random.Random(A_ ) ).to(A_ ) __UpperCamelCase = image.cpu().permute(0,2,3,1 )[0] __UpperCamelCase = Image.fromarray(np.uinta(A_ ) ).convert('RGB' ).resize((256, 256) ) # create mask __UpperCamelCase = np.ones((64, 64),dtype=np.floataa ) __UpperCamelCase = 0 if str(A_ ).startswith('mps' ): __UpperCamelCase = torch.manual_seed(A_ ) else: __UpperCamelCase = torch.Generator(device=A_ ).manual_seed(A_ ) __UpperCamelCase = { 'prompt': 'horse', 'image': init_image, 'mask_image': mask, 'image_embeds': image_embeds, 'negative_image_embeds': negative_image_embeds, 'generator': generator, 'height': 64, 'width': 64, 'num_inference_steps': 2, 'guidance_scale': 4.0, 'output_type': 'np', } return inputs def snake_case_ ( self: Any ): '''simple docstring''' __UpperCamelCase = 'cpu' __UpperCamelCase = self.get_dummy_components() __UpperCamelCase = self.pipeline_class(**A_ ) __UpperCamelCase = pipe.to(A_ ) pipe.set_progress_bar_config(disable=A_ ) __UpperCamelCase = pipe(**self.get_dummy_inputs(A_ ) ) __UpperCamelCase = output.images __UpperCamelCase = pipe( **self.get_dummy_inputs(A_ ),return_dict=A_,)[0] __UpperCamelCase = image[0, -3:, -3:, -1] __UpperCamelCase = image_from_tuple[0, -3:, -3:, -1] print(F'''image.shape {image.shape}''' ) assert image.shape == (1, 64, 64, 3) __UpperCamelCase = np.array( [0.8_3_2_6_9_1_9, 0.7_3_7_9_0_4_6_7, 0.2_0_9_1_8_5_8_1, 0.9_3_0_9_6_1_2, 0.5_5_1_1_7_9_1, 0.4_3_7_1_3_3_2_8, 0.5_5_1_3_3_2_1, 0.4_9_9_2_2_9_3_4, 0.5_9_4_9_7_7_8_6] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 ), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}''' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 ), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}''' def snake_case_ ( self: Optional[Any] ): '''simple docstring''' super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) @slow @require_torch_gpu class __lowerCamelCase (unittest.TestCase ): def snake_case_ ( self: Tuple ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def snake_case_ ( self: Any ): '''simple docstring''' __UpperCamelCase = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy' ) __UpperCamelCase = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' ) __UpperCamelCase 
= np.ones((768, 768),dtype=np.floataa ) __UpperCamelCase = 0 __UpperCamelCase = 'a hat' __UpperCamelCase = KandinskyPriorPipeline.from_pretrained( 'kandinsky-community/kandinsky-2-1-prior',torch_dtype=torch.floataa ) pipe_prior.to(A_ ) __UpperCamelCase = KandinskyInpaintPipeline.from_pretrained( 'kandinsky-community/kandinsky-2-1-inpaint',torch_dtype=torch.floataa ) __UpperCamelCase = pipeline.to(A_ ) pipeline.set_progress_bar_config(disable=A_ ) __UpperCamelCase = torch.Generator(device='cpu' ).manual_seed(0 ) __UpperCamelCase, __UpperCamelCase = pipe_prior( A_,generator=A_,num_inference_steps=5,negative_prompt='',).to_tuple() __UpperCamelCase = pipeline( A_,image=A_,mask_image=A_,image_embeds=A_,negative_image_embeds=A_,generator=A_,num_inference_steps=100,height=768,width=768,output_type='np',) __UpperCamelCase = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(A_,A_ )
1
1
def euclidean_gcd(a: int, b: int) -> int:
    """Iterative method for the euclidean gcd algorithm."""
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    """Recursive method for the euclidean gcd algorithm."""
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


def main() -> None:
    print(f'euclidean_gcd(3, 5) = {euclidean_gcd(3, 5)}')
    print(f'euclidean_gcd(5, 3) = {euclidean_gcd(5, 3)}')
    print(f'euclidean_gcd(1, 3) = {euclidean_gcd(1, 3)}')
    print(f'euclidean_gcd(3, 6) = {euclidean_gcd(3, 6)}')
    print(f'euclidean_gcd(6, 3) = {euclidean_gcd(6, 3)}')
    print(f'euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5)}')
    print(f'euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3)}')
    print(f'euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3)}')
    print(f'euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6)}')
    print(f'euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3)}')


if __name__ == "__main__":
    main()
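A worked trace of the iterative loop for gcd(252, 105): the (a, b) pairs go (252, 105) -> (105, 42) -> (42, 21) -> (21, 0), so both variants return 21.

# trace: 252 % 105 = 42, 105 % 42 = 21, 42 % 21 = 0
assert euclidean_gcd(252, 105) == 21
assert euclidean_gcd_recursive(252, 105) == 21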
1
from typing import Any class __lowerCamelCase : def __init__( self: int,A_: Any ): '''simple docstring''' __UpperCamelCase = data __UpperCamelCase = None def __repr__( self: Any ): '''simple docstring''' return F'''Node({self.data})''' class __lowerCamelCase : def __init__( self: Union[str, Any] ): '''simple docstring''' __UpperCamelCase = None def __iter__( self: int ): '''simple docstring''' __UpperCamelCase = self.head while node: yield node.data __UpperCamelCase = node.next def __len__( self: List[str] ): '''simple docstring''' return sum(1 for _ in self ) def __repr__( self: Any ): '''simple docstring''' return "->".join([str(A_ ) for item in self] ) def __getitem__( self: int,A_: int ): '''simple docstring''' if not 0 <= index < len(self ): raise ValueError('list index out of range.' ) for i, node in enumerate(self ): if i == index: return node return None def __setitem__( self: int,A_: int,A_: Any ): '''simple docstring''' if not 0 <= index < len(self ): raise ValueError('list index out of range.' ) __UpperCamelCase = self.head for _ in range(A_ ): __UpperCamelCase = current.next __UpperCamelCase = data def snake_case_ ( self: Union[str, Any],A_: Any ): '''simple docstring''' self.insert_nth(len(self ),A_ ) def snake_case_ ( self: List[Any],A_: Any ): '''simple docstring''' self.insert_nth(0,A_ ) def snake_case_ ( self: Optional[Any],A_: int,A_: Any ): '''simple docstring''' if not 0 <= index <= len(self ): raise IndexError('list index out of range' ) __UpperCamelCase = Node(A_ ) if self.head is None: __UpperCamelCase = new_node elif index == 0: __UpperCamelCase = self.head # link new_node to head __UpperCamelCase = new_node else: __UpperCamelCase = self.head for _ in range(index - 1 ): __UpperCamelCase = temp.next __UpperCamelCase = temp.next __UpperCamelCase = new_node def snake_case_ ( self: str ): # print every node data '''simple docstring''' print(self ) def snake_case_ ( self: int ): '''simple docstring''' return self.delete_nth(0 ) def snake_case_ ( self: str ): # delete from tail '''simple docstring''' return self.delete_nth(len(self ) - 1 ) def snake_case_ ( self: Any,A_: int = 0 ): '''simple docstring''' if not 0 <= index <= len(self ) - 1: # test if index is valid raise IndexError('List index out of range.' ) __UpperCamelCase = self.head # default first node if index == 0: __UpperCamelCase = self.head.next else: __UpperCamelCase = self.head for _ in range(index - 1 ): __UpperCamelCase = temp.next __UpperCamelCase = temp.next __UpperCamelCase = temp.next.next return delete_node.data def snake_case_ ( self: Any ): '''simple docstring''' return self.head is None def snake_case_ ( self: Optional[int] ): '''simple docstring''' __UpperCamelCase = None __UpperCamelCase = self.head while current: # Store the current node's next node. __UpperCamelCase = current.next # Make the current node's next point backwards __UpperCamelCase = prev # Make the previous node be the current node __UpperCamelCase = current # Make the current node the next node (to progress iteration) __UpperCamelCase = next_node # Return prev in order to put the head at the end __UpperCamelCase = prev def _A ( ) -> None: """simple docstring""" __UpperCamelCase = LinkedList() assert linked_list.is_empty() is True assert str(_lowercase ) == "" try: linked_list.delete_head() raise AssertionError # This should not happen. except IndexError: assert True # This should happen. try: linked_list.delete_tail() raise AssertionError # This should not happen. except IndexError: assert True # This should happen. 
for i in range(10 ): assert len(_lowercase ) == i linked_list.insert_nth(_lowercase , i + 1 ) assert str(_lowercase ) == "->".join(str(_lowercase ) for i in range(1 , 11 ) ) linked_list.insert_head(0 ) linked_list.insert_tail(11 ) assert str(_lowercase ) == "->".join(str(_lowercase ) for i in range(0 , 12 ) ) assert linked_list.delete_head() == 0 assert linked_list.delete_nth(9 ) == 10 assert linked_list.delete_tail() == 11 assert len(_lowercase ) == 9 assert str(_lowercase ) == "->".join(str(_lowercase ) for i in range(1 , 10 ) ) assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True for i in range(0 , 9 ): __UpperCamelCase = -i assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True linked_list.reverse() assert str(_lowercase ) == "->".join(str(_lowercase ) for i in range(-8 , 1 ) ) def _A ( ) -> None: """simple docstring""" __UpperCamelCase = [ -9, 1_00, Node(77_34_51_12 ), 'dlrow olleH', 7, 55_55, 0, -1_92.5_55_55, 'Hello, world!', 77.9, Node(10 ), None, None, 12.20, ] __UpperCamelCase = LinkedList() for i in test_input: linked_list.insert_tail(_lowercase ) # Check if it's empty or not assert linked_list.is_empty() is False assert ( str(_lowercase ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->" "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2" ) # Delete the head __UpperCamelCase = linked_list.delete_head() assert result == -9 assert ( str(_lowercase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None->None->12.2" ) # Delete the tail __UpperCamelCase = linked_list.delete_tail() assert result == 12.2 assert ( str(_lowercase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None->None" ) # Delete a node in specific location in linked list __UpperCamelCase = linked_list.delete_nth(10 ) assert result is None assert ( str(_lowercase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None" ) # Add a Node instance to its head linked_list.insert_head(Node('Hello again, world!' 
) ) assert ( str(_lowercase ) == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->" "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None" ) # Add None to its tail linked_list.insert_tail(_lowercase ) assert ( str(_lowercase ) == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->" "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None" ) # Reverse the linked list linked_list.reverse() assert ( str(_lowercase ) == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->" "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)" ) def _A ( ) -> List[str]: """simple docstring""" from doctest import testmod testmod() __UpperCamelCase = LinkedList() linked_list.insert_head(input('Inserting 1st at head ' ).strip() ) linked_list.insert_head(input('Inserting 2nd at head ' ).strip() ) print('\nPrint list:' ) linked_list.print_list() linked_list.insert_tail(input('\nInserting 1st at tail ' ).strip() ) linked_list.insert_tail(input('Inserting 2nd at tail ' ).strip() ) print('\nPrint list:' ) linked_list.print_list() print('\nDelete head' ) linked_list.delete_head() print('Delete tail' ) linked_list.delete_tail() print('\nPrint list:' ) linked_list.print_list() print('\nReverse linked list' ) linked_list.reverse() print('\nPrint list:' ) linked_list.print_list() print('\nString representation of linked list:' ) print(_lowercase ) print('\nReading/changing Node data using indexing:' ) print(f'''Element at Position 1: {linked_list[1]}''' ) __UpperCamelCase = input('Enter New Value: ' ).strip() print('New list:' ) print(_lowercase ) print(f'''length of linked_list is : {len(_lowercase )}''' ) if __name__ == "__main__": main()
1
1
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL __snake_case = logging.get_logger(__name__) class __lowerCamelCase (_a ): _lowercase = ["""pixel_values"""] def __init__( self: str,A_: bool = True,A_: Dict[str, int] = None,A_: PILImageResampling = PIL.Image.BICUBIC,A_: bool = True,A_: Dict[str, int] = None,A_: Union[int, float] = 1 / 255,A_: bool = True,A_: bool = True,A_: Optional[Union[float, List[float]]] = None,A_: Optional[Union[float, List[float]]] = None,**A_: int,): '''simple docstring''' super().__init__(**A_ ) __UpperCamelCase = size if size is not None else {'height': 256, 'width': 256} __UpperCamelCase = get_size_dict(A_ ) __UpperCamelCase = crop_size if crop_size is not None else {'height': 224, 'width': 224} __UpperCamelCase = get_size_dict(A_,param_name='crop_size' ) __UpperCamelCase = do_resize __UpperCamelCase = size __UpperCamelCase = resample __UpperCamelCase = do_center_crop __UpperCamelCase = crop_size __UpperCamelCase = do_rescale __UpperCamelCase = rescale_factor __UpperCamelCase = do_normalize __UpperCamelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN __UpperCamelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD def snake_case_ ( self: Tuple,A_: np.ndarray,A_: Dict[str, int],A_: PILImageResampling = PIL.Image.BICUBIC,A_: Optional[Union[str, ChannelDimension]] = None,**A_: Union[str, Any],): '''simple docstring''' __UpperCamelCase = get_size_dict(A_ ) if "height" not in size or "width" not in size: raise ValueError(F'''The size dictionary must have keys \'height\' and \'width\'. Got {size.keys()}''' ) return resize( A_,size=(size['height'], size['width']),resample=A_,data_format=A_,**A_ ) def snake_case_ ( self: List[str],A_: np.ndarray,A_: Dict[str, int],A_: Optional[Union[str, ChannelDimension]] = None,**A_: Union[str, Any],): '''simple docstring''' __UpperCamelCase = get_size_dict(A_ ) if "height" not in size or "width" not in size: raise ValueError(F'''The size dictionary must have keys \'height\' and \'width\'. 
Got {size.keys()}''' ) return center_crop(A_,size=(size['height'], size['width']),data_format=A_,**A_ ) def snake_case_ ( self: Optional[int],A_: np.ndarray,A_: Union[int, float],A_: Optional[Union[str, ChannelDimension]] = None,**A_: Union[str, Any],): '''simple docstring''' return rescale(A_,scale=A_,data_format=A_,**A_ ) def snake_case_ ( self: List[str],A_: np.ndarray,A_: Union[float, List[float]],A_: Union[float, List[float]],A_: Optional[Union[str, ChannelDimension]] = None,**A_: List[Any],): '''simple docstring''' return normalize(A_,mean=A_,std=A_,data_format=A_,**A_ ) def snake_case_ ( self: Tuple,A_: ImageInput,A_: bool = None,A_: Dict[str, int] = None,A_: Any=None,A_: bool = None,A_: Dict[str, int] = None,A_: bool = None,A_: float = None,A_: bool = None,A_: Optional[Union[float, List[float]]] = None,A_: Optional[Union[float, List[float]]] = None,A_: Optional[Union[str, TensorType]] = None,A_: ChannelDimension = ChannelDimension.FIRST,**A_: List[str],): '''simple docstring''' __UpperCamelCase = do_resize if do_resize is not None else self.do_resize __UpperCamelCase = resample if resample is not None else self.resample __UpperCamelCase = do_center_crop if do_center_crop is not None else self.do_center_crop __UpperCamelCase = do_rescale if do_rescale is not None else self.do_rescale __UpperCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor __UpperCamelCase = do_normalize if do_normalize is not None else self.do_normalize __UpperCamelCase = image_mean if image_mean is not None else self.image_mean __UpperCamelCase = image_std if image_std is not None else self.image_std __UpperCamelCase = size if size is not None else self.size __UpperCamelCase = get_size_dict(A_ ) __UpperCamelCase = crop_size if crop_size is not None else self.crop_size __UpperCamelCase = get_size_dict(A_,param_name='crop_size' ) __UpperCamelCase = make_list_of_images(A_ ) if not valid_images(A_ ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None or resample is None: raise ValueError('Size and resample must be specified if do_resize is True.' ) if do_center_crop and crop_size is None: raise ValueError('Crop size must be specified if do_center_crop is True.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.' ) # All transformations expect numpy arrays. __UpperCamelCase = [to_numpy_array(A_ ) for image in images] if do_resize: __UpperCamelCase = [self.resize(image=A_,size=A_,resample=A_ ) for image in images] if do_center_crop: __UpperCamelCase = [self.center_crop(image=A_,size=A_ ) for image in images] if do_rescale: __UpperCamelCase = [self.rescale(image=A_,scale=A_ ) for image in images] if do_normalize: __UpperCamelCase = [self.normalize(image=A_,mean=A_,std=A_ ) for image in images] __UpperCamelCase = [to_channel_dimension_format(A_,A_ ) for image in images] __UpperCamelCase = {'pixel_values': images} return BatchFeature(data=A_,tensor_type=A_ )
1
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)

# The import structure must be named `_import_structure` so the
# `_LazyModule` call at the bottom can resolve it.
_import_structure = {'''configuration_unispeech''': ['''UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''UniSpeechConfig''']}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_unispeech'''] = [
        '''UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''UniSpeechForCTC''',
        '''UniSpeechForPreTraining''',
        '''UniSpeechForSequenceClassification''',
        '''UniSpeechModel''',
        '''UniSpeechPreTrainedModel''',
    ]

if TYPE_CHECKING:
    from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_unispeech import (
            UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
            UniSpeechForCTC,
            UniSpeechForPreTraining,
            UniSpeechForSequenceClassification,
            UniSpeechModel,
            UniSpeechPreTrainedModel,
        )
else:
    import sys

    __snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
1
1
from random import randint, random


def construct_highway(
    number_of_cells: int,
    frequency: int,
    initial_speed: int,
    random_frequency: bool = False,
    random_speed: bool = False,
    max_speed: int = 5,
) -> list:
    """Build a highway of cells; -1 marks an empty cell."""
    highway = [[-1] * number_of_cells]  # Create a highway without any car
    i = 0
    initial_speed = max(initial_speed, 0)
    while i < number_of_cells:
        highway[0][i] = randint(0, max_speed) if random_speed else initial_speed  # Place the cars
        i += randint(1, max_speed * 2) if random_frequency else frequency  # Arbitrary number, may need tuning
    return highway


def get_distance(highway_now: list, car_index: int) -> int:
    """Distance (in cells) from the car at `car_index` to the next car ahead."""
    distance = 0
    cells = highway_now[car_index + 1 :]
    for cell in range(len(cells)):  # May need a better name for this
        if cells[cell] != -1:  # If the cell is not empty then
            return distance  # we have the distance we wanted
        distance += 1
    # Here if the car is near the end of the highway
    return distance + get_distance(highway_now, -1)


def update(highway_now: list, probability: float, max_speed: int) -> list:
    """Compute the next speed of every car on the highway."""
    number_of_cells = len(highway_now)
    # Before calculations, the highway is empty
    next_highway = [-1] * number_of_cells
    for car_index in range(number_of_cells):
        if highway_now[car_index] != -1:
            # Add 1 to the current speed of the car and cap the speed
            next_highway[car_index] = min(highway_now[car_index] + 1, max_speed)
            # Number of empty cells before the next car
            dn = get_distance(highway_now, car_index) - 1
            # We can't have the car causing an accident
            next_highway[car_index] = min(next_highway[car_index], dn)
            if random() < probability:
                # Randomly, a driver will slow down
                next_highway[car_index] = max(next_highway[car_index] - 1, 0)
    return next_highway


def simulate(highway: list, number_of_update: int, probability: float, max_speed: int) -> list:
    """Run `number_of_update` steps of the traffic model, appending each state."""
    number_of_cells = len(highway[0])
    for i in range(number_of_update):
        next_speeds_calculated = update(highway[i], probability, max_speed)
        real_next_speeds = [-1] * number_of_cells
        for car_index in range(number_of_cells):
            speed = next_speeds_calculated[car_index]
            if speed != -1:
                # Change the position based on the speed (with % to create the loop)
                index = (car_index + speed) % number_of_cells
                # Commit the change of position
                real_next_speeds[index] = speed
        highway.append(real_next_speeds)
    return highway


if __name__ == "__main__":
    import doctest

    doctest.testmod()
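A short driver for the Nagel-Schreckenberg model above: 30 cells, a car every 4 cells starting at speed 1, 10 update steps. The dot/digit rendering is just one way to visualize the rows.

# Example run of the simulation defined above.
highway = construct_highway(30, frequency=4, initial_speed=1)
history = simulate(highway, number_of_update=10, probability=0.1, max_speed=5)
for row in history:
    print("".join("." if cell == -1 else str(cell) for cell in row))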
1
encode_dict = {
    '''a''': '''AAAAA''', '''b''': '''AAAAB''', '''c''': '''AAABA''', '''d''': '''AAABB''',
    '''e''': '''AABAA''', '''f''': '''AABAB''', '''g''': '''AABBA''', '''h''': '''AABBB''',
    '''i''': '''ABAAA''', '''j''': '''BBBAA''', '''k''': '''ABAAB''', '''l''': '''ABABA''',
    '''m''': '''ABABB''', '''n''': '''ABBAA''', '''o''': '''ABBAB''', '''p''': '''ABBBA''',
    '''q''': '''ABBBB''', '''r''': '''BAAAA''', '''s''': '''BAAAB''', '''t''': '''BAABA''',
    '''u''': '''BAABB''', '''v''': '''BBBAB''', '''w''': '''BABAA''', '''x''': '''BABAB''',
    '''y''': '''BABBA''', '''z''': '''BABBB''', ''' ''': ''' ''',
}
# The functions below look these tables up by name, so the dictionaries must
# be called `encode_dict` and `decode_dict`.
decode_dict = {value: key for key, value in encode_dict.items()}


def encode(word: str) -> str:
    """Encode a word with the Baconian cipher."""
    encoded = ''
    for letter in word.lower():
        if letter.isalpha() or letter == ' ':
            encoded += encode_dict[letter]
        else:
            raise Exception('encode() accepts only letters of the alphabet and spaces')
    return encoded


def decode(coded: str) -> str:
    """Decode a Baconian-cipher string back to plain text."""
    if set(coded) - {"A", "B", " "} != set():
        raise Exception("decode() accepts only 'A', 'B' and spaces")
    decoded = ''
    for word in coded.split():
        while len(word) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()


if __name__ == "__main__":
    from doctest import testmod

    testmod()
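A round-trip check for the cipher above, with the expected output worked out by hand from the table (h=AABBB, e=AABAA, l=ABABA, o=ABBAB, and so on).

# Round-trip example.
secret = encode("hello world")
print(secret)         # AABBBAABAAABABAABABAABBAB BABAAABBABBAAAAABABAAAABB
print(decode(secret)) # hello world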
1
1
from __future__ import annotations

from functools import lru_cache
from math import ceil

NUM_PRIMES = 100

# Sieve the odd numbers below NUM_PRIMES; the names `NUM_PRIMES` and `primes`
# are referenced below, so the module-level bindings must use them.
primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)

for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
    if prime not in primes:
        continue
    primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))


@lru_cache(maxsize=100)
def partition(number_to_partition: int) -> set[int]:
    """Return the set of products over all prime partitions of the input."""
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}

    ret: set[int] = set()
    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime):
            ret.add(sub * prime)
    return ret


def solution(number_unique_partitions: int = 5000) -> int | None:
    """Return the first value that can be written as a sum of primes in more
    than `number_unique_partitions` ways."""
    for number_to_partition in range(1, number_unique_partitions):
        if len(partition(number_to_partition)) > number_unique_partitions:
            return number_to_partition
    return None


if __name__ == "__main__":
    print(f"""{solution() = }""")
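A spot check: 10 has exactly five prime partitions (2+2+2+2+2, 2+2+3+3, 2+3+5, 3+7, 5+5), each with a distinct product, so it is the first value with more than four.

# Spot checks for the functions above.
print(len(partition(10)))  # 5
print(solution(4))         # 10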
1
from collections.abc import Generator from math import sin def _A ( _lowercase ) -> bytes: """simple docstring""" if len(_lowercase ) != 32: raise ValueError('Input must be of length 32' ) __UpperCamelCase = B'' for i in [3, 2, 1, 0]: little_endian += string_aa[8 * i : 8 * i + 8] return little_endian def _A ( _lowercase ) -> bytes: """simple docstring""" if i < 0: raise ValueError('Input must be non-negative' ) __UpperCamelCase = format(_lowercase , '08x' )[-8:] __UpperCamelCase = B'' for i in [3, 2, 1, 0]: little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode('utf-8' ) return little_endian_hex def _A ( _lowercase ) -> bytes: """simple docstring""" __UpperCamelCase = B'' for char in message: bit_string += format(_lowercase , '08b' ).encode('utf-8' ) __UpperCamelCase = format(len(_lowercase ) , '064b' ).encode('utf-8' ) # Pad bit_string to a multiple of 512 chars bit_string += b"1" while len(_lowercase ) % 5_12 != 4_48: bit_string += b"0" bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] ) return bit_string def _A ( _lowercase ) -> Generator[list[int], None, None]: """simple docstring""" if len(_lowercase ) % 5_12 != 0: raise ValueError('Input must have length that\'s a multiple of 512' ) for pos in range(0 , len(_lowercase ) , 5_12 ): __UpperCamelCase = bit_string[pos : pos + 5_12] __UpperCamelCase = [] for i in range(0 , 5_12 , 32 ): block_words.append(int(to_little_endian(block[i : i + 32] ) , 2 ) ) yield block_words def _A ( _lowercase ) -> int: """simple docstring""" if i < 0: raise ValueError('Input must be non-negative' ) __UpperCamelCase = format(_lowercase , '032b' ) __UpperCamelCase = '' for c in i_str: new_str += "1" if c == "0" else "0" return int(_lowercase , 2 ) def _A ( _lowercase , _lowercase ) -> int: """simple docstring""" return (a + b) % 2**32 def _A ( _lowercase , _lowercase ) -> int: """simple docstring""" if i < 0: raise ValueError('Input must be non-negative' ) if shift < 0: raise ValueError('Shift must be non-negative' ) return ((i << shift) ^ (i >> (32 - shift))) % 2**32 def _A ( _lowercase ) -> bytes: """simple docstring""" __UpperCamelCase = preprocess(_lowercase ) __UpperCamelCase = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )] # Starting states __UpperCamelCase = 0X67_45_23_01 __UpperCamelCase = 0Xef_cd_ab_89 __UpperCamelCase = 0X98_ba_dc_fe __UpperCamelCase = 0X10_32_54_76 __UpperCamelCase = [ 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, ] # Process bit string in chunks, each with 16 32-char words for block_words in get_block_words(_lowercase ): __UpperCamelCase = aa __UpperCamelCase = ba __UpperCamelCase = ca __UpperCamelCase = da # Hash current chunk for i in range(64 ): if i <= 15: # f = (b & c) | (not_32(b) & d) # Alternate definition for f __UpperCamelCase = d ^ (b & (c ^ d)) __UpperCamelCase = i elif i <= 31: # f = (d & b) | (not_32(d) & c) # Alternate definition for f __UpperCamelCase = c ^ (d & (b ^ c)) __UpperCamelCase = (5 * i + 1) % 16 elif i <= 47: __UpperCamelCase = b ^ c ^ d __UpperCamelCase = (3 * i + 5) % 16 else: __UpperCamelCase = c ^ (b | not_aa(_lowercase )) __UpperCamelCase = (7 * i) % 16 __UpperCamelCase = (f + a + added_consts[i] + block_words[g]) % 2**32 __UpperCamelCase = d __UpperCamelCase = c __UpperCamelCase = b __UpperCamelCase = sum_aa(_lowercase , left_rotate_aa(_lowercase , shift_amounts[i] ) ) # Add 
hashed chunk to running total __UpperCamelCase = sum_aa(_lowercase , _lowercase ) __UpperCamelCase = sum_aa(_lowercase , _lowercase ) __UpperCamelCase = sum_aa(_lowercase , _lowercase ) __UpperCamelCase = sum_aa(_lowercase , _lowercase ) __UpperCamelCase = reformat_hex(_lowercase ) + reformat_hex(_lowercase ) + reformat_hex(_lowercase ) + reformat_hex(_lowercase ) return digest if __name__ == "__main__": import doctest doctest.testmod()
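Assuming the digest function above keeps its upstream name (`md5_me` in the reference implementation this appears to follow; the name is not shown here), its output can be cross-checked against the standard library. Note the function returns the hex digest as bytes, not str.

# Hedged sketch: `md5_me` is an assumed name for the digest function above.
import hashlib

message = b"The quick brown fox jumps over the lazy dog"
assert md5_me(message) == hashlib.md5(message).hexdigest().encode("utf-8")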
1
1
import collections import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging __snake_case = logging.get_logger(__name__) __snake_case = '''▁''' __snake_case = {'''vocab_file''': '''prophetnet.tokenizer'''} __snake_case = { '''vocab_file''': { '''microsoft/xprophetnet-large-wiki100-cased''': ( '''https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer''' ), } } __snake_case = { '''microsoft/xprophetnet-large-wiki100-cased''': {'''do_lower_case''': False}, } __snake_case = { '''microsoft/xprophetnet-large-wiki100-cased''': 5_1_2, } def _A ( _lowercase ) -> Optional[Any]: """simple docstring""" __UpperCamelCase = collections.OrderedDict() with open(_lowercase , 'r' , encoding='utf-8' ) as reader: __UpperCamelCase = reader.readlines() for index, token in enumerate(_lowercase ): __UpperCamelCase = token.rstrip('\n' ) __UpperCamelCase = index return vocab class __lowerCamelCase (_a ): _lowercase = VOCAB_FILES_NAMES _lowercase = PRETRAINED_VOCAB_FILES_MAP _lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowercase = ["""input_ids""", """attention_mask"""] def __init__( self: str,A_: int,A_: str="[SEP]",A_: List[Any]="[SEP]",A_: str="[SEP]",A_: Any="[UNK]",A_: Optional[int]="[PAD]",A_: List[str]="[CLS]",A_: Dict="[MASK]",A_: Optional[Dict[str, Any]] = None,**A_: str,): '''simple docstring''' __UpperCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=A_,eos_token=A_,sep_token=A_,unk_token=A_,pad_token=A_,cls_token=A_,mask_token=A_,sp_model_kwargs=self.sp_model_kwargs,**A_,) try: import sentencepiece as spm except ImportError: logger.warning( 'You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece' ' pip install sentencepiece' ) raise __UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(A_ ) ) __UpperCamelCase = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # put special tokens and [unused] tokens into the vocab __UpperCamelCase = {'[PAD]': 0, '[CLS]': 1, '[SEP]': 2, '[UNK]': 3, '[MASK]': 4} for i in range(10 ): __UpperCamelCase = F'''[unused{i}]''' __UpperCamelCase = 5 + i # The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab __UpperCamelCase = 12 __UpperCamelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()} for k in self.fairseq_tokens_to_ids.keys(): self.unique_no_split_tokens.append(A_ ) def __getstate__( self: Union[str, Any] ): '''simple docstring''' __UpperCamelCase = self.__dict__.copy() __UpperCamelCase = None return state def __setstate__( self: List[Any],A_: List[Any] ): '''simple docstring''' __UpperCamelCase = d try: import sentencepiece as spm except ImportError: logger.warning( 'You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece' ' pip install sentencepiece' ) raise # for backward compatibility if not hasattr(self,'sp_model_kwargs' ): __UpperCamelCase = {} __UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def snake_case_ ( self: Any,A_: List[int],A_: Optional[List[int]] = None,A_: bool = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=A_,token_ids_a=A_,already_has_special_tokens=A_ ) if token_ids_a is None: return ([0] * len(A_ )) + [1] return ([0] * len(A_ )) + [1] + ([0] * len(A_ )) + [1] def snake_case_ ( self: Optional[int],A_: List[int],A_: Optional[List[int]] = None ): '''simple docstring''' __UpperCamelCase = [self.sep_token_id] if token_ids_a is None: return len(token_ids_a + sep ) * [0] return len(token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def snake_case_ ( self: Union[str, Any] ): '''simple docstring''' return len(self.sp_model ) + self.fairseq_offset def snake_case_ ( self: Optional[Any] ): '''simple docstring''' __UpperCamelCase = {self.convert_ids_to_tokens(A_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def snake_case_ ( self: List[Any],A_: str ): '''simple docstring''' return self.sp_model.encode(A_,out_type=A_ ) def snake_case_ ( self: Any,A_: Optional[int] ): '''simple docstring''' if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] __UpperCamelCase = self.sp_model.PieceToId(A_ ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def snake_case_ ( self: str,A_: int ): '''simple docstring''' if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def snake_case_ ( self: Tuple,A_: int ): '''simple docstring''' __UpperCamelCase = ''.join(A_ ).replace(A_,' ' ).strip() return out_string def snake_case_ ( self: Optional[int],A_: str,A_: Optional[str] = None ): '''simple docstring''' if not os.path.isdir(A_ ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return __UpperCamelCase = os.path.join( A_,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file,A_ ) elif not os.path.isfile(self.vocab_file ): with open(A_,'wb' ) as fi: __UpperCamelCase = self.sp_model.serialized_model_proto() fi.write(A_ ) return 
(out_vocab_file,) def snake_case_ ( self: Tuple,A_: List[int],A_: Optional[List[int]] = None ): '''simple docstring''' if token_ids_a is None: return token_ids_a + [self.sep_token_id] __UpperCamelCase = [self.sep_token_id] return token_ids_a + sep + token_ids_a + sep
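A hedged usage sketch for the tokenizer above; `XLMProphetNetTokenizer` is the class this snippet appears to implement (the name is not shown), and loading it requires the sentencepiece package.

# Hedged sketch with an assumed class name.
tok = XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased")
ids = tok("Hello world").input_ids
print(tok.convert_ids_to_tokens(ids))  # sentencepiece pieces plus the trailing [SEP]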
1
from __future__ import annotations import time from math import sqrt # 1 for manhattan, 0 for euclidean __snake_case = 0 __snake_case = [ [0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0], ] __snake_case = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right __snake_case = tuple[int, int] class __lowerCamelCase : def __init__( self: str,A_: int,A_: int,A_: int,A_: int,A_: int,A_: Node | None,): '''simple docstring''' __UpperCamelCase = pos_x __UpperCamelCase = pos_y __UpperCamelCase = (pos_y, pos_x) __UpperCamelCase = goal_x __UpperCamelCase = goal_y __UpperCamelCase = g_cost __UpperCamelCase = parent __UpperCamelCase = self.calculate_heuristic() __UpperCamelCase = self.g_cost + self.h_cost def snake_case_ ( self: str ): '''simple docstring''' __UpperCamelCase = self.pos_x - self.goal_x __UpperCamelCase = self.pos_y - self.goal_y if HEURISTIC == 1: return abs(A_ ) + abs(A_ ) else: return sqrt(dy**2 + dx**2 ) def __lt__( self: int,A_: Node ): '''simple docstring''' return self.f_cost < other.f_cost class __lowerCamelCase : def __init__( self: Any,A_: TPosition,A_: TPosition ): '''simple docstring''' __UpperCamelCase = Node(start[1],start[0],goal[1],goal[0],0,A_ ) __UpperCamelCase = Node(goal[1],goal[0],goal[1],goal[0],9_9999,A_ ) __UpperCamelCase = [self.start] __UpperCamelCase = [] __UpperCamelCase = False def snake_case_ ( self: Any ): '''simple docstring''' while self.open_nodes: # Open Nodes are sorted using __lt__ self.open_nodes.sort() __UpperCamelCase = self.open_nodes.pop(0 ) if current_node.pos == self.target.pos: return self.retrace_path(A_ ) self.closed_nodes.append(A_ ) __UpperCamelCase = self.get_successors(A_ ) for child_node in successors: if child_node in self.closed_nodes: continue if child_node not in self.open_nodes: self.open_nodes.append(A_ ) else: # retrieve the best current path __UpperCamelCase = self.open_nodes.pop(self.open_nodes.index(A_ ) ) if child_node.g_cost < better_node.g_cost: self.open_nodes.append(A_ ) else: self.open_nodes.append(A_ ) return [self.start.pos] def snake_case_ ( self: int,A_: Node ): '''simple docstring''' __UpperCamelCase = [] for action in delta: __UpperCamelCase = parent.pos_x + action[1] __UpperCamelCase = parent.pos_y + action[0] if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(A_ ) - 1): continue if grid[pos_y][pos_x] != 0: continue successors.append( Node( A_,A_,self.target.pos_y,self.target.pos_x,parent.g_cost + 1,A_,) ) return successors def snake_case_ ( self: Any,A_: Node | None ): '''simple docstring''' __UpperCamelCase = node __UpperCamelCase = [] while current_node is not None: path.append((current_node.pos_y, current_node.pos_x) ) __UpperCamelCase = current_node.parent path.reverse() return path class __lowerCamelCase : def __init__( self: List[Any],A_: TPosition,A_: TPosition ): '''simple docstring''' __UpperCamelCase = AStar(A_,A_ ) __UpperCamelCase = AStar(A_,A_ ) __UpperCamelCase = False def snake_case_ ( self: Union[str, Any] ): '''simple docstring''' while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes: self.fwd_astar.open_nodes.sort() self.bwd_astar.open_nodes.sort() __UpperCamelCase = self.fwd_astar.open_nodes.pop(0 ) __UpperCamelCase = self.bwd_astar.open_nodes.pop(0 ) if current_bwd_node.pos == current_fwd_node.pos: return self.retrace_bidirectional_path( A_,A_ ) self.fwd_astar.closed_nodes.append(A_ ) 
self.bwd_astar.closed_nodes.append(A_ ) __UpperCamelCase = current_bwd_node __UpperCamelCase = current_fwd_node __UpperCamelCase = { self.fwd_astar: self.fwd_astar.get_successors(A_ ), self.bwd_astar: self.bwd_astar.get_successors(A_ ), } for astar in [self.fwd_astar, self.bwd_astar]: for child_node in successors[astar]: if child_node in astar.closed_nodes: continue if child_node not in astar.open_nodes: astar.open_nodes.append(A_ ) else: # retrieve the best current path __UpperCamelCase = astar.open_nodes.pop( astar.open_nodes.index(A_ ) ) if child_node.g_cost < better_node.g_cost: astar.open_nodes.append(A_ ) else: astar.open_nodes.append(A_ ) return [self.fwd_astar.start.pos] def snake_case_ ( self: List[str],A_: Node,A_: Node ): '''simple docstring''' __UpperCamelCase = self.fwd_astar.retrace_path(A_ ) __UpperCamelCase = self.bwd_astar.retrace_path(A_ ) bwd_path.pop() bwd_path.reverse() __UpperCamelCase = fwd_path + bwd_path return path if __name__ == "__main__": # all coordinates are given in format [y,x] __snake_case = (0, 0) __snake_case = (len(grid) - 1, len(grid[0]) - 1) for elem in grid: print(elem) __snake_case = time.time() __snake_case = AStar(init, goal) __snake_case = a_star.search() __snake_case = time.time() - start_time print(f"""AStar execution time = {end_time:f} seconds""") __snake_case = time.time() __snake_case = BidirectionalAStar(init, goal) __snake_case = time.time() - bd_start_time print(f"""BidirectionalAStar execution time = {bd_end_time:f} seconds""")
1
1
import warnings

from ...utils import is_sklearn_available, requires_backends


if is_sklearn_available():
    from scipy.stats import pearsonr, spearmanr
    from sklearn.metrics import f1_score, matthews_corrcoef


DEPRECATION_WARNING = (
    '''This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate '''
    '''library. You can have a look at this example script for pointers: '''
    '''https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py'''
)


def simple_accuracy(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(simple_accuracy, 'sklearn')
    return (preds == labels).mean()


def acc_and_f1(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(acc_and_f1, 'sklearn')
    acc = simple_accuracy(preds, labels)
    f1 = f1_score(y_true=labels, y_pred=preds)
    return {
        "acc": acc,
        "f1": f1,
        "acc_and_f1": (acc + f1) / 2,
    }


def pearson_and_spearman(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(pearson_and_spearman, 'sklearn')
    pearson_corr = pearsonr(preds, labels)[0]
    spearman_corr = spearmanr(preds, labels)[0]
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
        "corr": (pearson_corr + spearman_corr) / 2,
    }


def glue_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(glue_compute_metrics, 'sklearn')
    assert len(preds) == len(labels), f'''Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}'''
    if task_name == "cola":
        return {"mcc": matthews_corrcoef(labels, preds)}
    elif task_name == "sst-2":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "mrpc":
        return acc_and_f1(preds, labels)
    elif task_name == "sts-b":
        return pearson_and_spearman(preds, labels)
    elif task_name == "qqp":
        return acc_and_f1(preds, labels)
    elif task_name == "mnli":
        return {"mnli/acc": simple_accuracy(preds, labels)}
    elif task_name == "mnli-mm":
        return {"mnli-mm/acc": simple_accuracy(preds, labels)}
    elif task_name == "qnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "rte":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "wnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "hans":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)


def xnli_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(xnli_compute_metrics, 'sklearn')
    if len(preds) != len(labels):
        raise ValueError(f'''Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}''')
    if task_name == "xnli":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)
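A worked example for the metrics above on a toy MRPC-style prediction set: 3 of 4 predictions are correct (acc 0.75) and the positive class has precision 2/3 and recall 1, so F1 is 0.8.

# Example (emits the FutureWarning by design).
import numpy as np

preds = np.array([1, 0, 1, 1])
labels = np.array([1, 0, 0, 1])
print(glue_compute_metrics("mrpc", preds, labels))
# {'acc': 0.75, 'f1': 0.8, 'acc_and_f1': 0.775}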
1
import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test sys.path.append(str(Path(__file__).parent.parent / '''utils''')) from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402 __snake_case = get_tests_dir('''fixtures''') class __lowerCamelCase (unittest.TestCase ): def snake_case_ ( self: int ): '''simple docstring''' __UpperCamelCase = mock.Mock() __UpperCamelCase = 500 __UpperCamelCase = {} __UpperCamelCase = HTTPError __UpperCamelCase = {} # Download this model to make sure it's in the cache. __UpperCamelCase = WavaVecaFeatureExtractor.from_pretrained('hf-internal-testing/tiny-random-wav2vec2' ) # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch('requests.Session.request',return_value=A_ ) as mock_head: __UpperCamelCase = WavaVecaFeatureExtractor.from_pretrained('hf-internal-testing/tiny-random-wav2vec2' ) # This check we did call the fake head request mock_head.assert_called() def snake_case_ ( self: Dict ): '''simple docstring''' __UpperCamelCase = WavaVecaFeatureExtractor.from_pretrained( 'https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json' ) @is_staging_test class __lowerCamelCase (unittest.TestCase ): @classmethod def snake_case_ ( cls: Tuple ): '''simple docstring''' __UpperCamelCase = TOKEN HfFolder.save_token(A_ ) @classmethod def snake_case_ ( cls: Tuple ): '''simple docstring''' try: delete_repo(token=cls._token,repo_id='test-feature-extractor' ) except HTTPError: pass try: delete_repo(token=cls._token,repo_id='valid_org/test-feature-extractor-org' ) except HTTPError: pass try: delete_repo(token=cls._token,repo_id='test-dynamic-feature-extractor' ) except HTTPError: pass def snake_case_ ( self: Tuple ): '''simple docstring''' __UpperCamelCase = WavaVecaFeatureExtractor.from_pretrained(A_ ) feature_extractor.push_to_hub('test-feature-extractor',use_auth_token=self._token ) __UpperCamelCase = WavaVecaFeatureExtractor.from_pretrained(F'''{USER}/test-feature-extractor''' ) for k, v in feature_extractor.__dict__.items(): self.assertEqual(A_,getattr(A_,A_ ) ) # Reset repo delete_repo(token=self._token,repo_id='test-feature-extractor' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained( A_,repo_id='test-feature-extractor',push_to_hub=A_,use_auth_token=self._token ) __UpperCamelCase = WavaVecaFeatureExtractor.from_pretrained(F'''{USER}/test-feature-extractor''' ) for k, v in feature_extractor.__dict__.items(): self.assertEqual(A_,getattr(A_,A_ ) ) def snake_case_ ( self: List[str] ): '''simple docstring''' __UpperCamelCase = WavaVecaFeatureExtractor.from_pretrained(A_ ) feature_extractor.push_to_hub('valid_org/test-feature-extractor',use_auth_token=self._token ) __UpperCamelCase = WavaVecaFeatureExtractor.from_pretrained('valid_org/test-feature-extractor' ) for k, v in feature_extractor.__dict__.items(): self.assertEqual(A_,getattr(A_,A_ ) ) # Reset repo delete_repo(token=self._token,repo_id='valid_org/test-feature-extractor' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained( 
A_,repo_id='valid_org/test-feature-extractor-org',push_to_hub=A_,use_auth_token=self._token ) __UpperCamelCase = WavaVecaFeatureExtractor.from_pretrained('valid_org/test-feature-extractor-org' ) for k, v in feature_extractor.__dict__.items(): self.assertEqual(A_,getattr(A_,A_ ) ) def snake_case_ ( self: int ): '''simple docstring''' CustomFeatureExtractor.register_for_auto_class() __UpperCamelCase = CustomFeatureExtractor.from_pretrained(A_ ) feature_extractor.push_to_hub('test-dynamic-feature-extractor',use_auth_token=self._token ) # This has added the proper auto_map field to the config self.assertDictEqual( feature_extractor.auto_map,{'AutoFeatureExtractor': 'custom_feature_extraction.CustomFeatureExtractor'},) __UpperCamelCase = AutoFeatureExtractor.from_pretrained( F'''{USER}/test-dynamic-feature-extractor''',trust_remote_code=A_ ) # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module self.assertEqual(new_feature_extractor.__class__.__name__,'CustomFeatureExtractor' )
1
1
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Named `_import_structure` so the `_LazyModule` call at the bottom can resolve it.
_import_structure = {
    '''configuration_autoformer''': [
        '''AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''AutoformerConfig''',
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_autoformer'''] = [
        '''AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''AutoformerForPrediction''',
        '''AutoformerModel''',
        '''AutoformerPreTrainedModel''',
    ]

if TYPE_CHECKING:
    from .configuration_autoformer import (
        AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        AutoformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_autoformer import (
            AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            AutoformerForPrediction,
            AutoformerModel,
            AutoformerPreTrainedModel,
        )
else:
    import sys

    __snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
1
import argparse import json import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler __snake_case = 1_6 __snake_case = 3_2 def _A ( _lowercase , _lowercase = 16 , _lowercase = "bert-base-cased" ) -> Union[str, Any]: """simple docstring""" __UpperCamelCase = AutoTokenizer.from_pretrained(_lowercase ) __UpperCamelCase = load_dataset('glue' , 'mrpc' ) def tokenize_function(_lowercase ): # max_length=None => use the model max length (it's actually the default) __UpperCamelCase = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=_lowercase , max_length=_lowercase ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset __UpperCamelCase = datasets.map( _lowercase , batched=_lowercase , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=_lowercase ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library __UpperCamelCase = tokenized_datasets.rename_column('label' , 'labels' ) def collate_fn(_lowercase ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(_lowercase , padding='max_length' , max_length=1_28 , return_tensors='pt' ) return tokenizer.pad(_lowercase , padding='longest' , return_tensors='pt' ) # Instantiate dataloaders. __UpperCamelCase = DataLoader( tokenized_datasets['train'] , shuffle=_lowercase , collate_fn=_lowercase , batch_size=_lowercase ) __UpperCamelCase = DataLoader( tokenized_datasets['validation'] , shuffle=_lowercase , collate_fn=_lowercase , batch_size=_lowercase ) return train_dataloader, eval_dataloader def _A ( _lowercase , _lowercase ) -> int: """simple docstring""" __UpperCamelCase = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs __UpperCamelCase = config['lr'] __UpperCamelCase = int(config['num_epochs'] ) __UpperCamelCase = int(config['seed'] ) __UpperCamelCase = int(config['batch_size'] ) __UpperCamelCase = args.model_name_or_path set_seed(_lowercase ) __UpperCamelCase, __UpperCamelCase = get_dataloaders(_lowercase , _lowercase , _lowercase ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) __UpperCamelCase = AutoModelForSequenceClassification.from_pretrained(_lowercase , return_dict=_lowercase ) # Instantiate optimizer __UpperCamelCase = ( AdamW if accelerator.state.deepspeed_plugin is None or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) __UpperCamelCase = optimizer_cls(params=model.parameters() , lr=_lowercase ) if accelerator.state.deepspeed_plugin is not None: __UpperCamelCase = accelerator.state.deepspeed_plugin.deepspeed_config[ 'gradient_accumulation_steps' ] else: __UpperCamelCase = 1 __UpperCamelCase = (len(_lowercase ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): __UpperCamelCase = get_linear_schedule_with_warmup( optimizer=_lowercase , num_warmup_steps=0 , 
num_training_steps=_lowercase , ) else: __UpperCamelCase = DummyScheduler(_lowercase , total_num_steps=_lowercase , warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. __UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase = accelerator.prepare( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) # We need to keep track of how many total steps we have iterated over __UpperCamelCase = 0 # We also need to keep track of the stating epoch so files are named properly __UpperCamelCase = 0 # Now we train the model __UpperCamelCase = evaluate.load('glue' , 'mrpc' ) __UpperCamelCase = 0 __UpperCamelCase = {} for epoch in range(_lowercase , _lowercase ): model.train() for step, batch in enumerate(_lowercase ): __UpperCamelCase = model(**_lowercase ) __UpperCamelCase = outputs.loss __UpperCamelCase = loss / gradient_accumulation_steps accelerator.backward(_lowercase ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 model.eval() __UpperCamelCase = 0 for step, batch in enumerate(_lowercase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): __UpperCamelCase = model(**_lowercase ) __UpperCamelCase = outputs.logits.argmax(dim=-1 ) # It is slightly faster to call this once, than multiple times __UpperCamelCase, __UpperCamelCase = accelerator.gather( (predictions, batch['labels']) ) # If we are in a multiprocess environment, the last batch has duplicates if accelerator.use_distributed: if step == len(_lowercase ) - 1: __UpperCamelCase = predictions[: len(eval_dataloader.dataset ) - samples_seen] __UpperCamelCase = references[: len(eval_dataloader.dataset ) - samples_seen] else: samples_seen += references.shape[0] metric.add_batch( predictions=_lowercase , references=_lowercase , ) __UpperCamelCase = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f'''epoch {epoch}:''' , _lowercase ) __UpperCamelCase = eval_metric['accuracy'] if best_performance < eval_metric["accuracy"]: __UpperCamelCase = eval_metric['accuracy'] if args.performance_lower_bound is not None: assert ( args.performance_lower_bound <= best_performance ), f'''Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}''' accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir , 'all_results.json' ) , 'w' ) as f: json.dump(_lowercase , _lowercase ) def _A ( ) -> List[str]: """simple docstring""" __UpperCamelCase = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' ) parser.add_argument( '--model_name_or_path' , type=_lowercase , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=_lowercase , ) parser.add_argument( '--output_dir' , type=_lowercase , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , ) parser.add_argument( '--performance_lower_bound' , type=_lowercase , default=_lowercase , help='Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.' 
, ) parser.add_argument( '--num_epochs' , type=_lowercase , default=3 , help='Number of train epochs.' , ) __UpperCamelCase = parser.parse_args() __UpperCamelCase = {'lr': 2e-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16} training_function(_lowercase , _lowercase ) if __name__ == "__main__": main()
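A typical launch for the training script above; the config-file and script names are illustrative, while the flags (`--model_name_or_path`, `--num_epochs`, `--output_dir`) come from the argparse setup shown.

# Hypothetical invocation (names of the yaml and script are placeholders):
#   accelerate launch --config_file ds_zero2.yaml this_script.py \
#       --model_name_or_path bert-base-cased --num_epochs 3 --output_dir ./out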
1
1
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL __snake_case = logging.get_logger(__name__) class __lowerCamelCase (_a ): _lowercase = ["""pixel_values"""] def __init__( self: Union[str, Any],A_: bool = True,A_: Dict[str, int] = None,A_: PILImageResampling = PILImageResampling.BICUBIC,A_: bool = True,A_: Union[int, float] = 1 / 255,A_: bool = True,A_: Optional[Union[float, List[float]]] = None,A_: Optional[Union[float, List[float]]] = None,A_: bool = True,**A_: List[str],): '''simple docstring''' super().__init__(**A_ ) __UpperCamelCase = size if size is not None else {'height': 384, 'width': 384} __UpperCamelCase = get_size_dict(A_,default_to_square=A_ ) __UpperCamelCase = do_resize __UpperCamelCase = size __UpperCamelCase = resample __UpperCamelCase = do_rescale __UpperCamelCase = rescale_factor __UpperCamelCase = do_normalize __UpperCamelCase = image_mean if image_mean is not None else OPENAI_CLIP_MEAN __UpperCamelCase = image_std if image_std is not None else OPENAI_CLIP_STD __UpperCamelCase = do_convert_rgb def snake_case_ ( self: Union[str, Any],A_: np.ndarray,A_: Dict[str, int],A_: PILImageResampling = PILImageResampling.BICUBIC,A_: Optional[Union[str, ChannelDimension]] = None,**A_: Union[str, Any],): '''simple docstring''' __UpperCamelCase = get_size_dict(A_,default_to_square=A_ ) if "height" not in size or "width" not in size: raise ValueError(F'''The `size` dictionary must contain the keys `height` and `width`. 
Got {size.keys()}''' ) __UpperCamelCase = (size['height'], size['width']) return resize(A_,size=A_,resample=A_,data_format=A_,**A_ ) def snake_case_ ( self: Optional[Any],A_: np.ndarray,A_: Union[int, float],A_: Optional[Union[str, ChannelDimension]] = None,**A_: Union[str, Any],): '''simple docstring''' return rescale(A_,scale=A_,data_format=A_,**A_ ) def snake_case_ ( self: Tuple,A_: np.ndarray,A_: Union[float, List[float]],A_: Union[float, List[float]],A_: Optional[Union[str, ChannelDimension]] = None,**A_: Dict,): '''simple docstring''' return normalize(A_,mean=A_,std=A_,data_format=A_,**A_ ) def snake_case_ ( self: Optional[Any],A_: ImageInput,A_: Optional[bool] = None,A_: Optional[Dict[str, int]] = None,A_: PILImageResampling = None,A_: Optional[bool] = None,A_: Optional[float] = None,A_: Optional[bool] = None,A_: Optional[Union[float, List[float]]] = None,A_: Optional[Union[float, List[float]]] = None,A_: Optional[Union[str, TensorType]] = None,A_: bool = None,A_: ChannelDimension = ChannelDimension.FIRST,**A_: Tuple,): '''simple docstring''' __UpperCamelCase = do_resize if do_resize is not None else self.do_resize __UpperCamelCase = resample if resample is not None else self.resample __UpperCamelCase = do_rescale if do_rescale is not None else self.do_rescale __UpperCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor __UpperCamelCase = do_normalize if do_normalize is not None else self.do_normalize __UpperCamelCase = image_mean if image_mean is not None else self.image_mean __UpperCamelCase = image_std if image_std is not None else self.image_std __UpperCamelCase = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb __UpperCamelCase = size if size is not None else self.size __UpperCamelCase = get_size_dict(A_,default_to_square=A_ ) __UpperCamelCase = make_list_of_images(A_ ) if not valid_images(A_ ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None or resample is None: raise ValueError('Size and resample must be specified if do_resize is True.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.' ) # PIL RGBA images are converted to RGB if do_convert_rgb: __UpperCamelCase = [convert_to_rgb(A_ ) for image in images] # All transformations expect numpy arrays. __UpperCamelCase = [to_numpy_array(A_ ) for image in images] if do_resize: __UpperCamelCase = [self.resize(image=A_,size=A_,resample=A_ ) for image in images] if do_rescale: __UpperCamelCase = [self.rescale(image=A_,scale=A_ ) for image in images] if do_normalize: __UpperCamelCase = [self.normalize(image=A_,mean=A_,std=A_ ) for image in images] __UpperCamelCase = [to_channel_dimension_format(A_,A_ ) for image in images] __UpperCamelCase = BatchFeature(data={'pixel_values': images},tensor_type=A_ ) return encoded_outputs
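A hedged usage sketch for the processor above; `BlipImageProcessor` is the name its defaults (384x384 resize, OPENAI_CLIP mean/std, RGB conversion) suggest, but the class name is not shown in the snippet.

# Hedged sketch with an assumed class name.
from PIL import Image

processor = BlipImageProcessor()
img = Image.new("RGBA", (500, 400))   # RGBA input gets converted to RGB first
out = processor(images=img, return_tensors="np")
print(out["pixel_values"].shape)      # (1, 3, 384, 384)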
1
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments from transformers.testing_utils import TestCasePlus, require_torch, slow from transformers.utils import is_datasets_available if is_datasets_available(): import datasets class __lowerCamelCase (_a ): @slow @require_torch def snake_case_ ( self: Union[str, Any] ): '''simple docstring''' __UpperCamelCase = EncoderDecoderModel.from_encoder_decoder_pretrained('prajjwal1/bert-tiny','prajjwal1/bert-tiny' ) __UpperCamelCase = BertTokenizer.from_pretrained('bert-base-uncased' ) __UpperCamelCase = bertabert.config.encoder.vocab_size __UpperCamelCase = tokenizer.sep_token_id __UpperCamelCase = tokenizer.cls_token_id __UpperCamelCase = 128 __UpperCamelCase = datasets.load_dataset('cnn_dailymail','3.0.0',split='train[:1%]' ) __UpperCamelCase = datasets.load_dataset('cnn_dailymail','3.0.0',split='validation[:1%]' ) __UpperCamelCase = train_dataset.select(range(32 ) ) __UpperCamelCase = val_dataset.select(range(16 ) ) __UpperCamelCase = 4 def _map_to_encoder_decoder_inputs(A_: Dict ): # Tokenizer will automatically set [BOS] <text> [EOS] __UpperCamelCase = tokenizer(batch['article'],padding='max_length',truncation=A_,max_length=512 ) __UpperCamelCase = tokenizer(batch['highlights'],padding='max_length',truncation=A_,max_length=128 ) __UpperCamelCase = inputs.input_ids __UpperCamelCase = inputs.attention_mask __UpperCamelCase = outputs.input_ids __UpperCamelCase = outputs.input_ids.copy() __UpperCamelCase = [ [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['labels'] ] __UpperCamelCase = outputs.attention_mask assert all(len(A_ ) == 512 for x in inputs.input_ids ) assert all(len(A_ ) == 128 for x in outputs.input_ids ) return batch def _compute_metrics(A_: str ): __UpperCamelCase = pred.label_ids __UpperCamelCase = pred.predictions # all unnecessary tokens are removed __UpperCamelCase = tokenizer.batch_decode(A_,skip_special_tokens=A_ ) __UpperCamelCase = tokenizer.batch_decode(A_,skip_special_tokens=A_ ) __UpperCamelCase = sum([int(pred_str[i] == label_str[i] ) for i in range(len(A_ ) )] ) / len(A_ ) return {"accuracy": accuracy} # map train dataset __UpperCamelCase = train_dataset.map( _map_to_encoder_decoder_inputs,batched=A_,batch_size=A_,remove_columns=['article', 'highlights'],) train_dataset.set_format( type='torch',columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'],) # same for validation dataset __UpperCamelCase = val_dataset.map( _map_to_encoder_decoder_inputs,batched=A_,batch_size=A_,remove_columns=['article', 'highlights'],) val_dataset.set_format( type='torch',columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'],) __UpperCamelCase = self.get_auto_remove_tmp_dir() __UpperCamelCase = SeqaSeqTrainingArguments( output_dir=A_,per_device_train_batch_size=A_,per_device_eval_batch_size=A_,predict_with_generate=A_,evaluation_strategy='steps',do_train=A_,do_eval=A_,warmup_steps=0,eval_steps=2,logging_steps=2,) # instantiate trainer __UpperCamelCase = SeqaSeqTrainer( model=A_,args=A_,compute_metrics=_compute_metrics,train_dataset=A_,eval_dataset=A_,tokenizer=A_,) # start training trainer.train()
1
1
import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES from ...utils import logging from ..auto import CONFIG_MAPPING __snake_case = logging.get_logger(__name__) __snake_case = { '''salesforce/blip2-opt-2.7b''': '''https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json''', } class __lowerCamelCase (_a ): _lowercase = """blip_2_vision_model""" def __init__( self: Optional[int],A_: str=1408,A_: Union[str, Any]=6144,A_: Tuple=39,A_: Optional[int]=16,A_: List[str]=224,A_: Optional[Any]=14,A_: Union[str, Any]="gelu",A_: List[Any]=0.0_0_0_0_1,A_: List[Any]=0.0,A_: List[Any]=1E-10,A_: Optional[int]=True,**A_: Union[str, Any],): '''simple docstring''' super().__init__(**A_ ) __UpperCamelCase = hidden_size __UpperCamelCase = intermediate_size __UpperCamelCase = num_hidden_layers __UpperCamelCase = num_attention_heads __UpperCamelCase = patch_size __UpperCamelCase = image_size __UpperCamelCase = initializer_range __UpperCamelCase = attention_dropout __UpperCamelCase = layer_norm_eps __UpperCamelCase = hidden_act __UpperCamelCase = qkv_bias @classmethod def snake_case_ ( cls: Tuple,A_: Union[str, os.PathLike],**A_: Tuple ): '''simple docstring''' cls._set_token_in_kwargs(A_ ) __UpperCamelCase, __UpperCamelCase = cls.get_config_dict(A_,**A_ ) # get the vision config dict if we are loading from Blip2Config if config_dict.get('model_type' ) == "blip-2": __UpperCamelCase = config_dict['vision_config'] if "model_type" in config_dict and hasattr(cls,'model_type' ) and config_dict["model_type"] != cls.model_type: logger.warning( F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type ''' F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(A_,**A_ ) class __lowerCamelCase (_a ): _lowercase = """blip_2_qformer""" def __init__( self: Optional[Any],A_: Optional[int]=3_0522,A_: List[Any]=768,A_: Any=12,A_: Dict=12,A_: int=3072,A_: Dict="gelu",A_: Union[str, Any]=0.1,A_: Any=0.1,A_: Any=512,A_: Dict=0.0_2,A_: str=1E-12,A_: List[Any]=0,A_: Optional[int]="absolute",A_: List[str]=2,A_: Optional[Any]=1408,**A_: Optional[Any],): '''simple docstring''' super().__init__(pad_token_id=A_,**A_ ) __UpperCamelCase = vocab_size __UpperCamelCase = hidden_size __UpperCamelCase = num_hidden_layers __UpperCamelCase = num_attention_heads __UpperCamelCase = hidden_act __UpperCamelCase = intermediate_size __UpperCamelCase = hidden_dropout_prob __UpperCamelCase = attention_probs_dropout_prob __UpperCamelCase = max_position_embeddings __UpperCamelCase = initializer_range __UpperCamelCase = layer_norm_eps __UpperCamelCase = position_embedding_type __UpperCamelCase = cross_attention_frequency __UpperCamelCase = encoder_hidden_size @classmethod def snake_case_ ( cls: int,A_: Union[str, os.PathLike],**A_: int ): '''simple docstring''' cls._set_token_in_kwargs(A_ ) __UpperCamelCase, __UpperCamelCase = cls.get_config_dict(A_,**A_ ) # get the qformer config dict if we are loading from Blip2Config if config_dict.get('model_type' ) == "blip-2": __UpperCamelCase = config_dict['qformer_config'] if "model_type" in config_dict and hasattr(cls,'model_type' ) and config_dict["model_type"] != cls.model_type: logger.warning( F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type ''' F'''{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(A_,**A_ ) class __lowerCamelCase (_a ): _lowercase = """blip-2""" _lowercase = True def __init__( self: Union[str, Any],A_: Optional[int]=None,A_: List[str]=None,A_: Optional[Any]=None,A_: List[str]=32,**A_: Optional[Any] ): '''simple docstring''' super().__init__(**A_ ) if vision_config is None: __UpperCamelCase = {} logger.info('vision_config is None. initializing the Blip2VisionConfig with default values.' ) if qformer_config is None: __UpperCamelCase = {} logger.info('qformer_config is None. Initializing the Blip2QFormerConfig with default values.' ) if text_config is None: __UpperCamelCase = {} logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' ) __UpperCamelCase = BlipaVisionConfig(**A_ ) __UpperCamelCase = BlipaQFormerConfig(**A_ ) __UpperCamelCase = text_config['model_type'] if 'model_type' in text_config else 'opt' __UpperCamelCase = CONFIG_MAPPING[text_model_type](**A_ ) __UpperCamelCase = self.text_config.tie_word_embeddings __UpperCamelCase = self.text_config.is_encoder_decoder __UpperCamelCase = num_query_tokens __UpperCamelCase = self.vision_config.hidden_size __UpperCamelCase = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES __UpperCamelCase = 1.0 __UpperCamelCase = 0.0_2 @classmethod def snake_case_ ( cls: List[Any],A_: BlipaVisionConfig,A_: BlipaQFormerConfig,A_: PretrainedConfig,**A_: Optional[int],): '''simple docstring''' return cls( vision_config=vision_config.to_dict(),qformer_config=qformer_config.to_dict(),text_config=text_config.to_dict(),**A_,) def snake_case_ ( self: Optional[int] ): '''simple docstring''' __UpperCamelCase = copy.deepcopy(self.__dict__ ) __UpperCamelCase = self.vision_config.to_dict() __UpperCamelCase = self.qformer_config.to_dict() __UpperCamelCase = self.text_config.to_dict() __UpperCamelCase = self.__class__.model_type return output
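A hedged sketch of the composed config above; the class names (`Blip2Config` and friends) are partially obfuscated in the snippet, so they are assumptions based on the registered model type "blip-2".

# Hedged sketch: default construction falls back to an OPT text backbone.
config = Blip2Config()
print(config.num_query_tokens)        # 32
print(config.text_config.model_type)  # 'opt'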
1
def solution(n: int = 100) -> int:
    """Return the difference between the square of the sum and the sum of the
    squares of the first `n` natural numbers (the __main__ block below calls
    this by name, so the function must be called `solution`)."""
    sum_of_squares = 0
    sum_of_ints = 0
    for i in range(1, n + 1):
        sum_of_squares += i**2
        sum_of_ints += i
    return sum_of_ints**2 - sum_of_squares


if __name__ == "__main__":
    print(f"""{solution() = }""")
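A worked check for n = 10: the sum 1+...+10 is 55, so the square of the sum is 3025; the sum of squares is 385; the difference is 2640.

assert solution(10) == 2640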
1
1
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors


def mobius(n: int) -> int:
    """Möbius function: 1 if n is square-free with an even number of prime
    factors, -1 if the count is odd, and 0 if n has a squared prime factor.
    Note `is_square_free` and `len` must be applied to the factor list, not
    to n itself."""
    factors = prime_factors(n)
    if is_square_free(factors):
        return -1 if len(factors) % 2 else 1
    return 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
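Spot checks: 24 = 2^3 * 3 is not square-free, 10 = 2 * 5 has an even number of prime factors, and 7 is a single prime.

print(mobius(24), mobius(10), mobius(7))  # 0 1 -1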
1
def _A ( _lowercase , _lowercase ) -> int: """simple docstring""" return (pointa[0] - pointa[0]) ** 2 + (pointa[1] - pointa[1]) ** 2 def _A ( _lowercase , _lowercase=0 ) -> Dict: """simple docstring""" return sorted(_lowercase , key=lambda _lowercase : x[column] ) def _A ( _lowercase , _lowercase , _lowercase=float('inf' ) ) -> List[Any]: """simple docstring""" for i in range(points_counts - 1 ): for j in range(i + 1 , _lowercase ): __UpperCamelCase = euclidean_distance_sqr(points[i] , points[j] ) if current_dis < min_dis: __UpperCamelCase = current_dis return min_dis def _A ( _lowercase , _lowercase , _lowercase=float('inf' ) ) -> Tuple: """simple docstring""" for i in range(min(6 , points_counts - 1 ) , _lowercase ): for j in range(max(0 , i - 6 ) , _lowercase ): __UpperCamelCase = euclidean_distance_sqr(points[i] , points[j] ) if current_dis < min_dis: __UpperCamelCase = current_dis return min_dis def _A ( _lowercase , _lowercase , _lowercase ) -> Optional[Any]: """simple docstring""" if points_counts <= 3: return dis_between_closest_pair(_lowercase , _lowercase ) # recursion __UpperCamelCase = points_counts // 2 __UpperCamelCase = closest_pair_of_points_sqr( _lowercase , points_sorted_on_y[:mid] , _lowercase ) __UpperCamelCase = closest_pair_of_points_sqr( _lowercase , points_sorted_on_y[mid:] , points_counts - mid ) __UpperCamelCase = min(_lowercase , _lowercase ) __UpperCamelCase = [] for point in points_sorted_on_x: if abs(point[0] - points_sorted_on_x[mid][0] ) < closest_pair_dis: cross_strip.append(_lowercase ) __UpperCamelCase = dis_between_closest_in_strip( _lowercase , len(_lowercase ) , _lowercase ) return min(_lowercase , _lowercase ) def _A ( _lowercase , _lowercase ) -> Optional[int]: """simple docstring""" __UpperCamelCase = column_based_sort(_lowercase , column=0 ) __UpperCamelCase = column_based_sort(_lowercase , column=1 ) return ( closest_pair_of_points_sqr( _lowercase , _lowercase , _lowercase ) ) ** 0.5 if __name__ == "__main__": __snake_case = [(2, 3), (1_2, 3_0), (4_0, 5_0), (5, 1), (1_2, 1_0), (3, 4)] print('''Distance:''', closest_pair_of_points(points, len(points)))
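A small check for the divide-and-conquer routine above, using `closest_pair_of_points` as the entry point (the name the __main__ block already uses): the closest pair below is (0, 0)-(1, 1).

pts = [(0, 0), (3, 4), (1, 1)]
print(closest_pair_of_points(pts, len(pts)))  # sqrt(2) ~ 1.4142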
1
1
from typing import List, Optional, Union import numpy as np import tensorflow as tf from .utils import logging __snake_case = logging.get_logger(__name__) def _A ( _lowercase ) -> List[int]: """simple docstring""" if isinstance(_lowercase , np.ndarray ): return list(tensor.shape ) __UpperCamelCase = tf.shape(_lowercase ) if tensor.shape == tf.TensorShape(_lowercase ): return dynamic __UpperCamelCase = tensor.shape.as_list() return [dynamic[i] if s is None else s for i, s in enumerate(_lowercase )] def _A ( _lowercase , _lowercase = None , _lowercase = None ) -> tf.Tensor: """simple docstring""" return tf.nn.softmax(logits=logits + 1e-9 , axis=_lowercase , name=_lowercase ) def _A ( _lowercase , _lowercase , _lowercase , _lowercase=1e-5 , _lowercase=-1 ) -> str: """simple docstring""" if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(_lowercase , _lowercase ): raise NotImplementedError('Only 1D weight and bias tensors are supported for now, with only a single axis.' ) # Get mean and variance on the axis to be normalized __UpperCamelCase, __UpperCamelCase = tf.nn.moments(_lowercase , axes=[axis] , keepdims=_lowercase ) if axis != -1: # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions # on every dimension except axis __UpperCamelCase = [1] * inputs.shape.rank __UpperCamelCase = shape_list(_lowercase )[axis] __UpperCamelCase = tf.reshape(_lowercase , _lowercase ) __UpperCamelCase = tf.reshape(_lowercase , _lowercase ) # Compute layer normalization using the batch_normalization # function. __UpperCamelCase = tf.nn.batch_normalization( _lowercase , _lowercase , _lowercase , offset=_lowercase , scale=_lowercase , variance_epsilon=_lowercase , ) return outputs def _A ( _lowercase , _lowercase=0 , _lowercase=-1 ) -> Tuple: """simple docstring""" if end_dim < 0: end_dim += input.shape.rank if start_dim < 0: start_dim += input.shape.rank if start_dim == end_dim: return input __UpperCamelCase = tf.shape(_lowercase ) __UpperCamelCase = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] ) __UpperCamelCase = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] , axis=0 ) return tf.reshape(_lowercase , _lowercase ) def _A ( _lowercase ) -> tf.Tensor: """simple docstring""" if not isinstance(_lowercase , tf.Tensor ): __UpperCamelCase = tf.convert_to_tensor(_lowercase ) # Catches stray NumPy inputs if encoder_attention_mask.shape.rank == 3: __UpperCamelCase = encoder_attention_mask[:, None, :, :] if encoder_attention_mask.shape.rank == 2: __UpperCamelCase = encoder_attention_mask[:, None, None, :] # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow # /transformer/transformer_layers.py#L270 # encoder_extended_attention_mask = (encoder_extended_attention_mask == # encoder_extended_attention_mask.transpose(-1, -2)) __UpperCamelCase = ( tf.cast(1 , encoder_attention_mask.dtype ) - encoder_extended_attention_mask ) * encoder_extended_attention_mask.dtype.min return encoder_extended_attention_mask def _A ( _lowercase , _lowercase , _lowercase = "input_ids" ) -> None: """simple docstring""" tf.debugging.assert_less( _lowercase , tf.cast(_lowercase , dtype=tensor.dtype ) , message=( f'''The maximum value of {tensor_name} ({tf.math.reduce_max(_lowercase )}) must be smaller than the embedding ''' f'''layer\'s input dimension ({embed_dim}). 
The likely cause is some problem at tokenization time.''' ) , ) def _A ( _lowercase , _lowercase , _lowercase ) -> Optional[int]: """simple docstring""" __UpperCamelCase = 6_45_12 # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT` # because in that case even chunking the array would not make the saving # possible. __UpperCamelCase = [x for x in data if len(_lowercase ) > HDF5_OBJECT_HEADER_LIMIT] # Expecting this to never be true. if bad_attributes: raise RuntimeError( 'The following attributes cannot be saved to HDF5 file because ' f'''they are larger than {HDF5_OBJECT_HEADER_LIMIT} ''' f'''bytes: {bad_attributes}''' ) __UpperCamelCase = np.asarray(_lowercase ) __UpperCamelCase = 1 __UpperCamelCase = np.array_split(_lowercase , _lowercase ) # This will never loop forever thanks to the test above. while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ): num_chunks += 1 __UpperCamelCase = np.array_split(_lowercase , _lowercase ) if num_chunks > 1: for chunk_id, chunk_data in enumerate(_lowercase ): __UpperCamelCase = chunk_data else: __UpperCamelCase = data def _A ( _lowercase , _lowercase ) -> Tuple: """simple docstring""" if name in group.attrs: __UpperCamelCase = [n.decode('utf8' ) if hasattr(_lowercase , 'decode' ) else n for n in group.attrs[name]] else: __UpperCamelCase = [] __UpperCamelCase = 0 while "%s%d" % (name, chunk_id) in group.attrs: data.extend( [n.decode('utf8' ) if hasattr(_lowercase , 'decode' ) else n for n in group.attrs['%s%d' % (name, chunk_id)]] ) chunk_id += 1 return data def _A ( _lowercase ) -> Tuple: """simple docstring""" def _expand_single_ad_tensor(_lowercase ): if isinstance(_lowercase , tf.Tensor ) and t.shape.rank == 1: return tf.expand_dims(_lowercase , axis=-1 ) return t return tf.nest.map_structure(_expand_single_ad_tensor , _lowercase )
1
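The HDF5 attribute saver in the sample above dodges HDF5's ~64 KB object-header limit by re-splitting the array into more and more chunks until every chunk fits. The same loop in isolation, a sketch assuming only NumPy:

import numpy as np

HDF5_OBJECT_HEADER_LIMIT = 64512  # same byte limit as in the sample above

def split_for_hdf5(data):
    # Keep increasing the chunk count until no chunk exceeds the limit;
    # the loop terminates because a single element always fits.
    data = np.asarray(data)
    num_chunks = 1
    chunked = np.array_split(data, num_chunks)
    while any(chunk.nbytes > HDF5_OBJECT_HEADER_LIMIT for chunk in chunked):
        num_chunks += 1
        chunked = np.array_split(data, num_chunks)
    return chunked

big = np.zeros(100_000, dtype=np.float64)  # 800,000 bytes
print(len(split_for_hdf5(big)))            # 13 chunks of <= 64512 bytes each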
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __snake_case = logging.get_logger(__name__) __snake_case = { '''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/config.json''', '''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/config.json''', '''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/config.json''', '''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/config.json''', '''bert-base-multilingual-uncased''': '''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json''', '''bert-base-multilingual-cased''': '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json''', '''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/config.json''', '''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/config.json''', '''bert-large-uncased-whole-word-masking''': ( '''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json''' ), '''bert-large-cased-whole-word-masking''': ( '''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json''' ), '''bert-large-uncased-whole-word-masking-finetuned-squad''': ( '''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json''' ), '''bert-large-cased-whole-word-masking-finetuned-squad''': ( '''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json''' ), '''bert-base-cased-finetuned-mrpc''': '''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json''', '''bert-base-german-dbmdz-cased''': '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json''', '''bert-base-german-dbmdz-uncased''': '''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json''', '''cl-tohoku/bert-base-japanese''': '''https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json''', '''cl-tohoku/bert-base-japanese-whole-word-masking''': ( '''https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json''' ), '''cl-tohoku/bert-base-japanese-char''': ( '''https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json''' ), '''cl-tohoku/bert-base-japanese-char-whole-word-masking''': ( '''https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json''' ), '''TurkuNLP/bert-base-finnish-cased-v1''': ( '''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json''' ), '''TurkuNLP/bert-base-finnish-uncased-v1''': ( '''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json''' ), '''wietsedv/bert-base-dutch-cased''': '''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json''', # See all BERT models at https://huggingface.co/models?filter=bert } class __lowerCamelCase (_a ): _lowercase = """bert""" def __init__( self: Any,A_: Dict=3_0522,A_: Optional[Any]=768,A_: Union[str, Any]=12,A_: List[Any]=12,A_: Optional[int]=3072,A_: Union[str, Any]="gelu",A_: List[str]=0.1,A_: Dict=0.1,A_: Optional[int]=512,A_: Optional[Any]=2,A_: Union[str, Any]=0.0_2,A_: List[Any]=1E-12,A_: Optional[int]=0,A_: List[Any]="absolute",A_: str=True,A_: Union[str, Any]=None,**A_: int,): 
'''simple docstring''' super().__init__(pad_token_id=A_,**A_ ) __UpperCamelCase = vocab_size __UpperCamelCase = hidden_size __UpperCamelCase = num_hidden_layers __UpperCamelCase = num_attention_heads __UpperCamelCase = hidden_act __UpperCamelCase = intermediate_size __UpperCamelCase = hidden_dropout_prob __UpperCamelCase = attention_probs_dropout_prob __UpperCamelCase = max_position_embeddings __UpperCamelCase = type_vocab_size __UpperCamelCase = initializer_range __UpperCamelCase = layer_norm_eps __UpperCamelCase = position_embedding_type __UpperCamelCase = use_cache __UpperCamelCase = classifier_dropout class __lowerCamelCase (_a ): @property def snake_case_ ( self: Optional[int] ): '''simple docstring''' if self.task == "multiple-choice": __UpperCamelCase = {0: 'batch', 1: 'choice', 2: 'sequence'} else: __UpperCamelCase = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis), ] )
1
1
import math import sys def _A ( _lowercase ) -> str: """simple docstring""" __UpperCamelCase = '' try: with open(_lowercase , 'rb' ) as binary_file: __UpperCamelCase = binary_file.read() for dat in data: __UpperCamelCase = f'''{dat:08b}''' result += curr_byte return result except OSError: print('File not accessible' ) sys.exit() def _A ( _lowercase ) -> str: """simple docstring""" __UpperCamelCase = {'0': '0', '1': '1'} __UpperCamelCase, __UpperCamelCase = '', '' __UpperCamelCase = len(_lowercase ) for i in range(len(_lowercase ) ): curr_string += data_bits[i] if curr_string not in lexicon: continue __UpperCamelCase = lexicon[curr_string] result += last_match_id __UpperCamelCase = last_match_id + '0' if math.loga(_lowercase ).is_integer(): __UpperCamelCase = {} for curr_key in list(_lowercase ): __UpperCamelCase = lexicon.pop(_lowercase ) __UpperCamelCase = new_lex __UpperCamelCase = last_match_id + '1' index += 1 __UpperCamelCase = '' return result def _A ( _lowercase , _lowercase ) -> None: """simple docstring""" __UpperCamelCase = 8 try: with open(_lowercase , 'wb' ) as opened_file: __UpperCamelCase = [ to_write[i : i + byte_length] for i in range(0 , len(_lowercase ) , _lowercase ) ] if len(result_byte_array[-1] ) % byte_length == 0: result_byte_array.append('10000000' ) else: result_byte_array[-1] += "1" + "0" * ( byte_length - len(result_byte_array[-1] ) - 1 ) for elem in result_byte_array[:-1]: opened_file.write(int(_lowercase , 2 ).to_bytes(1 , byteorder='big' ) ) except OSError: print('File not accessible' ) sys.exit() def _A ( _lowercase ) -> str: """simple docstring""" __UpperCamelCase = 0 for letter in data_bits: if letter == "1": break counter += 1 __UpperCamelCase = data_bits[counter:] __UpperCamelCase = data_bits[counter + 1 :] return data_bits def _A ( _lowercase , _lowercase ) -> None: """simple docstring""" __UpperCamelCase = read_file_binary(_lowercase ) __UpperCamelCase = remove_prefix(_lowercase ) __UpperCamelCase = decompress_data(_lowercase ) write_file_binary(_lowercase , _lowercase ) if __name__ == "__main__": compress(sys.argv[1], sys.argv[2])
1
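The decompressor above operates on one flat bit string (eight '0'/'1' characters per input byte) and first strips a size header written as k zeros followed by a (k + 1)-bit length field. A sketch of those two conventions, reading the sample's two masked slice assignments as successive rebindings of the same string, which is an assumption:

def bytes_to_bits(data: bytes) -> str:
    # Eight characters per byte, as in the sample's file reader.
    return "".join(f"{byte:08b}" for byte in data)

def strip_size_prefix(data_bits: str) -> str:
    k = data_bits.index("1")    # number of leading zeros
    data_bits = data_bits[k:]   # drop the zeros
    return data_bits[k + 1:]    # drop the (k + 1)-bit length field

print(bytes_to_bits(b"\x05"))          # '00000101'
print(strip_size_prefix("001010110"))  # '0110' (header was '00101')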
def _A ( _lowercase ) -> int: """simple docstring""" assert column_title.isupper() __UpperCamelCase = 0 __UpperCamelCase = len(_lowercase ) - 1 __UpperCamelCase = 0 while index >= 0: __UpperCamelCase = (ord(column_title[index] ) - 64) * pow(26 , _lowercase ) answer += value power += 1 index -= 1 return answer if __name__ == "__main__": from doctest import testmod testmod()
1
1
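The column-title sample above is base-26 positional arithmetic with 'A' worth 1 (hence the `ord(...) - 64`). A couple of hand-checkable values, with an assumed name for the masked function:

def excel_title_to_column(column_title: str) -> int:
    # Assumed name; same arithmetic as the sample above.
    answer, power = 0, 0
    for char in reversed(column_title):
        answer += (ord(char) - 64) * 26 ** power  # 'A' -> 1, ..., 'Z' -> 26
        power += 1
    return answer

assert excel_title_to_column("A") == 1
assert excel_title_to_column("AB") == 28   # 1 * 26 + 2
assert excel_title_to_column("ZZ") == 702  # 26 * 26 + 26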
def _A ( _lowercase ) -> int: """simple docstring""" __UpperCamelCase = len(_lowercase ) __UpperCamelCase = len(matrix[0] ) __UpperCamelCase = min(_lowercase , _lowercase ) for row in range(_lowercase ): # Check if diagonal element is not zero if matrix[row][row] != 0: # Eliminate all the elements below the diagonal for col in range(row + 1 , _lowercase ): __UpperCamelCase = matrix[col][row] / matrix[row][row] for i in range(_lowercase , _lowercase ): matrix[col][i] -= multiplier * matrix[row][i] else: # Find a non-zero diagonal element to swap rows __UpperCamelCase = True for i in range(row + 1 , _lowercase ): if matrix[i][row] != 0: __UpperCamelCase, __UpperCamelCase = matrix[i], matrix[row] __UpperCamelCase = False break if reduce: rank -= 1 for i in range(_lowercase ): __UpperCamelCase = matrix[i][rank] # Reduce the row pointer by one to stay on the same row row -= 1 return rank if __name__ == "__main__": import doctest doctest.testmod()
1
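The rank routine above is textbook Gaussian elimination: eliminate below each non-zero pivot, swap in a lower row when the pivot is zero, and shrink the rank when a whole pivot column is zero. Since the sample's own names are masked, here are quick cross-checks it should agree with, using NumPy as the reference on well-conditioned inputs:

import numpy as np

# Expected answers for the Gaussian-elimination routine above:
assert np.linalg.matrix_rank(np.array([[1, 2], [2, 4]])) == 1  # row 2 = 2 * row 1
assert np.linalg.matrix_rank(np.eye(3)) == 3                   # full rank
assert np.linalg.matrix_rank(np.zeros((2, 2))) == 0            # no pivots at all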
import argparse import requests import torch # pip3 install salesforce-lavis # I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis from lavis.models import load_model_and_preprocess from PIL import Image from transformers import ( AutoTokenizer, BlipaConfig, BlipaForConditionalGeneration, BlipaProcessor, BlipaVisionConfig, BlipImageProcessor, OPTConfig, TaConfig, ) from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD def _A ( ) -> int: """simple docstring""" __UpperCamelCase = 'https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png' __UpperCamelCase = Image.open(requests.get(_lowercase , stream=_lowercase ).raw ).convert('RGB' ) return image def _A ( _lowercase ) -> int: """simple docstring""" __UpperCamelCase = [] # fmt: off # vision encoder rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding') ) rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding') ) rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight') ) rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias') ) rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight') ) rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias') ) for i in range(config.vision_config.num_hidden_layers ): rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.weight''', f'''vision_model.encoder.layers.{i}.layer_norm1.weight''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.bias''', f'''vision_model.encoder.layers.{i}.layer_norm1.bias''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.weight''', f'''vision_model.encoder.layers.{i}.layer_norm2.weight''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.bias''', f'''vision_model.encoder.layers.{i}.layer_norm2.bias''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.attn.qkv.weight''', f'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.weight''', f'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) ) rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.bias''', f'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') ) # QFormer rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.layernorm.weight') ) rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.layernorm.bias') ) # fmt: on return rename_keys def _A ( _lowercase , _lowercase , _lowercase ) -> Optional[int]: """simple docstring""" __UpperCamelCase = dct.pop(_lowercase ) __UpperCamelCase = val def _A ( _lowercase , _lowercase ) -> int: """simple docstring""" for i in range(config.vision_config.num_hidden_layers ): # read in original q and v biases __UpperCamelCase = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.q_bias''' ) __UpperCamelCase = 
state_dict.pop(f'''visual_encoder.blocks.{i}.attn.v_bias''' ) # next, set bias in the state dict __UpperCamelCase = torch.cat((q_bias, torch.zeros_like(_lowercase , requires_grad=_lowercase ), v_bias) ) __UpperCamelCase = qkv_bias def _A ( _lowercase , _lowercase ) -> Any: """simple docstring""" __UpperCamelCase = 3_64 if 'coco' in model_name else 2_24 __UpperCamelCase = BlipaVisionConfig(image_size=_lowercase ).to_dict() # make sure the models have proper bos_token_id and eos_token_id set (important for generation) # seems like flan-T5 models don't have bos_token_id properly set? if "opt-2.7b" in model_name: __UpperCamelCase = OPTConfig.from_pretrained('facebook/opt-2.7b' , eos_token_id=_lowercase ).to_dict() elif "opt-6.7b" in model_name: __UpperCamelCase = OPTConfig.from_pretrained('facebook/opt-6.7b' , eos_token_id=_lowercase ).to_dict() elif "t5-xl" in model_name: __UpperCamelCase = TaConfig.from_pretrained('google/flan-t5-xl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict() elif "t5-xxl" in model_name: __UpperCamelCase = TaConfig.from_pretrained('google/flan-t5-xxl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict() __UpperCamelCase = BlipaConfig(vision_config=_lowercase , text_config=_lowercase ) return config, image_size @torch.no_grad() def _A ( _lowercase , _lowercase=None , _lowercase=False ) -> Union[str, Any]: """simple docstring""" __UpperCamelCase = ( AutoTokenizer.from_pretrained('facebook/opt-2.7b' ) if 'opt' in model_name else AutoTokenizer.from_pretrained('google/flan-t5-xl' ) ) __UpperCamelCase = tokenizer('\n' , add_special_tokens=_lowercase ).input_ids[0] __UpperCamelCase, __UpperCamelCase = get_blipa_config(_lowercase , eos_token_id=_lowercase ) __UpperCamelCase = BlipaForConditionalGeneration(_lowercase ).eval() __UpperCamelCase = { 'blip2-opt-2.7b': ('blip2_opt', 'pretrain_opt2.7b'), 'blip2-opt-6.7b': ('blip2_opt', 'pretrain_opt6.7b'), 'blip2-opt-2.7b-coco': ('blip2_opt', 'caption_coco_opt2.7b'), 'blip2-opt-6.7b-coco': ('blip2_opt', 'caption_coco_opt6.7b'), 'blip2-flan-t5-xl': ('blip2_t5', 'pretrain_flant5xl'), 'blip2-flan-t5-xl-coco': ('blip2_t5', 'caption_coco_flant5xl'), 'blip2-flan-t5-xxl': ('blip2_t5', 'pretrain_flant5xxl'), } __UpperCamelCase, __UpperCamelCase = model_name_to_original[model_name] # load original model print('Loading original model...' ) __UpperCamelCase = 'cuda' if torch.cuda.is_available() else 'cpu' __UpperCamelCase, __UpperCamelCase, __UpperCamelCase = load_model_and_preprocess( name=_lowercase , model_type=_lowercase , is_eval=_lowercase , device=_lowercase ) original_model.eval() print('Done!' 
) # update state dict keys __UpperCamelCase = original_model.state_dict() __UpperCamelCase = create_rename_keys(_lowercase ) for src, dest in rename_keys: rename_key(_lowercase , _lowercase , _lowercase ) # some keys can be renamed efficiently for key, val in state_dict.copy().items(): __UpperCamelCase = state_dict.pop(_lowercase ) if key.startswith('Qformer.bert' ): __UpperCamelCase = key.replace('Qformer.bert' , 'qformer' ) if "attention.self" in key: __UpperCamelCase = key.replace('self' , 'attention' ) if "opt_proj" in key: __UpperCamelCase = key.replace('opt_proj' , 'language_projection' ) if "t5_proj" in key: __UpperCamelCase = key.replace('t5_proj' , 'language_projection' ) if key.startswith('opt' ): __UpperCamelCase = key.replace('opt' , 'language' ) if key.startswith('t5' ): __UpperCamelCase = key.replace('t5' , 'language' ) __UpperCamelCase = val # read in qv biases read_in_q_v_bias(_lowercase , _lowercase ) __UpperCamelCase, __UpperCamelCase = hf_model.load_state_dict(_lowercase , strict=_lowercase ) assert len(_lowercase ) == 0 assert unexpected_keys == ["qformer.embeddings.position_ids"] __UpperCamelCase = load_demo_image() __UpperCamelCase = vis_processors['eval'](_lowercase ).unsqueeze(0 ).to(_lowercase ) __UpperCamelCase = tokenizer(['\n'] , return_tensors='pt' ).input_ids.to(_lowercase ) # create processor __UpperCamelCase = BlipImageProcessor( size={'height': image_size, 'width': image_size} , image_mean=_lowercase , image_std=_lowercase ) __UpperCamelCase = BlipaProcessor(image_processor=_lowercase , tokenizer=_lowercase ) __UpperCamelCase = processor(images=_lowercase , return_tensors='pt' ).pixel_values.to(_lowercase ) # make sure processor creates exact same pixel values assert torch.allclose(_lowercase , _lowercase ) original_model.to(_lowercase ) hf_model.to(_lowercase ) with torch.no_grad(): if "opt" in model_name: __UpperCamelCase = original_model({'image': original_pixel_values, 'text_input': ['']} ).logits __UpperCamelCase = hf_model(_lowercase , _lowercase ).logits else: __UpperCamelCase = original_model( {'image': original_pixel_values, 'text_input': ['\n'], 'text_output': ['\n']} ).logits __UpperCamelCase = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_00 ) __UpperCamelCase = hf_model(_lowercase , _lowercase , labels=_lowercase ).logits assert original_logits.shape == logits.shape print('First values of original logits:' , original_logits[0, :3, :3] ) print('First values of HF logits:' , logits[0, :3, :3] ) # assert values if model_name == "blip2-flan-t5-xl": __UpperCamelCase = torch.tensor( [[-41.58_50, -4.44_40, -8.99_22], [-47.43_22, -5.91_43, -1.73_40]] , device=_lowercase ) assert torch.allclose(logits[0, :3, :3] , _lowercase , atol=1e-4 ) elif model_name == "blip2-flan-t5-xl-coco": __UpperCamelCase = torch.tensor( [[-57.01_09, -9.89_67, -12.62_80], [-68.65_78, -12.71_91, -10.50_65]] , device=_lowercase ) else: # cast to same type __UpperCamelCase = logits.dtype assert torch.allclose(original_logits.to(_lowercase ) , _lowercase , atol=1e-2 ) print('Looks ok!' ) print('Generating a caption...' 
) __UpperCamelCase = '' __UpperCamelCase = tokenizer(_lowercase , return_tensors='pt' ).input_ids.to(_lowercase ) __UpperCamelCase = original_model.generate({'image': original_pixel_values} ) __UpperCamelCase = hf_model.generate( _lowercase , _lowercase , do_sample=_lowercase , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , ) print('Original generation:' , _lowercase ) __UpperCamelCase = input_ids.shape[1] __UpperCamelCase = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=_lowercase ) __UpperCamelCase = [text.strip() for text in output_text] print('HF generation:' , _lowercase ) if pytorch_dump_folder_path is not None: processor.save_pretrained(_lowercase ) hf_model.save_pretrained(_lowercase ) if push_to_hub: processor.push_to_hub(f'''nielsr/{model_name}''' ) hf_model.push_to_hub(f'''nielsr/{model_name}''' ) if __name__ == "__main__": __snake_case = argparse.ArgumentParser() __snake_case = [ '''blip2-opt-2.7b''', '''blip2-opt-6.7b''', '''blip2-opt-2.7b-coco''', '''blip2-opt-6.7b-coco''', '''blip2-flan-t5-xl''', '''blip2-flan-t5-xl-coco''', '''blip2-flan-t5-xxl''', ] parser.add_argument( '''--model_name''', default='''blip2-opt-2.7b''', choices=choices, type=str, help='''Path to hf config.json of model to convert''', ) parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether to push the model and processor to the hub after converting''', ) __snake_case = parser.parse_args() convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
1
1
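One detail of the conversion script above worth spelling out: the source vision encoder stores separate q and v biases and no k bias, while the target attention layer expects one fused qkv bias, so the script concatenates q, a zero block for k, then v. The same fusion in isolation (sizes and values here are hypothetical):

import torch

hidden_size = 4  # hypothetical; real checkpoints use much larger sizes
q_bias = torch.randn(hidden_size)
v_bias = torch.randn(hidden_size)

# k has no bias in the source model, so its slot is filled with zeros.
qkv_bias = torch.cat((q_bias, torch.zeros_like(q_bias, requires_grad=False), v_bias))
assert qkv_bias.shape == (3 * hidden_size,)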
import datetime import platform import subprocess from typing import Optional, Tuple, Union import numpy as np def _A ( _lowercase , _lowercase ) -> np.array: """simple docstring""" __UpperCamelCase = f'''{sampling_rate}''' __UpperCamelCase = '1' __UpperCamelCase = 'f32le' __UpperCamelCase = [ 'ffmpeg', '-i', 'pipe:0', '-ac', ac, '-ar', ar, '-f', format_for_conversion, '-hide_banner', '-loglevel', 'quiet', 'pipe:1', ] try: with subprocess.Popen(_lowercase , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process: __UpperCamelCase = ffmpeg_process.communicate(_lowercase ) except FileNotFoundError as error: raise ValueError('ffmpeg was not found but is required to load audio files from filename' ) from error __UpperCamelCase = output_stream[0] __UpperCamelCase = np.frombuffer(_lowercase , np.floataa ) if audio.shape[0] == 0: raise ValueError('Malformed soundfile' ) return audio def _A ( _lowercase , _lowercase , _lowercase = "f32le" , ) -> List[str]: """simple docstring""" __UpperCamelCase = f'''{sampling_rate}''' __UpperCamelCase = '1' if format_for_conversion == "s16le": __UpperCamelCase = 2 elif format_for_conversion == "f32le": __UpperCamelCase = 4 else: raise ValueError(f'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' ) __UpperCamelCase = platform.system() if system == "Linux": __UpperCamelCase = 'alsa' __UpperCamelCase = 'default' elif system == "Darwin": __UpperCamelCase = 'avfoundation' __UpperCamelCase = ':0' elif system == "Windows": __UpperCamelCase = 'dshow' __UpperCamelCase = 'default' __UpperCamelCase = [ 'ffmpeg', '-f', format_, '-i', input_, '-ac', ac, '-ar', ar, '-f', format_for_conversion, '-fflags', 'nobuffer', '-hide_banner', '-loglevel', 'quiet', 'pipe:1', ] __UpperCamelCase = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample __UpperCamelCase = _ffmpeg_stream(_lowercase , _lowercase ) for item in iterator: yield item def _A ( _lowercase , _lowercase , _lowercase = None , _lowercase = None , _lowercase = "f32le" , ) -> Tuple: """simple docstring""" if stream_chunk_s is not None: __UpperCamelCase = stream_chunk_s else: __UpperCamelCase = chunk_length_s __UpperCamelCase = ffmpeg_microphone(_lowercase , _lowercase , format_for_conversion=_lowercase ) if format_for_conversion == "s16le": __UpperCamelCase = np.intaa __UpperCamelCase = 2 elif format_for_conversion == "f32le": __UpperCamelCase = np.floataa __UpperCamelCase = 4 else: raise ValueError(f'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' ) if stride_length_s is None: __UpperCamelCase = chunk_length_s / 6 __UpperCamelCase = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample if isinstance(_lowercase , (int, float) ): __UpperCamelCase = [stride_length_s, stride_length_s] __UpperCamelCase = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample __UpperCamelCase = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample __UpperCamelCase = datetime.datetime.now() __UpperCamelCase = datetime.timedelta(seconds=_lowercase ) for item in chunk_bytes_iter(_lowercase , _lowercase , stride=(stride_left, stride_right) , stream=_lowercase ): # Put everything back in numpy scale __UpperCamelCase = np.frombuffer(item['raw'] , dtype=_lowercase ) __UpperCamelCase = ( item['stride'][0] // size_of_sample, item['stride'][1] // size_of_sample, ) __UpperCamelCase = sampling_rate audio_time += delta if datetime.datetime.now() > audio_time + 10 * delta: # We're late !! 
SKIP continue yield item def _A ( _lowercase , _lowercase , _lowercase , _lowercase = False ) -> Optional[int]: """simple docstring""" __UpperCamelCase = B'' __UpperCamelCase, __UpperCamelCase = stride if stride_left + stride_right >= chunk_len: raise ValueError( f'''Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}''' ) __UpperCamelCase = 0 for raw in iterator: acc += raw if stream and len(_lowercase ) < chunk_len: __UpperCamelCase = (_stride_left, 0) yield {"raw": acc[:chunk_len], "stride": stride, "partial": True} else: while len(_lowercase ) >= chunk_len: # We are flushing the accumulator __UpperCamelCase = (_stride_left, stride_right) __UpperCamelCase = {'raw': acc[:chunk_len], 'stride': stride} if stream: __UpperCamelCase = False yield item __UpperCamelCase = stride_left __UpperCamelCase = acc[chunk_len - stride_left - stride_right :] # Last chunk if len(_lowercase ) > stride_left: __UpperCamelCase = {'raw': acc, 'stride': (_stride_left, 0)} if stream: __UpperCamelCase = False yield item def _A ( _lowercase , _lowercase ) -> str: """simple docstring""" __UpperCamelCase = 2**24 # 16Mo try: with subprocess.Popen(_lowercase , stdout=subprocess.PIPE , bufsize=_lowercase ) as ffmpeg_process: while True: __UpperCamelCase = ffmpeg_process.stdout.read(_lowercase ) if raw == b"": break yield raw except FileNotFoundError as error: raise ValueError('ffmpeg was not found but is required to stream audio files from filename' ) from error
1
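The `chunk_bytes_iter` helper above emits fixed-size windows that overlap by `stride_left + stride_right` bytes, so consumers can discard the edges of each window. A toy version of the same sliding-window arithmetic, ignoring the sample's streaming and trailing-partial-chunk cases:

def sliding_chunks(data: bytes, chunk_len: int, stride_left: int, stride_right: int):
    # After the first window, each new window starts
    # chunk_len - stride_left - stride_right bytes later.
    step = chunk_len - stride_left - stride_right
    start = 0
    while start + chunk_len <= len(data):
        yield data[start : start + chunk_len]
        start += step

print(list(sliding_chunks(b"abcdefghij", chunk_len=4, stride_left=1, stride_right=1)))
# [b'abcd', b'cdef', b'efgh', b'ghij']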
import logging import os import sys from dataclasses import dataclass, field from importlib import import_module from typing import Dict, List, Optional, Tuple import numpy as np from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score from torch import nn from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask import transformers from transformers import ( AutoConfig, AutoModelForTokenClassification, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process __snake_case = logging.getLogger(__name__) @dataclass class __lowerCamelCase : _lowercase = field( metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} ) _lowercase = field( default=_a , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} ) _lowercase = field( default="""NER""" , metadata={"""help""": """Task type to fine tune in training (e.g. NER, POS, etc)"""} ) _lowercase = field( default=_a , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} ) _lowercase = field(default=_a , metadata={"""help""": """Set this flag to use fast tokenization."""} ) # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script, # or just modify its tokenizer_config.json. _lowercase = field( default=_a , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , ) @dataclass class __lowerCamelCase : _lowercase = field( metadata={"""help""": """The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."""} ) _lowercase = field( default=_a , metadata={"""help""": """Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."""} , ) _lowercase = field( default=128 , metadata={ """help""": ( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } , ) _lowercase = field( default=_a , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} ) def _A ( ) -> str: """simple docstring""" __UpperCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. __UpperCamelCase, __UpperCamelCase, __UpperCamelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: __UpperCamelCase, __UpperCamelCase, __UpperCamelCase = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use''' ' --overwrite_output_dir to overcome.' ) __UpperCamelCase = import_module('tasks' ) try: __UpperCamelCase = getattr(_lowercase , model_args.task_type ) __UpperCamelCase = token_classification_task_clazz() except AttributeError: raise ValueError( f'''Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. 
''' f'''Available tasks classes are: {TokenClassificationTask.__subclasses__()}''' ) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( 'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info('Training/evaluation parameters %s' , _lowercase ) # Set seed set_seed(training_args.seed ) # Prepare CONLL-2003 task __UpperCamelCase = token_classification_task.get_labels(data_args.labels ) __UpperCamelCase = dict(enumerate(_lowercase ) ) __UpperCamelCase = len(_lowercase ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. __UpperCamelCase = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=_lowercase , idalabel=_lowercase , labelaid={label: i for i, label in enumerate(_lowercase )} , cache_dir=model_args.cache_dir , ) __UpperCamelCase = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , ) __UpperCamelCase = AutoModelForTokenClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=_lowercase , cache_dir=model_args.cache_dir , ) # Get datasets __UpperCamelCase = ( TokenClassificationDataset( token_classification_task=_lowercase , data_dir=data_args.data_dir , tokenizer=_lowercase , labels=_lowercase , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , ) if training_args.do_train else None ) __UpperCamelCase = ( TokenClassificationDataset( token_classification_task=_lowercase , data_dir=data_args.data_dir , tokenizer=_lowercase , labels=_lowercase , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , ) if training_args.do_eval else None ) def align_predictions(_lowercase , _lowercase ) -> Tuple[List[int], List[int]]: __UpperCamelCase = np.argmax(_lowercase , axis=2 ) __UpperCamelCase, __UpperCamelCase = preds.shape __UpperCamelCase = [[] for _ in range(_lowercase )] __UpperCamelCase = [[] for _ in range(_lowercase )] for i in range(_lowercase ): for j in range(_lowercase ): if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index: out_label_list[i].append(label_map[label_ids[i][j]] ) preds_list[i].append(label_map[preds[i][j]] ) return preds_list, out_label_list def compute_metrics(_lowercase ) -> Dict: __UpperCamelCase, __UpperCamelCase = align_predictions(p.predictions , p.label_ids ) return { "accuracy_score": accuracy_score(_lowercase , _lowercase ), "precision": precision_score(_lowercase , _lowercase ), "recall": recall_score(_lowercase , _lowercase ), "f1": fa_score(_lowercase , _lowercase ), } # Data 
collator __UpperCamelCase = DataCollatorWithPadding(_lowercase , pad_to_multiple_of=8 ) if training_args.fpaa else None # Initialize our Trainer __UpperCamelCase = Trainer( model=_lowercase , args=_lowercase , train_dataset=_lowercase , eval_dataset=_lowercase , compute_metrics=_lowercase , data_collator=_lowercase , ) # Training if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_process_zero(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation __UpperCamelCase = {} if training_args.do_eval: logger.info('*** Evaluate ***' ) __UpperCamelCase = trainer.evaluate() __UpperCamelCase = os.path.join(training_args.output_dir , 'eval_results.txt' ) if trainer.is_world_process_zero(): with open(_lowercase , 'w' ) as writer: logger.info('***** Eval results *****' ) for key, value in result.items(): logger.info(' %s = %s' , _lowercase , _lowercase ) writer.write('%s = %s\n' % (key, value) ) results.update(_lowercase ) # Predict if training_args.do_predict: __UpperCamelCase = TokenClassificationDataset( token_classification_task=_lowercase , data_dir=data_args.data_dir , tokenizer=_lowercase , labels=_lowercase , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , ) __UpperCamelCase, __UpperCamelCase, __UpperCamelCase = trainer.predict(_lowercase ) __UpperCamelCase, __UpperCamelCase = align_predictions(_lowercase , _lowercase ) __UpperCamelCase = os.path.join(training_args.output_dir , 'test_results.txt' ) if trainer.is_world_process_zero(): with open(_lowercase , 'w' ) as writer: for key, value in metrics.items(): logger.info(' %s = %s' , _lowercase , _lowercase ) writer.write('%s = %s\n' % (key, value) ) # Save predictions __UpperCamelCase = os.path.join(training_args.output_dir , 'test_predictions.txt' ) if trainer.is_world_process_zero(): with open(_lowercase , 'w' ) as writer: with open(os.path.join(data_args.data_dir , 'test.txt' ) , 'r' ) as f: token_classification_task.write_predictions_to_file(_lowercase , _lowercase , _lowercase ) return results def _A ( _lowercase ) -> Dict: """simple docstring""" main() if __name__ == "__main__": main()
1
1
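Inside `align_predictions` in the script above, every position whose gold label equals `nn.CrossEntropyLoss().ignore_index` (-100) is dropped, so padding and sub-word positions never reach the seqeval metrics. The filtering step in miniature, with a hypothetical label map:

import numpy as np

IGNORE_INDEX = -100  # nn.CrossEntropyLoss().ignore_index
label_map = {0: "O", 1: "B-PER"}  # hypothetical two-label scheme

preds = np.array([0, 1, 0])
label_ids = np.array([0, 1, IGNORE_INDEX])  # last position is padding

preds_list, out_label_list = [], []
for p, g in zip(preds, label_ids):
    if g != IGNORE_INDEX:
        preds_list.append(label_map[int(p)])
        out_label_list.append(label_map[int(g)])
print(preds_list, out_label_list)  # ['O', 'B-PER'] ['O', 'B-PER']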
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available, is_vision_available, ) __snake_case = {'''configuration_beit''': ['''BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BeitConfig''', '''BeitOnnxConfig''']} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case = ['''BeitFeatureExtractor'''] __snake_case = ['''BeitImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case = [ '''BEIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''BeitForImageClassification''', '''BeitForMaskedImageModeling''', '''BeitForSemanticSegmentation''', '''BeitModel''', '''BeitPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case = [ '''FlaxBeitForImageClassification''', '''FlaxBeitForMaskedImageModeling''', '''FlaxBeitModel''', '''FlaxBeitPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_beit import BeitFeatureExtractor from .image_processing_beit import BeitImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_beit import ( BEIT_PRETRAINED_MODEL_ARCHIVE_LIST, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation, BeitModel, BeitPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_beit import ( FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel, FlaxBeitPreTrainedModel, ) else: import sys __snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
1
# # This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or # many nodes) can talk to each other via nccl and allocate gpu memory. # # To run first adjust the number of processes and nodes: # # python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py # # You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port # # You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d # # use torch.distributed.launch instead of torch.distributed.run for torch < 1.9 # # If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with: # # NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py # # which should tell you what's going on behind the scenes. # # # This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that # runs on 2 nodes of 4 gpus per node: # # #SBATCH --job-name=test-nodes # name # #SBATCH --nodes=2 # nodes # #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! # #SBATCH --cpus-per-task=10 # number of cores per tasks # #SBATCH --gres=gpu:4 # number of gpus # #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS) # #SBATCH --output=%x-%j.out # output file name # # GPUS_PER_NODE=4 # MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) # MASTER_PORT=6000 # # srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \ # --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \ # --master_addr $MASTER_ADDR --master_port $MASTER_PORT \ # torch-distributed-gpu-test.py' # import fcntl import os import socket import torch import torch.distributed as dist def _A ( *_lowercase ) -> Tuple: """simple docstring""" with open(_lowercase , 'r' ) as fh: fcntl.flock(_lowercase , fcntl.LOCK_EX ) try: print(*_lowercase ) finally: fcntl.flock(_lowercase , fcntl.LOCK_UN ) __snake_case = int(os.environ['''LOCAL_RANK''']) torch.cuda.set_device(local_rank) __snake_case = torch.device('''cuda''', local_rank) __snake_case = socket.gethostname() __snake_case = f"""[{hostname}-{local_rank}]""" try: # test distributed dist.init_process_group('''nccl''') dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM) dist.barrier() # test cuda is available and can allocate memory torch.cuda.is_available() torch.ones(1).cuda(local_rank) # global rank __snake_case = dist.get_rank() __snake_case = dist.get_world_size() printflock(f"""{gpu} is OK (global rank: {rank}/{world_size})""") dist.barrier() if rank == 0: printflock(f"""pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}""") except Exception: printflock(f"""{gpu} is broken""") raise
1
1
from typing import Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images from ...utils import TensorType, logging __snake_case = logging.get_logger(__name__) class __lowerCamelCase (_a ): _lowercase = ["""pixel_values"""] def __init__( self: Any,A_: bool = True,A_: Union[int, float] = 1 / 255,A_: bool = True,A_: int = 8,**A_: Optional[Any],): '''simple docstring''' super().__init__(**A_ ) __UpperCamelCase = do_rescale __UpperCamelCase = rescale_factor __UpperCamelCase = do_pad __UpperCamelCase = pad_size def snake_case_ ( self: Any,A_: np.ndarray,A_: float,A_: Optional[Union[str, ChannelDimension]] = None,**A_: Tuple ): '''simple docstring''' return rescale(A_,scale=A_,data_format=A_,**A_ ) def snake_case_ ( self: int,A_: np.ndarray,A_: int,A_: Optional[Union[str, ChannelDimension]] = None ): '''simple docstring''' __UpperCamelCase, __UpperCamelCase = get_image_size(A_ ) __UpperCamelCase = (old_height // size + 1) * size - old_height __UpperCamelCase = (old_width // size + 1) * size - old_width return pad(A_,((0, pad_height), (0, pad_width)),mode='symmetric',data_format=A_ ) def snake_case_ ( self: Optional[int],A_: ImageInput,A_: Optional[bool] = None,A_: Optional[float] = None,A_: Optional[bool] = None,A_: Optional[int] = None,A_: Optional[Union[str, TensorType]] = None,A_: Union[str, ChannelDimension] = ChannelDimension.FIRST,**A_: List[str],): '''simple docstring''' __UpperCamelCase = do_rescale if do_rescale is not None else self.do_rescale __UpperCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor __UpperCamelCase = do_pad if do_pad is not None else self.do_pad __UpperCamelCase = pad_size if pad_size is not None else self.pad_size __UpperCamelCase = make_list_of_images(A_ ) if not valid_images(A_ ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) # All transformations expect numpy arrays. __UpperCamelCase = [to_numpy_array(A_ ) for image in images] if do_rescale: __UpperCamelCase = [self.rescale(image=A_,scale=A_ ) for image in images] if do_pad: __UpperCamelCase = [self.pad(A_,size=A_ ) for image in images] __UpperCamelCase = [to_channel_dimension_format(A_,A_ ) for image in images] __UpperCamelCase = {'pixel_values': images} return BatchFeature(data=A_,tensor_type=A_ )
1
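Note the padding rule in the image processor above: `(old // size + 1) * size - old` always pads up to the next multiple of `size` strictly greater than the current value, so a dimension that is already a multiple still gains a full extra block. The arithmetic in isolation:

def pad_amount(old: int, size: int) -> int:
    # Mirrors the sample's pad computation.
    return (old // size + 1) * size - old

assert pad_amount(21, 8) == 3  # 21 -> 24
assert pad_amount(16, 8) == 8  # 16 -> 24, not 16: exact multiples still grow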
import pytest import datasets # Import fixture modules as plugins __snake_case = ['''tests.fixtures.files''', '''tests.fixtures.hub''', '''tests.fixtures.fsspec'''] def _A ( _lowercase , _lowercase ) -> Tuple: """simple docstring""" for item in items: if any(marker in item.keywords for marker in ['integration', 'unit'] ): continue item.add_marker(pytest.mark.unit ) def _A ( _lowercase ) -> str: """simple docstring""" config.addinivalue_line('markers' , 'torchaudio_latest: mark test to run with torchaudio>=0.12' ) @pytest.fixture(autouse=_lowercase ) def _A ( _lowercase , _lowercase ) -> Any: """simple docstring""" __UpperCamelCase = tmp_path_factory.getbasetemp() / 'cache' __UpperCamelCase = test_hf_cache_home / 'datasets' __UpperCamelCase = test_hf_cache_home / 'metrics' __UpperCamelCase = test_hf_cache_home / 'modules' monkeypatch.setattr('datasets.config.HF_DATASETS_CACHE' , str(_lowercase ) ) monkeypatch.setattr('datasets.config.HF_METRICS_CACHE' , str(_lowercase ) ) monkeypatch.setattr('datasets.config.HF_MODULES_CACHE' , str(_lowercase ) ) __UpperCamelCase = test_hf_datasets_cache / 'downloads' monkeypatch.setattr('datasets.config.DOWNLOADED_DATASETS_PATH' , str(_lowercase ) ) __UpperCamelCase = test_hf_datasets_cache / 'downloads' / 'extracted' monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_PATH' , str(_lowercase ) ) @pytest.fixture(autouse=_lowercase , scope='session' ) def _A ( ) -> Dict: """simple docstring""" datasets.disable_progress_bar() @pytest.fixture(autouse=_lowercase ) def _A ( _lowercase ) -> Tuple: """simple docstring""" monkeypatch.setattr('datasets.config.HF_UPDATE_DOWNLOAD_COUNTS' , _lowercase ) @pytest.fixture def _A ( _lowercase ) -> Any: """simple docstring""" monkeypatch.setattr('sqlalchemy.util.deprecations.SILENCE_UBER_WARNING' , _lowercase )
1
1
import baseaa def _A ( _lowercase ) -> bytes: """simple docstring""" return baseaa.aaaencode(string.encode('utf-8' ) ) def _A ( _lowercase ) -> str: """simple docstring""" return baseaa.aaadecode(_lowercase ).decode('utf-8' ) if __name__ == "__main__": import doctest doctest.testmod()
1
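The masked `baseaa.aaaencode`/`baseaa.aaadecode` pair above reads as `base64.a85encode`/`base64.a85decode` (an assumption based on the masking pattern). A roundtrip check under that reading:

import base64

message = "héllo"  # non-ASCII text survives via the explicit UTF-8 step
encoded = base64.a85encode(message.encode("utf-8"))
decoded = base64.a85decode(encoded).decode("utf-8")
assert decoded == message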
import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, UNetaDConditionModel, VideoToVideoSDPipeline, ) from diffusers.utils import floats_tensor, is_xformers_available, skip_mps from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() @skip_mps class __lowerCamelCase (_a , unittest.TestCase ): _lowercase = VideoToVideoSDPipeline _lowercase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"""video"""} ) - {"""image""", """width""", """height"""} _lowercase = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"""video"""} ) - {"""image"""} _lowercase = PipelineTesterMixin.required_optional_params - {"""latents"""} _lowercase = False # No `output_type`. _lowercase = frozenset( [ """num_inference_steps""", """generator""", """latents""", """return_dict""", """callback""", """callback_steps""", ] ) def snake_case_ ( self: List[str] ): '''simple docstring''' torch.manual_seed(0 ) __UpperCamelCase = UNetaDConditionModel( block_out_channels=(32, 64, 64, 64),layers_per_block=2,sample_size=32,in_channels=4,out_channels=4,down_block_types=('CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'DownBlock3D'),up_block_types=('UpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D'),cross_attention_dim=32,attention_head_dim=4,) __UpperCamelCase = DDIMScheduler( beta_start=0.0_0_0_8_5,beta_end=0.0_1_2,beta_schedule='scaled_linear',clip_sample=A_,set_alpha_to_one=A_,) torch.manual_seed(0 ) __UpperCamelCase = AutoencoderKL( block_out_channels=[32, 64],in_channels=3,out_channels=3,down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'],up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'],latent_channels=4,sample_size=128,) torch.manual_seed(0 ) __UpperCamelCase = CLIPTextConfig( bos_token_id=0,eos_token_id=2,hidden_size=32,intermediate_size=37,layer_norm_eps=1E-05,num_attention_heads=4,num_hidden_layers=5,pad_token_id=1,vocab_size=1000,hidden_act='gelu',projection_dim=512,) __UpperCamelCase = CLIPTextModel(A_ ) __UpperCamelCase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) __UpperCamelCase = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, } return components def snake_case_ ( self: Union[str, Any],A_: Any,A_: Any=0 ): '''simple docstring''' __UpperCamelCase = floats_tensor((1, 3, 3, 32, 32),rng=random.Random(A_ ) ).to(A_ ) if str(A_ ).startswith('mps' ): __UpperCamelCase = torch.manual_seed(A_ ) else: __UpperCamelCase = torch.Generator(device=A_ ).manual_seed(A_ ) __UpperCamelCase = { 'prompt': 'A painting of a squirrel eating a burger', 'video': video, 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 6.0, 'output_type': 'pt', } return inputs def snake_case_ ( self: Union[str, Any] ): '''simple docstring''' __UpperCamelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator __UpperCamelCase = self.get_dummy_components() __UpperCamelCase = VideoToVideoSDPipeline(**A_ ) __UpperCamelCase = sd_pipe.to(A_ ) sd_pipe.set_progress_bar_config(disable=A_ ) __UpperCamelCase = self.get_dummy_inputs(A_ ) __UpperCamelCase = 'np' __UpperCamelCase = sd_pipe(**A_ ).frames __UpperCamelCase = frames[0][-3:, -3:, -1] 
assert frames[0].shape == (32, 32, 3) __UpperCamelCase = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available(),reason='XFormers attention is only available with CUDA and `xformers` installed',) def snake_case_ ( self: Any ): '''simple docstring''' self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=A_,expected_max_diff=5E-3 ) @unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' ) def snake_case_ ( self: str ): '''simple docstring''' pass @unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' ) def snake_case_ ( self: Optional[Any] ): '''simple docstring''' pass @unittest.skip(reason='`num_images_per_prompt` argument is not supported for this pipeline.' ) def snake_case_ ( self: int ): '''simple docstring''' pass def snake_case_ ( self: Any ): '''simple docstring''' return super().test_progress_bar() @slow @skip_mps class __lowerCamelCase (unittest.TestCase ): def snake_case_ ( self: Tuple ): '''simple docstring''' __UpperCamelCase = VideoToVideoSDPipeline.from_pretrained('cerspense/zeroscope_v2_XL',torch_dtype=torch.floataa ) pipe.enable_model_cpu_offload() # 10 frames __UpperCamelCase = torch.Generator(device='cpu' ).manual_seed(0 ) __UpperCamelCase = torch.randn((1, 10, 3, 1024, 576),generator=A_ ) __UpperCamelCase = video.to('cuda' ) __UpperCamelCase = 'Spiderman is surfing' __UpperCamelCase = pipe(A_,video=A_,generator=A_,num_inference_steps=3,output_type='pt' ).frames __UpperCamelCase = np.array([-1.0_4_5_8_9_8_4, -1.1_2_7_9_2_9_7, -0.9_6_6_3_0_8_6, -0.9_1_5_0_3_9_0_6, -0.7_5_0_9_7_6_5_6] ) assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1E-2
1
1
import argparse import gc import json import os import re import torch from huggingface_hub import hf_hub_download from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint __snake_case = { '''169M''': 1_2, '''430M''': 2_4, '''1B5''': 2_4, '''3B''': 3_2, '''7B''': 3_2, '''14B''': 4_0, } __snake_case = { '''169M''': 7_6_8, '''430M''': 1_0_2_4, '''1B5''': 2_0_4_8, '''3B''': 2_5_6_0, '''7B''': 4_0_9_6, '''14B''': 5_1_2_0, } def _A ( _lowercase ) -> List[str]: """simple docstring""" __UpperCamelCase = list(state_dict.keys() ) for name in state_dict_keys: __UpperCamelCase = state_dict.pop(_lowercase ) # emb -> embedding if name.startswith('emb.' ): __UpperCamelCase = name.replace('emb.' , 'embeddings.' ) # ln_0 -> pre_ln (only present at block 0) if name.startswith('blocks.0.ln0' ): __UpperCamelCase = name.replace('blocks.0.ln0' , 'blocks.0.pre_ln' ) # att -> attention __UpperCamelCase = re.sub(r'blocks\.(\d+)\.att' , r'blocks.\1.attention' , _lowercase ) # ffn -> feed_forward __UpperCamelCase = re.sub(r'blocks\.(\d+)\.ffn' , r'blocks.\1.feed_forward' , _lowercase ) # time_mix_k -> time_mix_key and reshape if name.endswith('.time_mix_k' ): __UpperCamelCase = name.replace('.time_mix_k' , '.time_mix_key' ) # time_mix_v -> time_mix_value and reshape if name.endswith('.time_mix_v' ): __UpperCamelCase = name.replace('.time_mix_v' , '.time_mix_value' ) # time_mix_r -> time_mix_receptance and reshape if name.endswith('.time_mix_r' ): __UpperCamelCase = name.replace('.time_mix_r' , '.time_mix_receptance' ) if name != "head.weight": __UpperCamelCase = 'rwkv.' + name __UpperCamelCase = weight return state_dict def _A ( _lowercase , _lowercase , _lowercase , _lowercase=None , _lowercase=None , _lowercase=False , _lowercase=None ) -> Any: """simple docstring""" if tokenizer_file is None: print('No `--tokenizer_file` provided, we will use the default tokenizer.' ) __UpperCamelCase = 5_02_77 __UpperCamelCase = AutoTokenizer.from_pretrained('EleutherAI/gpt-neox-20b' ) else: __UpperCamelCase = PreTrainedTokenizerFast(tokenizer_file=_lowercase ) __UpperCamelCase = len(_lowercase ) tokenizer.save_pretrained(_lowercase ) # 2. Build the config __UpperCamelCase = list(NUM_HIDDEN_LAYERS_MAPPING.keys() ) if size is None: # Try to infer size from the checkpoint name for candidate in possible_sizes: if candidate in checkpoint_file: __UpperCamelCase = candidate break if size is None: raise ValueError('Could not infer the size, please provide it with the `--size` argument.' ) if size not in possible_sizes: raise ValueError(f'''`size` should be one of {possible_sizes}, got {size}.''' ) __UpperCamelCase = RwkvConfig( vocab_size=_lowercase , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , ) config.save_pretrained(_lowercase ) # 3. Download model file then convert state_dict __UpperCamelCase = hf_hub_download(_lowercase , _lowercase ) __UpperCamelCase = torch.load(_lowercase , map_location='cpu' ) __UpperCamelCase = convert_state_dict(_lowercase ) # 4.
Split in shards and save __UpperCamelCase, __UpperCamelCase = shard_checkpoint(_lowercase ) for shard_file, shard in shards.items(): torch.save(_lowercase , os.path.join(_lowercase , _lowercase ) ) if index is not None: __UpperCamelCase = os.path.join(_lowercase , _lowercase ) # Save the index as well with open(_lowercase , 'w' , encoding='utf-8' ) as f: __UpperCamelCase = json.dumps(_lowercase , indent=2 , sort_keys=_lowercase ) + '\n' f.write(_lowercase ) # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict) print( 'Cleaning up shards. This may error with an OOM error; if this is the case, don\'t worry, you still have converted the model.' ) __UpperCamelCase = list(shards.keys() ) del state_dict del shards gc.collect() for shard_file in shard_files: __UpperCamelCase = torch.load(os.path.join(_lowercase , _lowercase ) ) torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(_lowercase , _lowercase ) ) del state_dict gc.collect() if push_to_hub: if model_name is None: raise ValueError('Please provide a `model_name` to push the model to the Hub.' ) __UpperCamelCase = AutoModelForCausalLM.from_pretrained(_lowercase ) model.push_to_hub(_lowercase , max_shard_size='2GB' ) tokenizer.push_to_hub(_lowercase ) if __name__ == "__main__": __snake_case = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--repo_id''', default=None, type=str, required=True, help='''Repo ID from which to pull the checkpoint.''' ) parser.add_argument( '''--checkpoint_file''', default=None, type=str, required=True, help='''Name of the checkpoint file in the repo.''' ) parser.add_argument( '''--output_dir''', default=None, type=str, required=True, help='''Where to save the converted model.''' ) parser.add_argument( '''--tokenizer_file''', default=None, type=str, help='''Path to the tokenizer file to use (if not provided, only the model is converted).''', ) parser.add_argument( '''--size''', default=None, type=str, help='''Size of the model. Will be inferred from the `checkpoint_file` if not passed.''', ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Push to the Hub the converted model.''', ) parser.add_argument( '''--model_name''', default=None, type=str, help='''Name of the pushed model on the Hub, including the username / organization.''', ) __snake_case = parser.parse_args() convert_rmkv_checkpoint_to_hf_format( args.repo_id, args.checkpoint_file, args.output_dir, size=args.size, tokenizer_file=args.tokenizer_file, push_to_hub=args.push_to_hub, model_name=args.model_name, )
1
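The state-dict renaming in the script above leans on regex backreferences so one pattern covers every block index. The two rewrites in isolation, applied to a hypothetical checkpoint key:

import re

name = "blocks.3.att.key.weight"  # hypothetical RWKV checkpoint key
name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", name)
name = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", name)
print(name)  # blocks.3.attention.key.weight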
import argparse from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline if __name__ == "__main__": __snake_case = argparse.ArgumentParser() parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''') parser.add_argument( '''--txt2img_unclip''', default='''kakaobrain/karlo-v1-alpha''', type=str, required=False, help='''The pretrained txt2img unclip.''', ) __snake_case = parser.parse_args() __snake_case = UnCLIPPipeline.from_pretrained(args.txtaimg_unclip) __snake_case = CLIPImageProcessor() __snake_case = CLIPVisionModelWithProjection.from_pretrained('''openai/clip-vit-large-patch14''') __snake_case = UnCLIPImageVariationPipeline( decoder=txtaimg.decoder, text_encoder=txtaimg.text_encoder, tokenizer=txtaimg.tokenizer, text_proj=txtaimg.text_proj, feature_extractor=feature_extractor, image_encoder=image_encoder, super_res_first=txtaimg.super_res_first, super_res_last=txtaimg.super_res_last, decoder_scheduler=txtaimg.decoder_scheduler, super_res_scheduler=txtaimg.super_res_scheduler, ) imgaimg.save_pretrained(args.dump_path)
1
1
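The sharding pass in the record above splits a single state_dict into size-capped shard files plus a JSON index mapping every parameter to its shard. A minimal standalone sketch of that pattern, using only torch and the standard library; the greedy size-based split below is a simplified stand-in for transformers' shard_checkpoint, and all file names are illustrative:

import json
import os

import torch


def save_sharded(state_dict, save_dir, max_shard_bytes=2 * 1024**3):
    # Greedily pack tensors into shards of at most max_shard_bytes each.
    os.makedirs(save_dir, exist_ok=True)
    shards, current, current_size = [], {}, 0
    for name, tensor in state_dict.items():
        size = tensor.numel() * tensor.element_size()
        if current and current_size + size > max_shard_bytes:
            shards.append(current)
            current, current_size = {}, 0
        current[name] = tensor
        current_size += size
    shards.append(current)

    # Write each shard, remembering which file holds which parameter.
    weight_map = {}
    for i, shard in enumerate(shards, start=1):
        shard_file = f"pytorch_model-{i:05d}-of-{len(shards):05d}.bin"
        torch.save(shard, os.path.join(save_dir, shard_file))
        weight_map.update({name: shard_file for name in shard})

    # The index file is what shard-aware loaders read first.
    index = {"weight_map": weight_map}
    with open(os.path.join(save_dir, "pytorch_model.bin.index.json"), "w", encoding="utf-8") as f:
        f.write(json.dumps(index, indent=2, sort_keys=True) + "\n")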
class __lowerCamelCase : def __init__( self: Union[str, Any],A_: Tuple ): '''simple docstring''' __UpperCamelCase = val __UpperCamelCase = None __UpperCamelCase = None def snake_case_ ( self: Any,A_: List[Any] ): '''simple docstring''' if self.val: if val < self.val: if self.left is None: __UpperCamelCase = Node(A_ ) else: self.left.insert(A_ ) elif val > self.val: if self.right is None: __UpperCamelCase = Node(A_ ) else: self.right.insert(A_ ) else: __UpperCamelCase = val def _A ( _lowercase , _lowercase ) -> Tuple: """simple docstring""" if root: inorder(root.left , _lowercase ) res.append(root.val ) inorder(root.right , _lowercase ) def _A ( _lowercase ) -> Optional[int]: """simple docstring""" if len(_lowercase ) == 0: return arr __UpperCamelCase = Node(arr[0] ) for i in range(1 , len(_lowercase ) ): root.insert(arr[i] ) # Traverse BST in order. __UpperCamelCase = [] inorder(_lowercase , _lowercase ) return res if __name__ == "__main__": print(tree_sort([1_0, 1, 3, 2, 9, 1_4, 1_3]))
1
from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __snake_case = { '''configuration_autoformer''': [ '''AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''AutoformerConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case = [ '''AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''AutoformerForPrediction''', '''AutoformerModel''', '''AutoformerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_autoformer import ( AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, AutoformerConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_autoformer import ( AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, AutoformerForPrediction, AutoformerModel, AutoformerPreTrainedModel, ) else: import sys __snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
1
1
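Two properties of the tree_sort record above are worth making explicit: the in-order traversal returns values in ascending order, and insert() lands in its final else branch on an equal key, overwriting in place, so duplicates are silently dropped. A quick check, assuming the Node, inorder, and tree_sort definitions from that record:

# Distinct values come back in ascending order.
assert tree_sort([10, 1, 3, 2, 9, 14, 13]) == [1, 2, 3, 9, 10, 13, 14]

# Equal keys are collapsed into one node, so this is not a multiset sort.
assert tree_sort([5, 3, 5, 1]) == [1, 3, 5]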
import ast import os import re import shutil import tempfile import unittest from unittest import mock import torch from accelerate.test_utils.examples import compare_against_test from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow from accelerate.utils import write_basic_config # DataLoaders built from `test_samples/MRPC` for quick testing # Should mock `{script_name}.get_dataloaders` via: # @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders) __snake_case = [ '''cross_validation.py''', '''gradient_accumulation.py''', '''local_sgd.py''', '''multi_process_metrics.py''', '''memory.py''', '''automatic_gradient_accumulation.py''', '''fsdp_with_peak_mem_tracking.py''', '''deepspeed_with_config_support.py''', '''megatron_lm_gpt_pretraining.py''', ] class __lowerCamelCase (unittest.TestCase ): def snake_case_ ( self: List[str],A_: str,A_: bool,A_: str = None,A_: list = None ): '''simple docstring''' __UpperCamelCase = None __UpperCamelCase = os.path.abspath(os.path.join('examples','by_feature' ) ) __UpperCamelCase = os.path.abspath('examples' ) for item in os.listdir(A_ ): if item not in EXCLUDE_EXAMPLES: __UpperCamelCase = os.path.join(A_,A_ ) if os.path.isfile(A_ ) and ".py" in item_path: with self.subTest( tested_script=A_,feature_script=A_,tested_section='main()' if parser_only else 'training_function()',): __UpperCamelCase = compare_against_test( os.path.join(A_,A_ ),A_,A_,A_ ) __UpperCamelCase = '\n'.join(A_ ) if special_strings is not None: for string in special_strings: __UpperCamelCase = diff.replace(A_,'' ) self.assertEqual(A_,'' ) def snake_case_ ( self: int ): '''simple docstring''' self.one_complete_example('complete_nlp_example.py',A_ ) self.one_complete_example('complete_nlp_example.py',A_ ) def snake_case_ ( self: Any ): '''simple docstring''' __UpperCamelCase = os.path.abspath(os.path.join('examples','cv_example.py' ) ) __UpperCamelCase = [ ' ' * 16 + '{\n\n', ' ' * 20 + '"accuracy": eval_metric["accuracy"],\n\n', ' ' * 20 + '"f1": eval_metric["f1"],\n\n', ' ' * 20 + '"train_loss": total_loss.item() / len(train_dataloader),\n\n', ' ' * 20 + '"epoch": epoch,\n\n', ' ' * 16 + '},\n\n', ' ' * 16 + 'step=epoch,\n', ' ' * 12, ' ' * 8 + 'for step, batch in enumerate(active_dataloader):\n', ] self.one_complete_example('complete_cv_example.py',A_,A_,A_ ) self.one_complete_example('complete_cv_example.py',A_,A_,A_ ) @mock.patch.dict(os.environ , {"""TESTING_MOCKED_DATALOADERS""": """1"""} ) class __lowerCamelCase (_a ): _lowercase = False @classmethod def snake_case_ ( cls: int ): '''simple docstring''' super().setUpClass() __UpperCamelCase = tempfile.mkdtemp() __UpperCamelCase = os.path.join(cls._tmpdir,'default_config.yml' ) write_basic_config(save_location=cls.configPath ) __UpperCamelCase = ['accelerate', 'launch', '--config_file', cls.configPath] @classmethod def snake_case_ ( cls: Optional[Any] ): '''simple docstring''' super().tearDownClass() shutil.rmtree(cls._tmpdir ) def snake_case_ ( self: Tuple ): '''simple docstring''' __UpperCamelCase = F''' examples/by_feature/checkpointing.py --checkpointing_steps epoch --output_dir {self.tmpdir} '''.split() run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(self.tmpdir,'epoch_0' ) ) ) def snake_case_ ( self: str ): '''simple docstring''' __UpperCamelCase = F''' examples/by_feature/checkpointing.py --checkpointing_steps 1 --output_dir {self.tmpdir} '''.split() __UpperCamelCase = run_command(self._launch_args + testargs ) 
self.assertTrue(os.path.exists(os.path.join(self.tmpdir,'step_2' ) ) ) def snake_case_ ( self: Optional[Any] ): '''simple docstring''' __UpperCamelCase = F''' examples/by_feature/checkpointing.py --resume_from_checkpoint {os.path.join(self.tmpdir,'epoch_0' )} '''.split() __UpperCamelCase = run_command(self._launch_args + testargs,return_stdout=A_ ) self.assertNotIn('epoch 0:',A_ ) self.assertIn('epoch 1:',A_ ) def snake_case_ ( self: str ): '''simple docstring''' __UpperCamelCase = F''' examples/by_feature/checkpointing.py --resume_from_checkpoint {os.path.join(self.tmpdir,'step_2' )} '''.split() __UpperCamelCase = run_command(self._launch_args + testargs,return_stdout=A_ ) if torch.cuda.is_available(): __UpperCamelCase = torch.cuda.device_count() else: __UpperCamelCase = 1 if num_processes > 1: self.assertNotIn('epoch 0:',A_ ) self.assertIn('epoch 1:',A_ ) else: self.assertIn('epoch 0:',A_ ) self.assertIn('epoch 1:',A_ ) @slow def snake_case_ ( self: Optional[Any] ): '''simple docstring''' __UpperCamelCase = '\n examples/by_feature/cross_validation.py\n --num_folds 2\n '.split() with mock.patch.dict(os.environ,{'TESTING_MOCKED_DATALOADERS': '0'} ): __UpperCamelCase = run_command(self._launch_args + testargs,return_stdout=A_ ) __UpperCamelCase = re.findall('({.+})',A_ ) __UpperCamelCase = [r for r in results if 'accuracy' in r][-1] __UpperCamelCase = ast.literal_eval(A_ ) self.assertGreaterEqual(results['accuracy'],0.7_5 ) def snake_case_ ( self: Tuple ): '''simple docstring''' __UpperCamelCase = ['examples/by_feature/multi_process_metrics.py'] run_command(self._launch_args + testargs ) @require_trackers @mock.patch.dict(os.environ,{'WANDB_MODE': 'offline'} ) def snake_case_ ( self: Dict ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdir: __UpperCamelCase = F''' examples/by_feature/tracking.py --with_tracking --project_dir {tmpdir} '''.split() run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(A_,'tracking' ) ) ) def snake_case_ ( self: List[str] ): '''simple docstring''' __UpperCamelCase = ['examples/by_feature/gradient_accumulation.py'] run_command(self._launch_args + testargs ) def snake_case_ ( self: List[Any] ): '''simple docstring''' __UpperCamelCase = ['examples/by_feature/local_sgd.py'] run_command(self._launch_args + testargs )
1
import argparse import os import re # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_dummies.py __snake_case = '''src/diffusers''' # Matches is_xxx_available() __snake_case = re.compile(r'''is\_([a-z_]*)_available\(\)''') # Matches from xxx import bla __snake_case = re.compile(r'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''') __snake_case = ''' {0} = None ''' __snake_case = ''' class {0}(metaclass=DummyObject): _backends = {1} def __init__(self, *args, **kwargs): requires_backends(self, {1}) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, {1}) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, {1}) ''' __snake_case = ''' def {0}(*args, **kwargs): requires_backends({0}, {1}) ''' def _A ( _lowercase ) -> int: """simple docstring""" __UpperCamelCase = _re_backend.findall(_lowercase ) if len(_lowercase ) == 0: return None return "_and_".join(_lowercase ) def _A ( ) -> Tuple: """simple docstring""" with open(os.path.join(_lowercase , '__init__.py' ) , 'r' , encoding='utf-8' , newline='\n' ) as f: __UpperCamelCase = f.readlines() # Get to the point we do the actual imports for type checking __UpperCamelCase = 0 __UpperCamelCase = {} # Go through the end of the file while line_index < len(_lowercase ): # If the line contains is_backend_available, we grab all objects associated with the `else` block __UpperCamelCase = find_backend(lines[line_index] ) if backend is not None: while not lines[line_index].startswith('else:' ): line_index += 1 line_index += 1 __UpperCamelCase = [] # Until we unindent, add backend objects to the list while line_index < len(_lowercase ) and len(lines[line_index] ) > 1: __UpperCamelCase = lines[line_index] __UpperCamelCase = _re_single_line_import.search(_lowercase ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(', ' ) ) elif line.startswith(' ' * 8 ): objects.append(line[8:-2] ) line_index += 1 if len(_lowercase ) > 0: __UpperCamelCase = objects else: line_index += 1 return backend_specific_objects def _A ( _lowercase , _lowercase ) -> Union[str, Any]: """simple docstring""" if name.isupper(): return DUMMY_CONSTANT.format(_lowercase ) elif name.islower(): return DUMMY_FUNCTION.format(_lowercase , _lowercase ) else: return DUMMY_CLASS.format(_lowercase , _lowercase ) def _A ( _lowercase=None ) -> Optional[Any]: """simple docstring""" if backend_specific_objects is None: __UpperCamelCase = read_init() # For special correspondence backend to module name as used in the function requires_modulename __UpperCamelCase = {} for backend, objects in backend_specific_objects.items(): __UpperCamelCase = '[' + ', '.join(f'''"{b}"''' for b in backend.split('_and_' ) ) + ']' __UpperCamelCase = '# This file is autogenerated by the command `make fix-copies`, do not edit.\n' dummy_file += "from ..utils import DummyObject, requires_backends\n\n" dummy_file += "\n".join([create_dummy_object(_lowercase , _lowercase ) for o in objects] ) __UpperCamelCase = dummy_file return dummy_files def _A ( _lowercase=False ) -> List[str]: """simple docstring""" __UpperCamelCase = create_dummy_files() # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py __UpperCamelCase = {'torch': 'pt'} # Locate actual dummy modules and read their content. 
__UpperCamelCase = os.path.join(_lowercase , 'utils' ) __UpperCamelCase = { backend: os.path.join(_lowercase , f'''dummy_{short_names.get(_lowercase , _lowercase )}_objects.py''' ) for backend in dummy_files.keys() } __UpperCamelCase = {} for backend, file_path in dummy_file_paths.items(): if os.path.isfile(_lowercase ): with open(_lowercase , 'r' , encoding='utf-8' , newline='\n' ) as f: __UpperCamelCase = f.read() else: __UpperCamelCase = '' for backend in dummy_files.keys(): if dummy_files[backend] != actual_dummies[backend]: if overwrite: print( f'''Updating diffusers.utils.dummy_{short_names.get(_lowercase , _lowercase )}_objects.py as the main ''' '__init__ has new objects.' ) with open(dummy_file_paths[backend] , 'w' , encoding='utf-8' , newline='\n' ) as f: f.write(dummy_files[backend] ) else: raise ValueError( 'The main __init__ has objects that are not present in ' f'''diffusers.utils.dummy_{short_names.get(_lowercase , _lowercase )}_objects.py. Run `make fix-copies` ''' 'to fix this.' ) if __name__ == "__main__": __snake_case = argparse.ArgumentParser() parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''') __snake_case = parser.parse_args() check_dummies(args.fix_and_overwrite)
1
1
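Concretely, the DUMMY_CLASS template in the check_dummies record above expands to stubs like the one below (class name chosen for illustration); the generated file also carries the autogenerated-file header and the `from ..utils import DummyObject, requires_backends` import shown in create_dummy_files, so touching the stub raises a clear missing-backend error instead of an ImportError at import time:

class UNet2DModel(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])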
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __snake_case = logging.get_logger(__name__) __snake_case = { '''facebook/levit-128S''': '''https://huggingface.co/facebook/levit-128S/resolve/main/config.json''', # See all LeViT models at https://huggingface.co/models?filter=levit } class __lowerCamelCase (_a ): _lowercase = """levit""" def __init__( self: List[str],A_: Dict=224,A_: Dict=3,A_: Any=3,A_: Any=2,A_: Any=1,A_: Dict=16,A_: str=[128, 256, 384],A_: Any=[4, 8, 12],A_: List[str]=[4, 4, 4],A_: List[Any]=[16, 16, 16],A_: Union[str, Any]=0,A_: Optional[Any]=[2, 2, 2],A_: Union[str, Any]=[2, 2, 2],A_: str=0.0_2,**A_: Any,): '''simple docstring''' super().__init__(**A_ ) __UpperCamelCase = image_size __UpperCamelCase = num_channels __UpperCamelCase = kernel_size __UpperCamelCase = stride __UpperCamelCase = padding __UpperCamelCase = hidden_sizes __UpperCamelCase = num_attention_heads __UpperCamelCase = depths __UpperCamelCase = key_dim __UpperCamelCase = drop_path_rate __UpperCamelCase = patch_size __UpperCamelCase = attention_ratio __UpperCamelCase = mlp_ratio __UpperCamelCase = initializer_range __UpperCamelCase = [ ['Subsample', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2], ['Subsample', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2], ] class __lowerCamelCase (_a ): _lowercase = version.parse("""1.11""" ) @property def snake_case_ ( self: int ): '''simple docstring''' return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ] ) @property def snake_case_ ( self: Any ): '''simple docstring''' return 1E-4
1
import string def _A ( _lowercase ) -> None: """simple docstring""" for key in range(len(string.ascii_uppercase ) ): __UpperCamelCase = '' for symbol in message: if symbol in string.ascii_uppercase: __UpperCamelCase = string.ascii_uppercase.find(_lowercase ) __UpperCamelCase = num - key if num < 0: __UpperCamelCase = num + len(string.ascii_uppercase ) __UpperCamelCase = translated + string.ascii_uppercase[num] else: __UpperCamelCase = translated + symbol print(f'''Decryption using Key #{key}: {translated}''' ) def _A ( ) -> None: """simple docstring""" __UpperCamelCase = input('Encrypted message: ' ) __UpperCamelCase = message.upper() decrypt(_lowercase ) if __name__ == "__main__": import doctest doctest.testmod() main()
1
1
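The second snippet in the record above brute-forces a Caesar cipher by printing the decryption under all 26 keys. The underlying shift arithmetic, as a standalone round trip (standard library only; the helper name is illustrative):

import string


def caesar_shift(message: str, key: int) -> str:
    # Shift uppercase letters by `key`, wrapping around the alphabet;
    # leave spaces and other symbols unchanged.
    out = []
    for symbol in message.upper():
        if symbol in string.ascii_uppercase:
            num = (string.ascii_uppercase.find(symbol) + key) % 26
            out.append(string.ascii_uppercase[num])
        else:
            out.append(symbol)
    return "".join(out)


ciphertext = caesar_shift("HELLO WORLD", 3)
assert ciphertext == "KHOOR ZRUOG"
# Decryption is a shift by the negative key, which is what the brute-force
# loop above tries for every possible key in turn.
assert caesar_shift(ciphertext, -3) == "HELLO WORLD"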
import random import unittest import numpy as np from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionImgaImgPipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class __lowerCamelCase (_a , unittest.TestCase ): _lowercase = """hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline""" def snake_case_ ( self: int,A_: Tuple=0 ): '''simple docstring''' __UpperCamelCase = floats_tensor((1, 3, 128, 128),rng=random.Random(A_ ) ) __UpperCamelCase = np.random.RandomState(A_ ) __UpperCamelCase = { 'prompt': 'A painting of a squirrel eating a burger', 'image': image, 'generator': generator, 'num_inference_steps': 3, 'strength': 0.7_5, 'guidance_scale': 7.5, 'output_type': 'numpy', } return inputs def snake_case_ ( self: Union[str, Any] ): '''simple docstring''' __UpperCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint,provider='CPUExecutionProvider' ) pipe.set_progress_bar_config(disable=A_ ) __UpperCamelCase = self.get_dummy_inputs() __UpperCamelCase = pipe(**A_ ).images __UpperCamelCase = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 128, 128, 3) __UpperCamelCase = np.array([0.6_9_6_4_3, 0.5_8_4_8_4, 0.5_0_3_1_4, 0.5_8_7_6_0, 0.5_5_3_6_8, 0.5_9_6_4_3, 0.5_1_5_2_9, 0.4_1_2_1_7, 0.4_9_0_8_7] ) assert np.abs(image_slice - expected_slice ).max() < 1E-1 def snake_case_ ( self: Tuple ): '''simple docstring''' __UpperCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint,provider='CPUExecutionProvider' ) __UpperCamelCase = PNDMScheduler.from_config(pipe.scheduler.config,skip_prk_steps=A_ ) pipe.set_progress_bar_config(disable=A_ ) __UpperCamelCase = self.get_dummy_inputs() __UpperCamelCase = pipe(**A_ ).images __UpperCamelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __UpperCamelCase = np.array([0.6_1_7_3_7, 0.5_4_6_4_2, 0.5_3_1_8_3, 0.5_4_4_6_5, 0.5_2_7_4_2, 0.6_0_5_2_5, 0.4_9_9_6_9, 0.4_0_6_5_5, 0.4_8_1_5_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def snake_case_ ( self: List[str] ): '''simple docstring''' __UpperCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint,provider='CPUExecutionProvider' ) __UpperCamelCase = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=A_ ) # warmup pass to apply optimizations __UpperCamelCase = pipe(**self.get_dummy_inputs() ) __UpperCamelCase = self.get_dummy_inputs() __UpperCamelCase = pipe(**A_ ).images __UpperCamelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __UpperCamelCase = np.array([0.5_2_7_6_1, 0.5_9_9_7_7, 0.4_9_0_3_3, 0.4_9_6_1_9, 0.5_4_2_8_2, 0.5_0_3_1_1, 0.4_7_6_0_0, 0.4_0_9_1_8, 0.4_5_2_0_3] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def snake_case_ ( self: Optional[Any] ): '''simple docstring''' __UpperCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint,provider='CPUExecutionProvider' ) __UpperCamelCase = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=A_ ) __UpperCamelCase = self.get_dummy_inputs() __UpperCamelCase = pipe(**A_ ).images __UpperCamelCase = image[0, -3:, -3:, -1] 
assert image.shape == (1, 128, 128, 3) __UpperCamelCase = np.array([0.5_2_9_1_1, 0.6_0_0_0_4, 0.4_9_2_2_9, 0.4_9_8_0_5, 0.5_4_5_0_2, 0.5_0_6_8_0, 0.4_7_7_7_7, 0.4_1_0_2_8, 0.4_5_3_0_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def snake_case_ ( self: List[Any] ): '''simple docstring''' __UpperCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint,provider='CPUExecutionProvider' ) __UpperCamelCase = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=A_ ) __UpperCamelCase = self.get_dummy_inputs() __UpperCamelCase = pipe(**A_ ).images __UpperCamelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __UpperCamelCase = np.array([0.5_2_9_1_1, 0.6_0_0_0_4, 0.4_9_2_2_9, 0.4_9_8_0_5, 0.5_4_5_0_2, 0.5_0_6_8_0, 0.4_7_7_7_7, 0.4_1_0_2_8, 0.4_5_3_0_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def snake_case_ ( self: str ): '''simple docstring''' __UpperCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint,provider='CPUExecutionProvider' ) __UpperCamelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=A_ ) __UpperCamelCase = self.get_dummy_inputs() __UpperCamelCase = pipe(**A_ ).images __UpperCamelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __UpperCamelCase = np.array([0.6_5_3_3_1, 0.5_8_2_7_7, 0.4_8_2_0_4, 0.5_6_0_5_9, 0.5_3_6_6_5, 0.5_6_2_3_5, 0.5_0_9_6_9, 0.4_0_0_0_9, 0.4_6_5_5_2] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 @nightly @require_onnxruntime @require_torch_gpu class __lowerCamelCase (unittest.TestCase ): @property def snake_case_ ( self: Any ): '''simple docstring''' return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def snake_case_ ( self: Dict ): '''simple docstring''' __UpperCamelCase = ort.SessionOptions() __UpperCamelCase = False return options def snake_case_ ( self: Dict ): '''simple docstring''' __UpperCamelCase = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/img2img/sketch-mountains-input.jpg' ) __UpperCamelCase = init_image.resize((768, 512) ) # using the PNDM scheduler by default __UpperCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4',revision='onnx',safety_checker=A_,feature_extractor=A_,provider=self.gpu_provider,sess_options=self.gpu_options,) pipe.set_progress_bar_config(disable=A_ ) __UpperCamelCase = 'A fantasy landscape, trending on artstation' __UpperCamelCase = np.random.RandomState(0 ) __UpperCamelCase = pipe( prompt=A_,image=A_,strength=0.7_5,guidance_scale=7.5,num_inference_steps=10,generator=A_,output_type='np',) __UpperCamelCase = output.images __UpperCamelCase = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 768, 3) __UpperCamelCase = np.array([0.4_9_0_9, 0.5_0_5_9, 0.5_3_7_2, 0.4_6_2_3, 0.4_8_7_6, 0.5_0_4_9, 0.4_8_2_0, 0.4_9_5_6, 0.5_0_1_9] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2 def snake_case_ ( self: List[str] ): '''simple docstring''' __UpperCamelCase = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/img2img/sketch-mountains-input.jpg' ) __UpperCamelCase = init_image.resize((768, 512) ) __UpperCamelCase = 
LMSDiscreteScheduler.from_pretrained( 'runwayml/stable-diffusion-v1-5',subfolder='scheduler',revision='onnx' ) __UpperCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained( 'runwayml/stable-diffusion-v1-5',revision='onnx',scheduler=A_,safety_checker=A_,feature_extractor=A_,provider=self.gpu_provider,sess_options=self.gpu_options,) pipe.set_progress_bar_config(disable=A_ ) __UpperCamelCase = 'A fantasy landscape, trending on artstation' __UpperCamelCase = np.random.RandomState(0 ) __UpperCamelCase = pipe( prompt=A_,image=A_,strength=0.7_5,guidance_scale=7.5,num_inference_steps=20,generator=A_,output_type='np',) __UpperCamelCase = output.images __UpperCamelCase = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 768, 3) __UpperCamelCase = np.array([0.8_0_4_3, 0.9_2_6, 0.9_5_8_1, 0.8_1_1_9, 0.8_9_5_4, 0.9_1_3, 0.7_2_0_9, 0.7_4_6_3, 0.7_4_3_1] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
1
import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import XLMRobertaTokenizerFast from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class __lowerCamelCase (_a , unittest.TestCase ): _lowercase = KandinskyInpaintPipeline _lowercase = ["""prompt""", """image_embeds""", """negative_image_embeds""", """image""", """mask_image"""] _lowercase = [ """prompt""", """negative_prompt""", """image_embeds""", """negative_image_embeds""", """image""", """mask_image""", ] _lowercase = [ """generator""", """height""", """width""", """latents""", """guidance_scale""", """negative_prompt""", """num_inference_steps""", """return_dict""", """guidance_scale""", """num_images_per_prompt""", """output_type""", """return_dict""", ] _lowercase = False @property def snake_case_ ( self: int ): '''simple docstring''' return 32 @property def snake_case_ ( self: str ): '''simple docstring''' return 32 @property def snake_case_ ( self: Tuple ): '''simple docstring''' return self.time_input_dim @property def snake_case_ ( self: Union[str, Any] ): '''simple docstring''' return self.time_input_dim * 4 @property def snake_case_ ( self: Optional[int] ): '''simple docstring''' return 100 @property def snake_case_ ( self: str ): '''simple docstring''' __UpperCamelCase = XLMRobertaTokenizerFast.from_pretrained('YiYiXu/tiny-random-mclip-base' ) return tokenizer @property def snake_case_ ( self: Any ): '''simple docstring''' torch.manual_seed(0 ) __UpperCamelCase = MCLIPConfig( numDims=self.cross_attention_dim,transformerDimensions=self.text_embedder_hidden_size,hidden_size=self.text_embedder_hidden_size,intermediate_size=37,num_attention_heads=4,num_hidden_layers=5,vocab_size=1005,) __UpperCamelCase = MultilingualCLIP(A_ ) __UpperCamelCase = text_encoder.eval() return text_encoder @property def snake_case_ ( self: Any ): '''simple docstring''' torch.manual_seed(0 ) __UpperCamelCase = { 'in_channels': 9, # Out channels is double in channels because predicts mean and variance 'out_channels': 8, 'addition_embed_type': 'text_image', 'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'), 'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'), 'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn', 'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2), 'layers_per_block': 1, 'encoder_hid_dim': self.text_embedder_hidden_size, 'encoder_hid_dim_type': 'text_image_proj', 'cross_attention_dim': self.cross_attention_dim, 'attention_head_dim': 4, 'resnet_time_scale_shift': 'scale_shift', 'class_embed_type': None, } __UpperCamelCase = UNetaDConditionModel(**A_ ) return model @property def snake_case_ ( self: str ): '''simple docstring''' return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property 
def snake_case_ ( self: str ): '''simple docstring''' torch.manual_seed(0 ) __UpperCamelCase = VQModel(**self.dummy_movq_kwargs ) return model def snake_case_ ( self: Dict ): '''simple docstring''' __UpperCamelCase = self.dummy_text_encoder __UpperCamelCase = self.dummy_tokenizer __UpperCamelCase = self.dummy_unet __UpperCamelCase = self.dummy_movq __UpperCamelCase = DDIMScheduler( num_train_timesteps=1000,beta_schedule='linear',beta_start=0.0_0_0_8_5,beta_end=0.0_1_2,clip_sample=A_,set_alpha_to_one=A_,steps_offset=1,prediction_type='epsilon',thresholding=A_,) __UpperCamelCase = { 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'unet': unet, 'scheduler': scheduler, 'movq': movq, } return components def snake_case_ ( self: Tuple,A_: Optional[int],A_: Dict=0 ): '''simple docstring''' __UpperCamelCase = floats_tensor((1, self.cross_attention_dim),rng=random.Random(A_ ) ).to(A_ ) __UpperCamelCase = floats_tensor((1, self.cross_attention_dim),rng=random.Random(seed + 1 ) ).to(A_ ) # create init_image __UpperCamelCase = floats_tensor((1, 3, 64, 64),rng=random.Random(A_ ) ).to(A_ ) __UpperCamelCase = image.cpu().permute(0,2,3,1 )[0] __UpperCamelCase = Image.fromarray(np.uinta(A_ ) ).convert('RGB' ).resize((256, 256) ) # create mask __UpperCamelCase = np.ones((64, 64),dtype=np.floataa ) __UpperCamelCase = 0 if str(A_ ).startswith('mps' ): __UpperCamelCase = torch.manual_seed(A_ ) else: __UpperCamelCase = torch.Generator(device=A_ ).manual_seed(A_ ) __UpperCamelCase = { 'prompt': 'horse', 'image': init_image, 'mask_image': mask, 'image_embeds': image_embeds, 'negative_image_embeds': negative_image_embeds, 'generator': generator, 'height': 64, 'width': 64, 'num_inference_steps': 2, 'guidance_scale': 4.0, 'output_type': 'np', } return inputs def snake_case_ ( self: Any ): '''simple docstring''' __UpperCamelCase = 'cpu' __UpperCamelCase = self.get_dummy_components() __UpperCamelCase = self.pipeline_class(**A_ ) __UpperCamelCase = pipe.to(A_ ) pipe.set_progress_bar_config(disable=A_ ) __UpperCamelCase = pipe(**self.get_dummy_inputs(A_ ) ) __UpperCamelCase = output.images __UpperCamelCase = pipe( **self.get_dummy_inputs(A_ ),return_dict=A_,)[0] __UpperCamelCase = image[0, -3:, -3:, -1] __UpperCamelCase = image_from_tuple[0, -3:, -3:, -1] print(F'''image.shape {image.shape}''' ) assert image.shape == (1, 64, 64, 3) __UpperCamelCase = np.array( [0.8_3_2_6_9_1_9, 0.7_3_7_9_0_4_6_7, 0.2_0_9_1_8_5_8_1, 0.9_3_0_9_6_1_2, 0.5_5_1_1_7_9_1, 0.4_3_7_1_3_3_2_8, 0.5_5_1_3_3_2_1, 0.4_9_9_2_2_9_3_4, 0.5_9_4_9_7_7_8_6] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 ), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}''' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 ), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}''' def snake_case_ ( self: Optional[Any] ): '''simple docstring''' super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) @slow @require_torch_gpu class __lowerCamelCase (unittest.TestCase ): def snake_case_ ( self: Tuple ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def snake_case_ ( self: Any ): '''simple docstring''' __UpperCamelCase = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy' ) __UpperCamelCase = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' ) __UpperCamelCase 
= np.ones((768, 768),dtype=np.floataa ) __UpperCamelCase = 0 __UpperCamelCase = 'a hat' __UpperCamelCase = KandinskyPriorPipeline.from_pretrained( 'kandinsky-community/kandinsky-2-1-prior',torch_dtype=torch.floataa ) pipe_prior.to(A_ ) __UpperCamelCase = KandinskyInpaintPipeline.from_pretrained( 'kandinsky-community/kandinsky-2-1-inpaint',torch_dtype=torch.floataa ) __UpperCamelCase = pipeline.to(A_ ) pipeline.set_progress_bar_config(disable=A_ ) __UpperCamelCase = torch.Generator(device='cpu' ).manual_seed(0 ) __UpperCamelCase, __UpperCamelCase = pipe_prior( A_,generator=A_,num_inference_steps=5,negative_prompt='',).to_tuple() __UpperCamelCase = pipeline( A_,image=A_,mask_image=A_,image_embeds=A_,negative_image_embeds=A_,generator=A_,num_inference_steps=100,height=768,width=768,output_type='np',) __UpperCamelCase = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(A_,A_ )
1
1
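Both test suites in the record above follow the same recipe: pin every random source with a fixed seed, then compare a small corner slice of the output image against recorded reference values within a loose tolerance (the ONNX tests widen it to 2e-2 while a reproducibility issue is tracked down). The bare pattern, detached from any pipeline; the arrays here are placeholders, not real pipeline output:

import numpy as np
import torch

# Pin the randomness the pipeline consumes, on both the NumPy and torch side.
np_generator = np.random.RandomState(0)
torch_generator = torch.Generator(device="cpu").manual_seed(0)

# Compare only a tiny, deterministic slice of the output...
image = np_generator.rand(1, 128, 128, 3)       # stand-in for pipe(...).images
image_slice = image[0, -3:, -3:, -1].flatten()

# ...against values recorded from a known-good run, within a tolerance.
expected_slice = image_slice.copy()             # stand-in for the recorded values
assert image.shape == (1, 128, 128, 3)
assert np.abs(image_slice - expected_slice).max() < 1e-1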
from __future__ import annotations def _A ( _lowercase , _lowercase , _lowercase ) -> int | float: """simple docstring""" if len(_lowercase ) == 0: raise ValueError('find_max() arg is an empty sequence' ) if ( left >= len(_lowercase ) or left < -len(_lowercase ) or right >= len(_lowercase ) or right < -len(_lowercase ) ): raise IndexError('list index out of range' ) if left == right: return nums[left] __UpperCamelCase = (left + right) >> 1 # the middle __UpperCamelCase = find_max(_lowercase , _lowercase , _lowercase ) # find max in range[left, mid] __UpperCamelCase = find_max(_lowercase , mid + 1 , _lowercase ) # find max in range[mid + 1, right] return left_max if left_max >= right_max else right_max if __name__ == "__main__": import doctest doctest.testmod(verbose=True)
1
from typing import Any class __lowerCamelCase : def __init__( self: int,A_: Any ): '''simple docstring''' __UpperCamelCase = data __UpperCamelCase = None def __repr__( self: Any ): '''simple docstring''' return F'''Node({self.data})''' class __lowerCamelCase : def __init__( self: Union[str, Any] ): '''simple docstring''' __UpperCamelCase = None def __iter__( self: int ): '''simple docstring''' __UpperCamelCase = self.head while node: yield node.data __UpperCamelCase = node.next def __len__( self: List[str] ): '''simple docstring''' return sum(1 for _ in self ) def __repr__( self: Any ): '''simple docstring''' return "->".join([str(A_ ) for item in self] ) def __getitem__( self: int,A_: int ): '''simple docstring''' if not 0 <= index < len(self ): raise ValueError('list index out of range.' ) for i, node in enumerate(self ): if i == index: return node return None def __setitem__( self: int,A_: int,A_: Any ): '''simple docstring''' if not 0 <= index < len(self ): raise ValueError('list index out of range.' ) __UpperCamelCase = self.head for _ in range(A_ ): __UpperCamelCase = current.next __UpperCamelCase = data def snake_case_ ( self: Union[str, Any],A_: Any ): '''simple docstring''' self.insert_nth(len(self ),A_ ) def snake_case_ ( self: List[Any],A_: Any ): '''simple docstring''' self.insert_nth(0,A_ ) def snake_case_ ( self: Optional[Any],A_: int,A_: Any ): '''simple docstring''' if not 0 <= index <= len(self ): raise IndexError('list index out of range' ) __UpperCamelCase = Node(A_ ) if self.head is None: __UpperCamelCase = new_node elif index == 0: __UpperCamelCase = self.head # link new_node to head __UpperCamelCase = new_node else: __UpperCamelCase = self.head for _ in range(index - 1 ): __UpperCamelCase = temp.next __UpperCamelCase = temp.next __UpperCamelCase = new_node def snake_case_ ( self: str ): # print every node data '''simple docstring''' print(self ) def snake_case_ ( self: int ): '''simple docstring''' return self.delete_nth(0 ) def snake_case_ ( self: str ): # delete from tail '''simple docstring''' return self.delete_nth(len(self ) - 1 ) def snake_case_ ( self: Any,A_: int = 0 ): '''simple docstring''' if not 0 <= index <= len(self ) - 1: # test if index is valid raise IndexError('List index out of range.' ) __UpperCamelCase = self.head # default first node if index == 0: __UpperCamelCase = self.head.next else: __UpperCamelCase = self.head for _ in range(index - 1 ): __UpperCamelCase = temp.next __UpperCamelCase = temp.next __UpperCamelCase = temp.next.next return delete_node.data def snake_case_ ( self: Any ): '''simple docstring''' return self.head is None def snake_case_ ( self: Optional[int] ): '''simple docstring''' __UpperCamelCase = None __UpperCamelCase = self.head while current: # Store the current node's next node. __UpperCamelCase = current.next # Make the current node's next point backwards __UpperCamelCase = prev # Make the previous node be the current node __UpperCamelCase = current # Make the current node the next node (to progress iteration) __UpperCamelCase = next_node # Return prev in order to put the head at the end __UpperCamelCase = prev def _A ( ) -> None: """simple docstring""" __UpperCamelCase = LinkedList() assert linked_list.is_empty() is True assert str(_lowercase ) == "" try: linked_list.delete_head() raise AssertionError # This should not happen. except IndexError: assert True # This should happen. try: linked_list.delete_tail() raise AssertionError # This should not happen. except IndexError: assert True # This should happen. 
for i in range(10 ): assert len(_lowercase ) == i linked_list.insert_nth(_lowercase , i + 1 ) assert str(_lowercase ) == "->".join(str(_lowercase ) for i in range(1 , 11 ) ) linked_list.insert_head(0 ) linked_list.insert_tail(11 ) assert str(_lowercase ) == "->".join(str(_lowercase ) for i in range(0 , 12 ) ) assert linked_list.delete_head() == 0 assert linked_list.delete_nth(9 ) == 10 assert linked_list.delete_tail() == 11 assert len(_lowercase ) == 9 assert str(_lowercase ) == "->".join(str(_lowercase ) for i in range(1 , 10 ) ) assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True for i in range(0 , 9 ): __UpperCamelCase = -i assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True linked_list.reverse() assert str(_lowercase ) == "->".join(str(_lowercase ) for i in range(-8 , 1 ) ) def _A ( ) -> None: """simple docstring""" __UpperCamelCase = [ -9, 1_00, Node(77_34_51_12 ), 'dlrow olleH', 7, 55_55, 0, -1_92.5_55_55, 'Hello, world!', 77.9, Node(10 ), None, None, 12.20, ] __UpperCamelCase = LinkedList() for i in test_input: linked_list.insert_tail(_lowercase ) # Check if it's empty or not assert linked_list.is_empty() is False assert ( str(_lowercase ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->" "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2" ) # Delete the head __UpperCamelCase = linked_list.delete_head() assert result == -9 assert ( str(_lowercase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None->None->12.2" ) # Delete the tail __UpperCamelCase = linked_list.delete_tail() assert result == 12.2 assert ( str(_lowercase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None->None" ) # Delete a node in specific location in linked list __UpperCamelCase = linked_list.delete_nth(10 ) assert result is None assert ( str(_lowercase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None" ) # Add a Node instance to its head linked_list.insert_head(Node('Hello again, world!' 
) ) assert ( str(_lowercase ) == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->" "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None" ) # Add None to its tail linked_list.insert_tail(_lowercase ) assert ( str(_lowercase ) == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->" "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None" ) # Reverse the linked list linked_list.reverse() assert ( str(_lowercase ) == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->" "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)" ) def _A ( ) -> List[str]: """simple docstring""" from doctest import testmod testmod() __UpperCamelCase = LinkedList() linked_list.insert_head(input('Inserting 1st at head ' ).strip() ) linked_list.insert_head(input('Inserting 2nd at head ' ).strip() ) print('\nPrint list:' ) linked_list.print_list() linked_list.insert_tail(input('\nInserting 1st at tail ' ).strip() ) linked_list.insert_tail(input('Inserting 2nd at tail ' ).strip() ) print('\nPrint list:' ) linked_list.print_list() print('\nDelete head' ) linked_list.delete_head() print('Delete tail' ) linked_list.delete_tail() print('\nPrint list:' ) linked_list.print_list() print('\nReverse linked list' ) linked_list.reverse() print('\nPrint list:' ) linked_list.print_list() print('\nString representation of linked list:' ) print(_lowercase ) print('\nReading/changing Node data using indexing:' ) print(f'''Element at Position 1: {linked_list[1]}''' ) __UpperCamelCase = input('Enter New Value: ' ).strip() print('New list:' ) print(_lowercase ) print(f'''length of linked_list is : {len(_lowercase )}''' ) if __name__ == "__main__": main()
1
1
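The divide-and-conquer find_max in the record above takes inclusive bounds and, thanks to its explicit range check, also accepts negative indices as long as they stay within plus or minus len(nums). A few checks against the definition as written:

nums = [3, -7, 10, 2, 10]

# Full range: right is the inclusive index len(nums) - 1, not len(nums).
assert find_max(nums, 0, len(nums) - 1) == 10

# Sub-range [1, 3] only sees -7, 10 and 2.
assert find_max(nums, 1, 3) == 10

# Negative indices are accepted as long as they stay in bounds.
assert find_max(nums, -5, -1) == 10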
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __snake_case = logging.get_logger(__name__) __snake_case = { '''xlm-roberta-base''': '''https://huggingface.co/xlm-roberta-base/resolve/main/config.json''', '''xlm-roberta-large''': '''https://huggingface.co/xlm-roberta-large/resolve/main/config.json''', '''xlm-roberta-large-finetuned-conll02-dutch''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json''' ), '''xlm-roberta-large-finetuned-conll02-spanish''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json''' ), '''xlm-roberta-large-finetuned-conll03-english''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json''' ), '''xlm-roberta-large-finetuned-conll03-german''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json''' ), } class __lowerCamelCase (_a ): _lowercase = """xlm-roberta""" def __init__( self: int,A_: Any=3_0522,A_: Any=768,A_: Tuple=12,A_: Any=12,A_: Optional[int]=3072,A_: List[Any]="gelu",A_: Union[str, Any]=0.1,A_: Optional[Any]=0.1,A_: Union[str, Any]=512,A_: Union[str, Any]=2,A_: Any=0.0_2,A_: int=1E-12,A_: List[Any]=1,A_: List[Any]=0,A_: int=2,A_: Any="absolute",A_: int=True,A_: Any=None,**A_: int,): '''simple docstring''' super().__init__(pad_token_id=A_,bos_token_id=A_,eos_token_id=A_,**A_ ) __UpperCamelCase = vocab_size __UpperCamelCase = hidden_size __UpperCamelCase = num_hidden_layers __UpperCamelCase = num_attention_heads __UpperCamelCase = hidden_act __UpperCamelCase = intermediate_size __UpperCamelCase = hidden_dropout_prob __UpperCamelCase = attention_probs_dropout_prob __UpperCamelCase = max_position_embeddings __UpperCamelCase = type_vocab_size __UpperCamelCase = initializer_range __UpperCamelCase = layer_norm_eps __UpperCamelCase = position_embedding_type __UpperCamelCase = use_cache __UpperCamelCase = classifier_dropout class __lowerCamelCase (_a ): @property def snake_case_ ( self: Optional[int] ): '''simple docstring''' if self.task == "multiple-choice": __UpperCamelCase = {0: 'batch', 1: 'choice', 2: 'sequence'} else: __UpperCamelCase = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ] )
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) __snake_case = {'''configuration_unispeech''': ['''UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''UniSpeechConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case = [ '''UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST''', '''UniSpeechForCTC''', '''UniSpeechForPreTraining''', '''UniSpeechForSequenceClassification''', '''UniSpeechModel''', '''UniSpeechPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_unispeech import ( UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST, UniSpeechForCTC, UniSpeechForPreTraining, UniSpeechForSequenceClassification, UniSpeechModel, UniSpeechPreTrainedModel, ) else: import sys __snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
1
1
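The _LazyModule registration in the __init__ record above keeps `import` cheap: submodules listed in _import_structure are loaded only when one of their symbols is first accessed. A stripped-down sketch of the mechanism, not the transformers implementation; it assumes `name` is the dotted name of a real importable package:

import importlib
import types


class LazyModule(types.ModuleType):
    # Resolve exported symbols on first attribute access instead of at import time.

    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map every exported symbol to the submodule that defines it.
        self._symbol_to_module = {
            symbol: submodule
            for submodule, symbols in import_structure.items()
            for symbol in symbols
        }

    def __getattr__(self, attr):
        if attr not in self._symbol_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module("." + self._symbol_to_module[attr], self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so the next lookup is a plain attribute hit
        return value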
def _A ( ) -> List[str]: """simple docstring""" __UpperCamelCase = 0 for i in range(1 , 10_01 ): total += i**i return str(_lowercase )[-10:] if __name__ == "__main__": print(solution())
1
__snake_case = { '''a''': '''AAAAA''', '''b''': '''AAAAB''', '''c''': '''AAABA''', '''d''': '''AAABB''', '''e''': '''AABAA''', '''f''': '''AABAB''', '''g''': '''AABBA''', '''h''': '''AABBB''', '''i''': '''ABAAA''', '''j''': '''BBBAA''', '''k''': '''ABAAB''', '''l''': '''ABABA''', '''m''': '''ABABB''', '''n''': '''ABBAA''', '''o''': '''ABBAB''', '''p''': '''ABBBA''', '''q''': '''ABBBB''', '''r''': '''BAAAA''', '''s''': '''BAAAB''', '''t''': '''BAABA''', '''u''': '''BAABB''', '''v''': '''BBBAB''', '''w''': '''BABAA''', '''x''': '''BABAB''', '''y''': '''BABBA''', '''z''': '''BABBB''', ''' ''': ''' ''', } __snake_case = {value: key for key, value in encode_dict.items()} def _A ( _lowercase ) -> str: """simple docstring""" __UpperCamelCase = '' for letter in word.lower(): if letter.isalpha() or letter == " ": encoded += encode_dict[letter] else: raise Exception('encode() accepts only letters of the alphabet and spaces' ) return encoded def _A ( _lowercase ) -> str: """simple docstring""" if set(_lowercase ) - {"A", "B", " "} != set(): raise Exception('decode() accepts only \'A\', \'B\' and spaces' ) __UpperCamelCase = '' for word in coded.split(): while len(_lowercase ) != 0: decoded += decode_dict[word[:5]] __UpperCamelCase = word[5:] decoded += " " return decoded.strip() if __name__ == "__main__": from doctest import testmod testmod()
1
1
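The A/B substitution table in the record above is a Bacon-style cipher: every letter maps to a fixed five-character block of As and Bs, spaces pass through, and decode() peels the input off five characters at a time. A round trip against the encode/decode pair exactly as defined (including its non-standard codes for 'j' and 'v'):

plaintext = "hello world"
coded = encode(plaintext)

# Every word becomes a run of 5-character blocks; words stay space-separated.
assert len(coded.split()[0]) == 5 * len("hello")
assert decode(coded) == plaintext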
import argparse import json import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler __snake_case = 1_6 __snake_case = 3_2 def _A ( _lowercase , _lowercase = 16 , _lowercase = "bert-base-cased" ) -> Union[str, Any]: """simple docstring""" __UpperCamelCase = AutoTokenizer.from_pretrained(_lowercase ) __UpperCamelCase = load_dataset('glue' , 'mrpc' ) def tokenize_function(_lowercase ): # max_length=None => use the model max length (it's actually the default) __UpperCamelCase = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=_lowercase , max_length=_lowercase ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset __UpperCamelCase = datasets.map( _lowercase , batched=_lowercase , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=_lowercase ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library __UpperCamelCase = tokenized_datasets.rename_column('label' , 'labels' ) def collate_fn(_lowercase ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(_lowercase , padding='max_length' , max_length=1_28 , return_tensors='pt' ) return tokenizer.pad(_lowercase , padding='longest' , return_tensors='pt' ) # Instantiate dataloaders. __UpperCamelCase = DataLoader( tokenized_datasets['train'] , shuffle=_lowercase , collate_fn=_lowercase , batch_size=_lowercase ) __UpperCamelCase = DataLoader( tokenized_datasets['validation'] , shuffle=_lowercase , collate_fn=_lowercase , batch_size=_lowercase ) return train_dataloader, eval_dataloader def _A ( _lowercase , _lowercase ) -> int: """simple docstring""" __UpperCamelCase = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs __UpperCamelCase = config['lr'] __UpperCamelCase = int(config['num_epochs'] ) __UpperCamelCase = int(config['seed'] ) __UpperCamelCase = int(config['batch_size'] ) __UpperCamelCase = args.model_name_or_path set_seed(_lowercase ) __UpperCamelCase, __UpperCamelCase = get_dataloaders(_lowercase , _lowercase , _lowercase ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) __UpperCamelCase = AutoModelForSequenceClassification.from_pretrained(_lowercase , return_dict=_lowercase ) # Instantiate optimizer __UpperCamelCase = ( AdamW if accelerator.state.deepspeed_plugin is None or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) __UpperCamelCase = optimizer_cls(params=model.parameters() , lr=_lowercase ) if accelerator.state.deepspeed_plugin is not None: __UpperCamelCase = accelerator.state.deepspeed_plugin.deepspeed_config[ 'gradient_accumulation_steps' ] else: __UpperCamelCase = 1 __UpperCamelCase = (len(_lowercase ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): __UpperCamelCase = get_linear_schedule_with_warmup( optimizer=_lowercase , num_warmup_steps=0 , 
num_training_steps=_lowercase , ) else: __UpperCamelCase = DummyScheduler(_lowercase , total_num_steps=_lowercase , warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember; we just need to unpack the objects in the same order we gave them to the # prepare method. __UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase = accelerator.prepare( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) # We need to keep track of how many total steps we have iterated over __UpperCamelCase = 0 # We also need to keep track of the starting epoch so files are named properly __UpperCamelCase = 0 # Now we train the model __UpperCamelCase = evaluate.load('glue' , 'mrpc' ) __UpperCamelCase = 0 __UpperCamelCase = {} for epoch in range(_lowercase , _lowercase ): model.train() for step, batch in enumerate(_lowercase ): __UpperCamelCase = model(**_lowercase ) __UpperCamelCase = outputs.loss __UpperCamelCase = loss / gradient_accumulation_steps accelerator.backward(_lowercase ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 model.eval() __UpperCamelCase = 0 for step, batch in enumerate(_lowercase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): __UpperCamelCase = model(**_lowercase ) __UpperCamelCase = outputs.logits.argmax(dim=-1 ) # It is slightly faster to call this once rather than multiple times __UpperCamelCase, __UpperCamelCase = accelerator.gather( (predictions, batch['labels']) ) # If we are in a multiprocess environment, the last batch has duplicates if accelerator.use_distributed: if step == len(_lowercase ) - 1: __UpperCamelCase = predictions[: len(eval_dataloader.dataset ) - samples_seen] __UpperCamelCase = references[: len(eval_dataloader.dataset ) - samples_seen] else: samples_seen += references.shape[0] metric.add_batch( predictions=_lowercase , references=_lowercase , ) __UpperCamelCase = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f'''epoch {epoch}:''' , _lowercase ) __UpperCamelCase = eval_metric['accuracy'] if best_performance < eval_metric["accuracy"]: __UpperCamelCase = eval_metric['accuracy'] if args.performance_lower_bound is not None: assert ( args.performance_lower_bound <= best_performance ), f'''Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}''' accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir , 'all_results.json' ) , 'w' ) as f: json.dump(_lowercase , _lowercase ) def _A ( ) -> List[str]: """simple docstring""" __UpperCamelCase = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' ) parser.add_argument( '--model_name_or_path' , type=_lowercase , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=_lowercase , ) parser.add_argument( '--output_dir' , type=_lowercase , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , ) parser.add_argument( '--performance_lower_bound' , type=_lowercase , default=_lowercase , help='Optional lower bound for the performance metric. If set, the training will throw an error when the performance metric drops below this value.'
, ) parser.add_argument( '--num_epochs' , type=_lowercase , default=3 , help='Number of train epochs.' , ) __UpperCamelCase = parser.parse_args() __UpperCamelCase = {'lr': 2e-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16} training_function(_lowercase , _lowercase ) if __name__ == "__main__": main()
1
from collections.abc import Generator from math import sin def _A ( _lowercase ) -> bytes: """simple docstring""" if len(_lowercase ) != 32: raise ValueError('Input must be of length 32' ) __UpperCamelCase = B'' for i in [3, 2, 1, 0]: little_endian += string_aa[8 * i : 8 * i + 8] return little_endian def _A ( _lowercase ) -> bytes: """simple docstring""" if i < 0: raise ValueError('Input must be non-negative' ) __UpperCamelCase = format(_lowercase , '08x' )[-8:] __UpperCamelCase = B'' for i in [3, 2, 1, 0]: little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode('utf-8' ) return little_endian_hex def _A ( _lowercase ) -> bytes: """simple docstring""" __UpperCamelCase = B'' for char in message: bit_string += format(_lowercase , '08b' ).encode('utf-8' ) __UpperCamelCase = format(len(_lowercase ) , '064b' ).encode('utf-8' ) # Pad bit_string to a multiple of 512 chars bit_string += b"1" while len(_lowercase ) % 5_12 != 4_48: bit_string += b"0" bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] ) return bit_string def _A ( _lowercase ) -> Generator[list[int], None, None]: """simple docstring""" if len(_lowercase ) % 5_12 != 0: raise ValueError('Input must have length that\'s a multiple of 512' ) for pos in range(0 , len(_lowercase ) , 5_12 ): __UpperCamelCase = bit_string[pos : pos + 5_12] __UpperCamelCase = [] for i in range(0 , 5_12 , 32 ): block_words.append(int(to_little_endian(block[i : i + 32] ) , 2 ) ) yield block_words def _A ( _lowercase ) -> int: """simple docstring""" if i < 0: raise ValueError('Input must be non-negative' ) __UpperCamelCase = format(_lowercase , '032b' ) __UpperCamelCase = '' for c in i_str: new_str += "1" if c == "0" else "0" return int(_lowercase , 2 ) def _A ( _lowercase , _lowercase ) -> int: """simple docstring""" return (a + b) % 2**32 def _A ( _lowercase , _lowercase ) -> int: """simple docstring""" if i < 0: raise ValueError('Input must be non-negative' ) if shift < 0: raise ValueError('Shift must be non-negative' ) return ((i << shift) ^ (i >> (32 - shift))) % 2**32 def _A ( _lowercase ) -> bytes: """simple docstring""" __UpperCamelCase = preprocess(_lowercase ) __UpperCamelCase = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )] # Starting states __UpperCamelCase = 0X67_45_23_01 __UpperCamelCase = 0Xef_cd_ab_89 __UpperCamelCase = 0X98_ba_dc_fe __UpperCamelCase = 0X10_32_54_76 __UpperCamelCase = [ 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, ] # Process bit string in chunks, each with 16 32-char words for block_words in get_block_words(_lowercase ): __UpperCamelCase = aa __UpperCamelCase = ba __UpperCamelCase = ca __UpperCamelCase = da # Hash current chunk for i in range(64 ): if i <= 15: # f = (b & c) | (not_32(b) & d) # Alternate definition for f __UpperCamelCase = d ^ (b & (c ^ d)) __UpperCamelCase = i elif i <= 31: # f = (d & b) | (not_32(d) & c) # Alternate definition for f __UpperCamelCase = c ^ (d & (b ^ c)) __UpperCamelCase = (5 * i + 1) % 16 elif i <= 47: __UpperCamelCase = b ^ c ^ d __UpperCamelCase = (3 * i + 5) % 16 else: __UpperCamelCase = c ^ (b | not_aa(_lowercase )) __UpperCamelCase = (7 * i) % 16 __UpperCamelCase = (f + a + added_consts[i] + block_words[g]) % 2**32 __UpperCamelCase = d __UpperCamelCase = c __UpperCamelCase = b __UpperCamelCase = sum_aa(_lowercase , left_rotate_aa(_lowercase , shift_amounts[i] ) ) # Add 
hashed chunk to running total __UpperCamelCase = sum_aa(_lowercase , _lowercase ) __UpperCamelCase = sum_aa(_lowercase , _lowercase ) __UpperCamelCase = sum_aa(_lowercase , _lowercase ) __UpperCamelCase = sum_aa(_lowercase , _lowercase ) __UpperCamelCase = reformat_hex(_lowercase ) + reformat_hex(_lowercase ) + reformat_hex(_lowercase ) + reformat_hex(_lowercase ) return digest if __name__ == "__main__": import doctest doctest.testmod()
1
1
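The hand-rolled MD5 in the record above returns the hex digest as bytes (reformat_hex builds bytes), so it can be checked directly against the standard library on the RFC 1321 test vectors. A sketch, assuming the top-level digest routine is bound to the name md5_digest; the record's identifiers are mangled, so the real entry-point name differs:

import hashlib

# Classic RFC 1321 test vectors.
for message in [b"", b"abc", b"message digest"]:
    expected = hashlib.md5(message).hexdigest().encode("utf-8")
    assert md5_digest(message) == expected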
def _A ( _lowercase = 10_00 ) -> int: """simple docstring""" __UpperCamelCase = 2**power __UpperCamelCase = str(_lowercase ) __UpperCamelCase = list(_lowercase ) __UpperCamelCase = 0 for i in list_num: sum_of_num += int(_lowercase ) return sum_of_num if __name__ == "__main__": __snake_case = int(input('''Enter the power of 2: ''').strip()) print('''2 ^ ''', power, ''' = ''', 2**power) __snake_case = solution(power) print('''Sum of the digits is: ''', result)
1
from __future__ import annotations

import time
from math import sqrt

# 1 for manhattan, 0 for euclidean
HEURISTIC = 0

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right

TPosition = tuple[int, int]


class Node:
    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: int,
        parent: Node | None,
    ):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self) -> float:
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx) + abs(dy)
        else:
            return sqrt(dy**2 + dx**2)

    def __lt__(self, other: Node) -> bool:
        return self.f_cost < other.f_cost


class AStar:
    def __init__(self, start: TPosition, goal: TPosition):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)
        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []
        self.reached = False

    def search(self) -> list[TPosition]:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)
        return [self.start.pos]

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> list[TPosition]:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


class BidirectionalAStar:
    def __init__(self, start: TPosition, goal: TPosition):
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self) -> list[TPosition]:
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)

            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)

            # each search aims at the frontier node of the opposite search
            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node

            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }

            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue
                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(astar.open_nodes.index(child_node))
                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)
        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> list[TPosition]:
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path


if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(f"""AStar execution time = {end_time:f} seconds""")

    bd_start_time = time.time()
    bidir_astar = BidirectionalAStar(init, goal)
    bd_path = bidir_astar.search()
    bd_end_time = time.time() - bd_start_time
    print(f"""BidirectionalAStar execution time = {bd_end_time:f} seconds""")
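# Sanity-check sketch (illustrative, assuming the module-level grid is solvable
# as in the demo above): both searches should return paths with the same
# endpoints, even if the routes in between differ.
if __name__ == "__main__":
    uni = AStar((0, 0), (len(grid) - 1, len(grid[0]) - 1)).search()
    bi = BidirectionalAStar((0, 0), (len(grid) - 1, len(grid[0]) - 1)).search()
    assert uni[0] == bi[0] == (0, 0)
    assert uni[-1] == bi[-1] == (len(grid) - 1, len(grid[0]) - 1)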
1
1
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_graphormer": ["GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "GraphormerConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_graphormer"] = [
        "GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GraphormerForGraphClassification",
        "GraphormerModel",
        "GraphormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_graphormer import (
            GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            GraphormerForGraphClassification,
            GraphormerModel,
            GraphormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
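# For illustration only -- a stripped-down sketch of the lazy-module idea used
# above, not the transformers implementation: replacing the module object in
# sys.modules means submodules are imported only on first attribute access, so
# `from transformers import GraphormerConfig` triggers the real import while a
# bare `import transformers` stays cheap. The class below is hypothetical.
import importlib
import types


class LazyModule(types.ModuleType):
    """Minimal stand-in showing how attribute access defers the real import."""

    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # map every exported object name back to the submodule that defines it
        self._object_to_module = {obj: mod for mod, objs in import_structure.items() for obj in objs}

    def __getattr__(self, attr: str):
        module_name = self._object_to_module.get(attr)
        if module_name is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        # import the submodule lazily, then pull the requested object from it
        module = importlib.import_module(f".{module_name}", self.__name__)
        return getattr(module, attr)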
1
import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test sys.path.append(str(Path(__file__).parent.parent / '''utils''')) from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402 __snake_case = get_tests_dir('''fixtures''') class __lowerCamelCase (unittest.TestCase ): def snake_case_ ( self: int ): '''simple docstring''' __UpperCamelCase = mock.Mock() __UpperCamelCase = 500 __UpperCamelCase = {} __UpperCamelCase = HTTPError __UpperCamelCase = {} # Download this model to make sure it's in the cache. __UpperCamelCase = WavaVecaFeatureExtractor.from_pretrained('hf-internal-testing/tiny-random-wav2vec2' ) # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch('requests.Session.request',return_value=A_ ) as mock_head: __UpperCamelCase = WavaVecaFeatureExtractor.from_pretrained('hf-internal-testing/tiny-random-wav2vec2' ) # This check we did call the fake head request mock_head.assert_called() def snake_case_ ( self: Dict ): '''simple docstring''' __UpperCamelCase = WavaVecaFeatureExtractor.from_pretrained( 'https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json' ) @is_staging_test class __lowerCamelCase (unittest.TestCase ): @classmethod def snake_case_ ( cls: Tuple ): '''simple docstring''' __UpperCamelCase = TOKEN HfFolder.save_token(A_ ) @classmethod def snake_case_ ( cls: Tuple ): '''simple docstring''' try: delete_repo(token=cls._token,repo_id='test-feature-extractor' ) except HTTPError: pass try: delete_repo(token=cls._token,repo_id='valid_org/test-feature-extractor-org' ) except HTTPError: pass try: delete_repo(token=cls._token,repo_id='test-dynamic-feature-extractor' ) except HTTPError: pass def snake_case_ ( self: Tuple ): '''simple docstring''' __UpperCamelCase = WavaVecaFeatureExtractor.from_pretrained(A_ ) feature_extractor.push_to_hub('test-feature-extractor',use_auth_token=self._token ) __UpperCamelCase = WavaVecaFeatureExtractor.from_pretrained(F'''{USER}/test-feature-extractor''' ) for k, v in feature_extractor.__dict__.items(): self.assertEqual(A_,getattr(A_,A_ ) ) # Reset repo delete_repo(token=self._token,repo_id='test-feature-extractor' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained( A_,repo_id='test-feature-extractor',push_to_hub=A_,use_auth_token=self._token ) __UpperCamelCase = WavaVecaFeatureExtractor.from_pretrained(F'''{USER}/test-feature-extractor''' ) for k, v in feature_extractor.__dict__.items(): self.assertEqual(A_,getattr(A_,A_ ) ) def snake_case_ ( self: List[str] ): '''simple docstring''' __UpperCamelCase = WavaVecaFeatureExtractor.from_pretrained(A_ ) feature_extractor.push_to_hub('valid_org/test-feature-extractor',use_auth_token=self._token ) __UpperCamelCase = WavaVecaFeatureExtractor.from_pretrained('valid_org/test-feature-extractor' ) for k, v in feature_extractor.__dict__.items(): self.assertEqual(A_,getattr(A_,A_ ) ) # Reset repo delete_repo(token=self._token,repo_id='valid_org/test-feature-extractor' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained( 
A_,repo_id='valid_org/test-feature-extractor-org',push_to_hub=A_,use_auth_token=self._token ) __UpperCamelCase = WavaVecaFeatureExtractor.from_pretrained('valid_org/test-feature-extractor-org' ) for k, v in feature_extractor.__dict__.items(): self.assertEqual(A_,getattr(A_,A_ ) ) def snake_case_ ( self: int ): '''simple docstring''' CustomFeatureExtractor.register_for_auto_class() __UpperCamelCase = CustomFeatureExtractor.from_pretrained(A_ ) feature_extractor.push_to_hub('test-dynamic-feature-extractor',use_auth_token=self._token ) # This has added the proper auto_map field to the config self.assertDictEqual( feature_extractor.auto_map,{'AutoFeatureExtractor': 'custom_feature_extraction.CustomFeatureExtractor'},) __UpperCamelCase = AutoFeatureExtractor.from_pretrained( F'''{USER}/test-dynamic-feature-extractor''',trust_remote_code=A_ ) # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module self.assertEqual(new_feature_extractor.__class__.__name__,'CustomFeatureExtractor' )
1
1
INSTALL_CONTENT = """
# How to install Transformers
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
1
import argparse import json import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler __snake_case = 1_6 __snake_case = 3_2 def _A ( _lowercase , _lowercase = 16 , _lowercase = "bert-base-cased" ) -> Union[str, Any]: """simple docstring""" __UpperCamelCase = AutoTokenizer.from_pretrained(_lowercase ) __UpperCamelCase = load_dataset('glue' , 'mrpc' ) def tokenize_function(_lowercase ): # max_length=None => use the model max length (it's actually the default) __UpperCamelCase = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=_lowercase , max_length=_lowercase ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset __UpperCamelCase = datasets.map( _lowercase , batched=_lowercase , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=_lowercase ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library __UpperCamelCase = tokenized_datasets.rename_column('label' , 'labels' ) def collate_fn(_lowercase ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(_lowercase , padding='max_length' , max_length=1_28 , return_tensors='pt' ) return tokenizer.pad(_lowercase , padding='longest' , return_tensors='pt' ) # Instantiate dataloaders. __UpperCamelCase = DataLoader( tokenized_datasets['train'] , shuffle=_lowercase , collate_fn=_lowercase , batch_size=_lowercase ) __UpperCamelCase = DataLoader( tokenized_datasets['validation'] , shuffle=_lowercase , collate_fn=_lowercase , batch_size=_lowercase ) return train_dataloader, eval_dataloader def _A ( _lowercase , _lowercase ) -> int: """simple docstring""" __UpperCamelCase = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs __UpperCamelCase = config['lr'] __UpperCamelCase = int(config['num_epochs'] ) __UpperCamelCase = int(config['seed'] ) __UpperCamelCase = int(config['batch_size'] ) __UpperCamelCase = args.model_name_or_path set_seed(_lowercase ) __UpperCamelCase, __UpperCamelCase = get_dataloaders(_lowercase , _lowercase , _lowercase ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) __UpperCamelCase = AutoModelForSequenceClassification.from_pretrained(_lowercase , return_dict=_lowercase ) # Instantiate optimizer __UpperCamelCase = ( AdamW if accelerator.state.deepspeed_plugin is None or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) __UpperCamelCase = optimizer_cls(params=model.parameters() , lr=_lowercase ) if accelerator.state.deepspeed_plugin is not None: __UpperCamelCase = accelerator.state.deepspeed_plugin.deepspeed_config[ 'gradient_accumulation_steps' ] else: __UpperCamelCase = 1 __UpperCamelCase = (len(_lowercase ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): __UpperCamelCase = get_linear_schedule_with_warmup( optimizer=_lowercase , num_warmup_steps=0 , 
num_training_steps=_lowercase , ) else: __UpperCamelCase = DummyScheduler(_lowercase , total_num_steps=_lowercase , warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. __UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase = accelerator.prepare( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) # We need to keep track of how many total steps we have iterated over __UpperCamelCase = 0 # We also need to keep track of the stating epoch so files are named properly __UpperCamelCase = 0 # Now we train the model __UpperCamelCase = evaluate.load('glue' , 'mrpc' ) __UpperCamelCase = 0 __UpperCamelCase = {} for epoch in range(_lowercase , _lowercase ): model.train() for step, batch in enumerate(_lowercase ): __UpperCamelCase = model(**_lowercase ) __UpperCamelCase = outputs.loss __UpperCamelCase = loss / gradient_accumulation_steps accelerator.backward(_lowercase ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 model.eval() __UpperCamelCase = 0 for step, batch in enumerate(_lowercase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): __UpperCamelCase = model(**_lowercase ) __UpperCamelCase = outputs.logits.argmax(dim=-1 ) # It is slightly faster to call this once, than multiple times __UpperCamelCase, __UpperCamelCase = accelerator.gather( (predictions, batch['labels']) ) # If we are in a multiprocess environment, the last batch has duplicates if accelerator.use_distributed: if step == len(_lowercase ) - 1: __UpperCamelCase = predictions[: len(eval_dataloader.dataset ) - samples_seen] __UpperCamelCase = references[: len(eval_dataloader.dataset ) - samples_seen] else: samples_seen += references.shape[0] metric.add_batch( predictions=_lowercase , references=_lowercase , ) __UpperCamelCase = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f'''epoch {epoch}:''' , _lowercase ) __UpperCamelCase = eval_metric['accuracy'] if best_performance < eval_metric["accuracy"]: __UpperCamelCase = eval_metric['accuracy'] if args.performance_lower_bound is not None: assert ( args.performance_lower_bound <= best_performance ), f'''Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}''' accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir , 'all_results.json' ) , 'w' ) as f: json.dump(_lowercase , _lowercase ) def _A ( ) -> List[str]: """simple docstring""" __UpperCamelCase = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' ) parser.add_argument( '--model_name_or_path' , type=_lowercase , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=_lowercase , ) parser.add_argument( '--output_dir' , type=_lowercase , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , ) parser.add_argument( '--performance_lower_bound' , type=_lowercase , default=_lowercase , help='Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.' 
, ) parser.add_argument( '--num_epochs' , type=_lowercase , default=3 , help='Number of train epochs.' , ) __UpperCamelCase = parser.parse_args() __UpperCamelCase = {'lr': 2e-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16} training_function(_lowercase , _lowercase ) if __name__ == "__main__": main()
1
1
import argparse import logging import os from pathlib import Path from typing import Any, Dict import pytorch_lightning as pl from pytorch_lightning.utilities import rank_zero_info from transformers import ( AdamW, AutoConfig, AutoModel, AutoModelForPreTraining, AutoModelForQuestionAnswering, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoModelForTokenClassification, AutoModelWithLMHead, AutoTokenizer, PretrainedConfig, PreTrainedTokenizer, ) from transformers.optimization import ( Adafactor, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) from transformers.utils.versions import require_version __snake_case = logging.getLogger(__name__) require_version('''pytorch_lightning>=1.0.4''') __snake_case = { '''base''': AutoModel, '''sequence-classification''': AutoModelForSequenceClassification, '''question-answering''': AutoModelForQuestionAnswering, '''pretraining''': AutoModelForPreTraining, '''token-classification''': AutoModelForTokenClassification, '''language-modeling''': AutoModelWithLMHead, '''summarization''': AutoModelForSeqaSeqLM, '''translation''': AutoModelForSeqaSeqLM, } # update this and the import above to support new schedulers from transformers.optimization __snake_case = { '''linear''': get_linear_schedule_with_warmup, '''cosine''': get_cosine_schedule_with_warmup, '''cosine_w_restarts''': get_cosine_with_hard_restarts_schedule_with_warmup, '''polynomial''': get_polynomial_decay_schedule_with_warmup, # '': get_constant_schedule, # not supported for now # '': get_constant_schedule_with_warmup, # not supported for now } __snake_case = sorted(arg_to_scheduler.keys()) __snake_case = '''{''' + ''', '''.join(arg_to_scheduler_choices) + '''}''' class __lowerCamelCase (pl.LightningModule ): def __init__( self: Union[str, Any],A_: argparse.Namespace,A_: List[str]=None,A_: List[Any]="base",A_: int=None,A_: Any=None,A_: Dict=None,**A_: Union[str, Any],): '''simple docstring''' super().__init__() # TODO: move to self.save_hyperparameters() # self.save_hyperparameters() # can also expand arguments into trainer signature for easier reading self.save_hyperparameters(A_ ) __UpperCamelCase = 0 __UpperCamelCase = Path(self.hparams.output_dir ) __UpperCamelCase = self.hparams.cache_dir if self.hparams.cache_dir else None if config is None: __UpperCamelCase = AutoConfig.from_pretrained( self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path,**({'num_labels': num_labels} if num_labels is not None else {}),cache_dir=A_,**A_,) else: __UpperCamelCase = config __UpperCamelCase = ('encoder_layerdrop', 'decoder_layerdrop', 'dropout', 'attention_dropout') for p in extra_model_params: if getattr(self.hparams,A_,A_ ): assert hasattr(self.config,A_ ), F'''model config doesn\'t have a `{p}` attribute''' setattr(self.config,A_,getattr(self.hparams,A_ ) ) if tokenizer is None: __UpperCamelCase = AutoTokenizer.from_pretrained( self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path,cache_dir=A_,) else: __UpperCamelCase = tokenizer __UpperCamelCase = MODEL_MODES[mode] if model is None: __UpperCamelCase = self.model_type.from_pretrained( self.hparams.model_name_or_path,from_tf=bool('.ckpt' in self.hparams.model_name_or_path ),config=self.config,cache_dir=A_,) else: __UpperCamelCase = model def snake_case_ ( self: str,*A_: Dict,**A_: Any ): '''simple docstring''' __UpperCamelCase = 
self.model_type.from_pretrained(*A_,**A_ ) def snake_case_ ( self: str ): '''simple docstring''' __UpperCamelCase = arg_to_scheduler[self.hparams.lr_scheduler] __UpperCamelCase = get_schedule_func( self.opt,num_warmup_steps=self.hparams.warmup_steps,num_training_steps=self.total_steps() ) __UpperCamelCase = {'scheduler': scheduler, 'interval': 'step', 'frequency': 1} return scheduler def snake_case_ ( self: Dict ): '''simple docstring''' __UpperCamelCase = self.model __UpperCamelCase = ['bias', 'LayerNorm.weight'] __UpperCamelCase = [ { 'params': [ p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay ) ], # check this named paramters 'weight_decay': self.hparams.weight_decay, }, { 'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )], 'weight_decay': 0.0, }, ] if self.hparams.adafactor: __UpperCamelCase = Adafactor( A_,lr=self.hparams.learning_rate,scale_parameter=A_,relative_step=A_ ) else: __UpperCamelCase = AdamW( A_,lr=self.hparams.learning_rate,eps=self.hparams.adam_epsilon ) __UpperCamelCase = optimizer __UpperCamelCase = self.get_lr_scheduler() return [optimizer], [scheduler] def snake_case_ ( self: int,A_: int,A_: str ): '''simple docstring''' return self.validation_step(A_,A_ ) def snake_case_ ( self: Any,A_: Any ): '''simple docstring''' return self.validation_end(A_ ) def snake_case_ ( self: Optional[Any] ): '''simple docstring''' __UpperCamelCase = max(1,self.hparams.gpus ) # TODO: consider num_tpu_cores __UpperCamelCase = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs def snake_case_ ( self: List[str],A_: Optional[Any] ): '''simple docstring''' if stage == "test": __UpperCamelCase = len(self.test_dataloader().dataset ) else: __UpperCamelCase = self.get_dataloader('train',self.hparams.train_batch_size,shuffle=A_ ) __UpperCamelCase = len(self.train_dataloader().dataset ) def snake_case_ ( self: Optional[Any],A_: str,A_: int,A_: bool = False ): '''simple docstring''' raise NotImplementedError('You must implement this for your task' ) def snake_case_ ( self: int ): '''simple docstring''' return self.train_loader def snake_case_ ( self: Optional[Any] ): '''simple docstring''' return self.get_dataloader('dev',self.hparams.eval_batch_size,shuffle=A_ ) def snake_case_ ( self: Any ): '''simple docstring''' return self.get_dataloader('test',self.hparams.eval_batch_size,shuffle=A_ ) def snake_case_ ( self: List[str],A_: Dict ): '''simple docstring''' return os.path.join( self.hparams.data_dir,'cached_{}_{}_{}'.format( A_,list(filter(A_,self.hparams.model_name_or_path.split('/' ) ) ).pop(),str(self.hparams.max_seq_length ),),) @pl.utilities.rank_zero_only def snake_case_ ( self: Optional[Any],A_: Dict[str, Any] ): '''simple docstring''' __UpperCamelCase = self.output_dir.joinpath('best_tfmr' ) __UpperCamelCase = self.step_count self.model.save_pretrained(A_ ) self.tokenizer.save_pretrained(A_ ) @staticmethod def snake_case_ ( A_: List[Any],A_: Tuple ): '''simple docstring''' parser.add_argument( '--model_name_or_path',default=A_,type=A_,required=A_,help='Path to pretrained model or model identifier from huggingface.co/models',) parser.add_argument( '--config_name',default='',type=A_,help='Pretrained config name or path if not the same as model_name' ) parser.add_argument( '--tokenizer_name',default=A_,type=A_,help='Pretrained tokenizer name or path if not the same as model_name',) parser.add_argument( 
'--cache_dir',default=str(Path(A_ ).parent / 'test_run' / 'cache' ),type=A_,help='Where do you want to store the pre-trained models downloaded from huggingface.co',) parser.add_argument( '--encoder_layerdrop',type=A_,help='Encoder layer dropout probability (Optional). Goes into model.config',) parser.add_argument( '--decoder_layerdrop',type=A_,help='Decoder layer dropout probability (Optional). Goes into model.config',) parser.add_argument( '--dropout',type=A_,help='Dropout probability (Optional). Goes into model.config',) parser.add_argument( '--attention_dropout',type=A_,help='Attention dropout probability (Optional). Goes into model.config',) parser.add_argument('--learning_rate',default=5E-5,type=A_,help='The initial learning rate for Adam.' ) parser.add_argument( '--lr_scheduler',default='linear',choices=A_,metavar=A_,type=A_,help='Learning rate scheduler',) parser.add_argument('--weight_decay',default=0.0,type=A_,help='Weight decay if we apply some.' ) parser.add_argument('--adam_epsilon',default=1E-8,type=A_,help='Epsilon for Adam optimizer.' ) parser.add_argument('--warmup_steps',default=0,type=A_,help='Linear warmup over warmup_steps.' ) parser.add_argument('--num_workers',default=4,type=A_,help='kwarg passed to DataLoader' ) parser.add_argument('--num_train_epochs',dest='max_epochs',default=3,type=A_ ) parser.add_argument('--train_batch_size',default=32,type=A_ ) parser.add_argument('--eval_batch_size',default=32,type=A_ ) parser.add_argument('--adafactor',action='store_true' ) class __lowerCamelCase (pl.Callback ): def snake_case_ ( self: Optional[int],A_: Tuple,A_: Optional[Any] ): '''simple docstring''' if ( trainer.is_global_zero and trainer.global_rank == 0 ): # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed. pl_module.model.rag.retriever.init_retrieval() # better to use hook functions. class __lowerCamelCase (pl.Callback ): def snake_case_ ( self: Optional[int],A_: Union[str, Any],A_: int ): '''simple docstring''' for name, param in pl_module.model.rag.named_parameters(): if param.grad is None: print(A_ ) class __lowerCamelCase (pl.Callback ): def snake_case_ ( self: str,A_: List[str],A_: List[str] ): '''simple docstring''' __UpperCamelCase = trainer.lr_schedulers[0]['scheduler'] __UpperCamelCase = {F'''lr_group_{i}''': lr for i, lr in enumerate(lr_scheduler.get_lr() )} pl_module.logger.log_metrics(A_ ) def snake_case_ ( self: Any,A_: pl.Trainer,A_: pl.LightningModule ): '''simple docstring''' rank_zero_info('***** Validation results *****' ) __UpperCamelCase = trainer.callback_metrics # Log results for key in sorted(A_ ): if key not in ["log", "progress_bar"]: rank_zero_info('{} = {}\n'.format(A_,str(metrics[key] ) ) ) def snake_case_ ( self: Dict,A_: pl.Trainer,A_: pl.LightningModule ): '''simple docstring''' rank_zero_info('***** Test results *****' ) __UpperCamelCase = trainer.callback_metrics # Log and save results to file __UpperCamelCase = os.path.join(pl_module.hparams.output_dir,'test_results.txt' ) with open(A_,'w' ) as writer: for key in sorted(A_ ): if key not in ["log", "progress_bar"]: rank_zero_info('{} = {}\n'.format(A_,str(metrics[key] ) ) ) writer.write('{} = {}\n'.format(A_,str(metrics[key] ) ) ) def _A ( _lowercase , _lowercase ) -> None: """simple docstring""" parser.add_argument( '--output_dir' , default=str(Path(_lowercase ).parent / 'test_run' / 'model_checkpoints' ) , type=_lowercase , help='The output directory where the model predictions and checkpoints will be written.' 
, ) parser.add_argument( '--fp16' , action='store_true' , help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit' , ) parser.add_argument( '--fp16_opt_level' , type=_lowercase , default='O2' , help=( 'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].' 'See details at https://nvidia.github.io/apex/amp.html' ) , ) parser.add_argument('--n_tpu_cores' , dest='tpu_cores' , type=_lowercase ) parser.add_argument('--max_grad_norm' , dest='gradient_clip_val' , default=1.0 , type=_lowercase , help='Max gradient norm' ) parser.add_argument('--do_train' , action='store_true' , help='Whether to run training.' ) parser.add_argument('--do_predict' , action='store_true' , help='Whether to run predictions on the test set.' ) parser.add_argument( '--gradient_accumulation_steps' , dest='accumulate_grad_batches' , type=_lowercase , default=1 , help='Number of updates steps to accumulate before performing a backward/update pass.' , ) parser.add_argument('--seed' , type=_lowercase , default=42 , help='random seed for initialization' ) parser.add_argument( '--data_dir' , default=str(Path(_lowercase ).parent / 'test_run' / 'dummy-train-data' ) , type=_lowercase , help='The input data dir. Should contain the training files for the CoNLL-2003 NER task.' , ) def _A ( _lowercase , _lowercase , _lowercase=None , _lowercase=True , _lowercase=[] , _lowercase=None , _lowercase=None , **_lowercase , ) -> List[str]: """simple docstring""" pl.seed_everything(args.seed ) # init model __UpperCamelCase = Path(model.hparams.output_dir ) odir.mkdir(exist_ok=_lowercase ) # add custom checkpoints if checkpoint_callback is None: __UpperCamelCase = pl.callbacks.ModelCheckpoint( filepath=args.output_dir , prefix='checkpoint' , monitor='val_loss' , mode='min' , save_top_k=1 ) if early_stopping_callback: extra_callbacks.append(_lowercase ) if logging_callback is None: __UpperCamelCase = LoggingCallback() __UpperCamelCase = {} if args.fpaa: __UpperCamelCase = 16 if args.gpus > 1: __UpperCamelCase = 'auto' __UpperCamelCase = 'ddp' __UpperCamelCase = args.accumulate_grad_batches __UpperCamelCase = None __UpperCamelCase = 'auto' __UpperCamelCase = pl.Trainer.from_argparse_args( _lowercase , weights_summary=_lowercase , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=_lowercase , val_check_interval=1 , num_sanity_val_steps=2 , **_lowercase , ) if args.do_train: trainer.fit(_lowercase ) else: print('RAG modeling tests with new set functions successfuly executed!' ) return trainer
1
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments from transformers.testing_utils import TestCasePlus, require_torch, slow from transformers.utils import is_datasets_available if is_datasets_available(): import datasets class __lowerCamelCase (_a ): @slow @require_torch def snake_case_ ( self: Union[str, Any] ): '''simple docstring''' __UpperCamelCase = EncoderDecoderModel.from_encoder_decoder_pretrained('prajjwal1/bert-tiny','prajjwal1/bert-tiny' ) __UpperCamelCase = BertTokenizer.from_pretrained('bert-base-uncased' ) __UpperCamelCase = bertabert.config.encoder.vocab_size __UpperCamelCase = tokenizer.sep_token_id __UpperCamelCase = tokenizer.cls_token_id __UpperCamelCase = 128 __UpperCamelCase = datasets.load_dataset('cnn_dailymail','3.0.0',split='train[:1%]' ) __UpperCamelCase = datasets.load_dataset('cnn_dailymail','3.0.0',split='validation[:1%]' ) __UpperCamelCase = train_dataset.select(range(32 ) ) __UpperCamelCase = val_dataset.select(range(16 ) ) __UpperCamelCase = 4 def _map_to_encoder_decoder_inputs(A_: Dict ): # Tokenizer will automatically set [BOS] <text> [EOS] __UpperCamelCase = tokenizer(batch['article'],padding='max_length',truncation=A_,max_length=512 ) __UpperCamelCase = tokenizer(batch['highlights'],padding='max_length',truncation=A_,max_length=128 ) __UpperCamelCase = inputs.input_ids __UpperCamelCase = inputs.attention_mask __UpperCamelCase = outputs.input_ids __UpperCamelCase = outputs.input_ids.copy() __UpperCamelCase = [ [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['labels'] ] __UpperCamelCase = outputs.attention_mask assert all(len(A_ ) == 512 for x in inputs.input_ids ) assert all(len(A_ ) == 128 for x in outputs.input_ids ) return batch def _compute_metrics(A_: str ): __UpperCamelCase = pred.label_ids __UpperCamelCase = pred.predictions # all unnecessary tokens are removed __UpperCamelCase = tokenizer.batch_decode(A_,skip_special_tokens=A_ ) __UpperCamelCase = tokenizer.batch_decode(A_,skip_special_tokens=A_ ) __UpperCamelCase = sum([int(pred_str[i] == label_str[i] ) for i in range(len(A_ ) )] ) / len(A_ ) return {"accuracy": accuracy} # map train dataset __UpperCamelCase = train_dataset.map( _map_to_encoder_decoder_inputs,batched=A_,batch_size=A_,remove_columns=['article', 'highlights'],) train_dataset.set_format( type='torch',columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'],) # same for validation dataset __UpperCamelCase = val_dataset.map( _map_to_encoder_decoder_inputs,batched=A_,batch_size=A_,remove_columns=['article', 'highlights'],) val_dataset.set_format( type='torch',columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'],) __UpperCamelCase = self.get_auto_remove_tmp_dir() __UpperCamelCase = SeqaSeqTrainingArguments( output_dir=A_,per_device_train_batch_size=A_,per_device_eval_batch_size=A_,predict_with_generate=A_,evaluation_strategy='steps',do_train=A_,do_eval=A_,warmup_steps=0,eval_steps=2,logging_steps=2,) # instantiate trainer __UpperCamelCase = SeqaSeqTrainer( model=A_,args=A_,compute_metrics=_compute_metrics,train_dataset=A_,eval_dataset=A_,tokenizer=A_,) # start training trainer.train()
1
1
import numpy as np
from PIL import Image


def maxpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """Max pooling over a square matrix with the given window size and stride."""
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape))

    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling matrix
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr


def avgpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """Average pooling over a square matrix with the given window size and stride."""
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape))

    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling matrix
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr


# Main Function
if __name__ == "__main__":
    from doctest import testmod

    testmod(name="avgpooling", verbose=True)

    # Loading the image
    image = Image.open("path_to_image")

    # Converting the image to numpy array and maxpooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()

    # Converting the image to numpy array and averagepooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
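# A tiny worked example (illustrative sketch): 2x2 max pooling with stride 2
# on a 4x4 matrix keeps the largest value of each non-overlapping window.
_demo = np.array(
    [
        [1, 2, 5, 6],
        [3, 4, 7, 8],
        [9, 10, 13, 14],
        [11, 12, 15, 16],
    ]
)
# expected output: [[4, 8], [12, 16]]
assert np.array_equal(maxpooling(_demo, size=2, stride=2), np.array([[4.0, 8.0], [12.0, 16.0]]))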
1
def solution(n: int = 100) -> int:
    """Return the difference between the square of the sum and the sum of the
    squares of the first n natural numbers."""
    sum_of_squares = 0
    sum_of_ints = 0
    for i in range(1, n + 1):
        sum_of_squares += i**2
        sum_of_ints += i
    return sum_of_ints**2 - sum_of_squares


if __name__ == "__main__":
    print(f"""{solution() = }""")
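# Closed-form cross-check (an added sketch using the standard identities):
# sum(1..n) = n(n+1)/2 and sum of squares = n(n+1)(2n+1)/6, so the loop above
# can be verified in O(1); 25164150 is the known result for n = 100.
def solution_closed_form(n: int = 100) -> int:
    square_of_sum = (n * (n + 1) // 2) ** 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return square_of_sum - sum_of_squares


assert solution_closed_form(100) == solution(100) == 25164150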
1
1
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    """Newton-Laplace formula: c = sqrt(K / rho), where K is the bulk modulus
    of the fluid (Pa) and rho is its density (kg/m^3)."""
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")
    return (bulk_modulus / density) ** 0.5


if __name__ == "__main__":
    import doctest

    doctest.testmod()
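# Example values (approximate, for illustration only): water at room
# temperature has a bulk modulus of roughly 2.15e9 Pa and a density of roughly
# 998 kg/m^3, giving a speed of sound near 1470 m/s -- close to the commonly
# quoted ~1480 m/s.
print(speed_of_sound_in_a_fluid(density=998, bulk_modulus=2.15e9))  # ~1467.8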
1
def euclidean_distance_sqr(point1, point2) -> float:
    """Squared Euclidean distance (avoids the sqrt until the very end)."""
    return (point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2


def column_based_sort(array, column=0):
    return sorted(array, key=lambda point: point[column])


def dis_between_closest_pair(points, points_counts, min_dis=float("inf")):
    """Brute-force O(n^2) closest squared distance among the first points_counts points."""
    for i in range(points_counts - 1):
        for j in range(i + 1, points_counts):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def dis_between_closest_in_strip(points, points_counts, min_dis=float("inf")):
    """Closest squared distance inside the strip; only a constant number of
    neighbours (at most 6) need to be checked per point."""
    for i in range(min(6, points_counts - 1), points_counts):
        for j in range(max(0, i - 6), i):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts):
    # base case: fall back to brute force
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x, points_counts)

    # recursion
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(
        points_sorted_on_x, points_sorted_on_y[:mid], mid
    )
    closest_in_right = closest_pair_of_points_sqr(
        points_sorted_on_x, points_sorted_on_y[mid:], points_counts - mid
    )
    closest_pair_dis = min(closest_in_left, closest_in_right)

    # points whose x-distance to the dividing line is below the best distance so far
    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0]) < closest_pair_dis:
            cross_strip.append(point)

    closest_in_strip = dis_between_closest_in_strip(
        cross_strip, len(cross_strip), closest_pair_dis
    )
    return min(closest_pair_dis, closest_in_strip)


def closest_pair_of_points(points, points_counts):
    points_sorted_on_x = column_based_sort(points, column=0)
    points_sorted_on_y = column_based_sort(points, column=1)
    return (
        closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts)
    ) ** 0.5


if __name__ == "__main__":
    points = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
    print("Distance:", closest_pair_of_points(points, len(points)))
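# Cross-check sketch (added for illustration): on small random inputs the
# divide-and-conquer answer should agree with a plain O(n^2) scan.
import random

_pts = [(random.uniform(0, 100), random.uniform(0, 100)) for _ in range(50)]
_brute = min(
    euclidean_distance_sqr(a, b) for i, a in enumerate(_pts) for b in _pts[i + 1 :]
) ** 0.5
print("divide and conquer:", closest_pair_of_points(_pts, len(_pts)))
print("brute force:       ", _brute)  # the two values should agree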
1
1
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_efficientnet": [
        "EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EfficientNetConfig",
        "EfficientNetOnnxConfig",
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_efficientnet"] = ["EfficientNetImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_efficientnet"] = [
        "EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EfficientNetForImageClassification",
        "EfficientNetModel",
        "EfficientNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_efficientnet import (
        EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
        EfficientNetConfig,
        EfficientNetOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_efficientnet import EfficientNetImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_efficientnet import (
            EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            EfficientNetForImageClassification,
            EfficientNetModel,
            EfficientNetPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
1
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __snake_case = logging.get_logger(__name__) __snake_case = { '''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/config.json''', '''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/config.json''', '''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/config.json''', '''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/config.json''', '''bert-base-multilingual-uncased''': '''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json''', '''bert-base-multilingual-cased''': '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json''', '''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/config.json''', '''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/config.json''', '''bert-large-uncased-whole-word-masking''': ( '''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json''' ), '''bert-large-cased-whole-word-masking''': ( '''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json''' ), '''bert-large-uncased-whole-word-masking-finetuned-squad''': ( '''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json''' ), '''bert-large-cased-whole-word-masking-finetuned-squad''': ( '''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json''' ), '''bert-base-cased-finetuned-mrpc''': '''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json''', '''bert-base-german-dbmdz-cased''': '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json''', '''bert-base-german-dbmdz-uncased''': '''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json''', '''cl-tohoku/bert-base-japanese''': '''https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json''', '''cl-tohoku/bert-base-japanese-whole-word-masking''': ( '''https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json''' ), '''cl-tohoku/bert-base-japanese-char''': ( '''https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json''' ), '''cl-tohoku/bert-base-japanese-char-whole-word-masking''': ( '''https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json''' ), '''TurkuNLP/bert-base-finnish-cased-v1''': ( '''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json''' ), '''TurkuNLP/bert-base-finnish-uncased-v1''': ( '''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json''' ), '''wietsedv/bert-base-dutch-cased''': '''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json''', # See all BERT models at https://huggingface.co/models?filter=bert } class __lowerCamelCase (_a ): _lowercase = """bert""" def __init__( self: Any,A_: Dict=3_0522,A_: Optional[Any]=768,A_: Union[str, Any]=12,A_: List[Any]=12,A_: Optional[int]=3072,A_: Union[str, Any]="gelu",A_: List[str]=0.1,A_: Dict=0.1,A_: Optional[int]=512,A_: Optional[Any]=2,A_: Union[str, Any]=0.0_2,A_: List[Any]=1E-12,A_: Optional[int]=0,A_: List[Any]="absolute",A_: str=True,A_: Union[str, Any]=None,**A_: int,): 
'''simple docstring''' super().__init__(pad_token_id=A_,**A_ ) __UpperCamelCase = vocab_size __UpperCamelCase = hidden_size __UpperCamelCase = num_hidden_layers __UpperCamelCase = num_attention_heads __UpperCamelCase = hidden_act __UpperCamelCase = intermediate_size __UpperCamelCase = hidden_dropout_prob __UpperCamelCase = attention_probs_dropout_prob __UpperCamelCase = max_position_embeddings __UpperCamelCase = type_vocab_size __UpperCamelCase = initializer_range __UpperCamelCase = layer_norm_eps __UpperCamelCase = position_embedding_type __UpperCamelCase = use_cache __UpperCamelCase = classifier_dropout class __lowerCamelCase (_a ): @property def snake_case_ ( self: Optional[int] ): '''simple docstring''' if self.task == "multiple-choice": __UpperCamelCase = {0: 'batch', 1: 'choice', 2: 'sequence'} else: __UpperCamelCase = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis), ] )
1
1
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {"configuration_unispeech": ["UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP", "UniSpeechConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_unispeech"] = [
        "UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST",
        "UniSpeechForCTC",
        "UniSpeechForPreTraining",
        "UniSpeechForSequenceClassification",
        "UniSpeechModel",
        "UniSpeechPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_unispeech import (
            UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
            UniSpeechForCTC,
            UniSpeechForPreTraining,
            UniSpeechForSequenceClassification,
            UniSpeechModel,
            UniSpeechPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
1
def excel_title_to_column(column_title: str) -> int:
    """Convert an Excel-style column title (e.g. "AB") to its column number."""
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0

    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1

    return answer


if __name__ == "__main__":
    from doctest import testmod

    testmod()
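# Round-trip sketch (added for illustration; the inverse helper below is not
# part of the original file): converting a column number back to its title
# should compose with excel_title_to_column to the identity.
def column_to_excel_title(column_number: int) -> str:
    title = ""
    while column_number > 0:
        # shift to 0-based before dividing so that 26 maps to "Z", not "A0"
        column_number, remainder = divmod(column_number - 1, 26)
        title = chr(65 + remainder) + title
    return title


assert excel_title_to_column("AB") == 28  # 1 * 26 + 2
assert column_to_excel_title(excel_title_to_column("XFD")) == "XFD"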
1
1
from __future__ import annotations

import numpy as np


def relu(vector: list[float]) -> np.ndarray:
    """Element-wise rectified linear unit: max(0, x)."""
    return np.maximum(0, vector)


if __name__ == "__main__":
    print(np.array(relu([-1, 0, 5])))  # --> [0, 0, 5]
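# np.maximum broadcasts, so the same relu works unchanged on 2-D arrays
# (a quick added illustration):
assert np.array_equal(
    relu(np.array([[-2.0, 3.0], [0.5, -0.1]])), np.array([[0.0, 3.0], [0.5, 0.0]])
)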
1
import argparse import requests import torch # pip3 install salesforce-lavis # I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis from lavis.models import load_model_and_preprocess from PIL import Image from transformers import ( AutoTokenizer, BlipaConfig, BlipaForConditionalGeneration, BlipaProcessor, BlipaVisionConfig, BlipImageProcessor, OPTConfig, TaConfig, ) from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD def _A ( ) -> int: """simple docstring""" __UpperCamelCase = 'https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png' __UpperCamelCase = Image.open(requests.get(_lowercase , stream=_lowercase ).raw ).convert('RGB' ) return image def _A ( _lowercase ) -> int: """simple docstring""" __UpperCamelCase = [] # fmt: off # vision encoder rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding') ) rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding') ) rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight') ) rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias') ) rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight') ) rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias') ) for i in range(config.vision_config.num_hidden_layers ): rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.weight''', f'''vision_model.encoder.layers.{i}.layer_norm1.weight''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.bias''', f'''vision_model.encoder.layers.{i}.layer_norm1.bias''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.weight''', f'''vision_model.encoder.layers.{i}.layer_norm2.weight''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.bias''', f'''vision_model.encoder.layers.{i}.layer_norm2.bias''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.attn.qkv.weight''', f'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.weight''', f'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) ) rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.bias''', f'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') ) # QFormer rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.layernorm.weight') ) rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.layernorm.bias') ) # fmt: on return rename_keys def _A ( _lowercase , _lowercase , _lowercase ) -> Optional[int]: """simple docstring""" __UpperCamelCase = dct.pop(_lowercase ) __UpperCamelCase = val def _A ( _lowercase , _lowercase ) -> int: """simple docstring""" for i in range(config.vision_config.num_hidden_layers ): # read in original q and v biases __UpperCamelCase = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.q_bias''' ) __UpperCamelCase = 
state_dict.pop(f'''visual_encoder.blocks.{i}.attn.v_bias''' ) # next, set bias in the state dict __UpperCamelCase = torch.cat((q_bias, torch.zeros_like(_lowercase , requires_grad=_lowercase ), v_bias) ) __UpperCamelCase = qkv_bias def _A ( _lowercase , _lowercase ) -> Any: """simple docstring""" __UpperCamelCase = 3_64 if 'coco' in model_name else 2_24 __UpperCamelCase = BlipaVisionConfig(image_size=_lowercase ).to_dict() # make sure the models have proper bos_token_id and eos_token_id set (important for generation) # seems like flan-T5 models don't have bos_token_id properly set? if "opt-2.7b" in model_name: __UpperCamelCase = OPTConfig.from_pretrained('facebook/opt-2.7b' , eos_token_id=_lowercase ).to_dict() elif "opt-6.7b" in model_name: __UpperCamelCase = OPTConfig.from_pretrained('facebook/opt-6.7b' , eos_token_id=_lowercase ).to_dict() elif "t5-xl" in model_name: __UpperCamelCase = TaConfig.from_pretrained('google/flan-t5-xl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict() elif "t5-xxl" in model_name: __UpperCamelCase = TaConfig.from_pretrained('google/flan-t5-xxl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict() __UpperCamelCase = BlipaConfig(vision_config=_lowercase , text_config=_lowercase ) return config, image_size @torch.no_grad() def _A ( _lowercase , _lowercase=None , _lowercase=False ) -> Union[str, Any]: """simple docstring""" __UpperCamelCase = ( AutoTokenizer.from_pretrained('facebook/opt-2.7b' ) if 'opt' in model_name else AutoTokenizer.from_pretrained('google/flan-t5-xl' ) ) __UpperCamelCase = tokenizer('\n' , add_special_tokens=_lowercase ).input_ids[0] __UpperCamelCase, __UpperCamelCase = get_blipa_config(_lowercase , eos_token_id=_lowercase ) __UpperCamelCase = BlipaForConditionalGeneration(_lowercase ).eval() __UpperCamelCase = { 'blip2-opt-2.7b': ('blip2_opt', 'pretrain_opt2.7b'), 'blip2-opt-6.7b': ('blip2_opt', 'pretrain_opt6.7b'), 'blip2-opt-2.7b-coco': ('blip2_opt', 'caption_coco_opt2.7b'), 'blip2-opt-6.7b-coco': ('blip2_opt', 'caption_coco_opt6.7b'), 'blip2-flan-t5-xl': ('blip2_t5', 'pretrain_flant5xl'), 'blip2-flan-t5-xl-coco': ('blip2_t5', 'caption_coco_flant5xl'), 'blip2-flan-t5-xxl': ('blip2_t5', 'pretrain_flant5xxl'), } __UpperCamelCase, __UpperCamelCase = model_name_to_original[model_name] # load original model print('Loading original model...' ) __UpperCamelCase = 'cuda' if torch.cuda.is_available() else 'cpu' __UpperCamelCase, __UpperCamelCase, __UpperCamelCase = load_model_and_preprocess( name=_lowercase , model_type=_lowercase , is_eval=_lowercase , device=_lowercase ) original_model.eval() print('Done!' 
) # update state dict keys __UpperCamelCase = original_model.state_dict() __UpperCamelCase = create_rename_keys(_lowercase ) for src, dest in rename_keys: rename_key(_lowercase , _lowercase , _lowercase ) # some keys can be renamed efficiently for key, val in state_dict.copy().items(): __UpperCamelCase = state_dict.pop(_lowercase ) if key.startswith('Qformer.bert' ): __UpperCamelCase = key.replace('Qformer.bert' , 'qformer' ) if "attention.self" in key: __UpperCamelCase = key.replace('self' , 'attention' ) if "opt_proj" in key: __UpperCamelCase = key.replace('opt_proj' , 'language_projection' ) if "t5_proj" in key: __UpperCamelCase = key.replace('t5_proj' , 'language_projection' ) if key.startswith('opt' ): __UpperCamelCase = key.replace('opt' , 'language' ) if key.startswith('t5' ): __UpperCamelCase = key.replace('t5' , 'language' ) __UpperCamelCase = val # read in qv biases read_in_q_v_bias(_lowercase , _lowercase ) __UpperCamelCase, __UpperCamelCase = hf_model.load_state_dict(_lowercase , strict=_lowercase ) assert len(_lowercase ) == 0 assert unexpected_keys == ["qformer.embeddings.position_ids"] __UpperCamelCase = load_demo_image() __UpperCamelCase = vis_processors['eval'](_lowercase ).unsqueeze(0 ).to(_lowercase ) __UpperCamelCase = tokenizer(['\n'] , return_tensors='pt' ).input_ids.to(_lowercase ) # create processor __UpperCamelCase = BlipImageProcessor( size={'height': image_size, 'width': image_size} , image_mean=_lowercase , image_std=_lowercase ) __UpperCamelCase = BlipaProcessor(image_processor=_lowercase , tokenizer=_lowercase ) __UpperCamelCase = processor(images=_lowercase , return_tensors='pt' ).pixel_values.to(_lowercase ) # make sure processor creates exact same pixel values assert torch.allclose(_lowercase , _lowercase ) original_model.to(_lowercase ) hf_model.to(_lowercase ) with torch.no_grad(): if "opt" in model_name: __UpperCamelCase = original_model({'image': original_pixel_values, 'text_input': ['']} ).logits __UpperCamelCase = hf_model(_lowercase , _lowercase ).logits else: __UpperCamelCase = original_model( {'image': original_pixel_values, 'text_input': ['\n'], 'text_output': ['\n']} ).logits __UpperCamelCase = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_00 ) __UpperCamelCase = hf_model(_lowercase , _lowercase , labels=_lowercase ).logits assert original_logits.shape == logits.shape print('First values of original logits:' , original_logits[0, :3, :3] ) print('First values of HF logits:' , logits[0, :3, :3] ) # assert values if model_name == "blip2-flan-t5-xl": __UpperCamelCase = torch.tensor( [[-41.58_50, -4.44_40, -8.99_22], [-47.43_22, -5.91_43, -1.73_40]] , device=_lowercase ) assert torch.allclose(logits[0, :3, :3] , _lowercase , atol=1e-4 ) elif model_name == "blip2-flan-t5-xl-coco": __UpperCamelCase = torch.tensor( [[-57.01_09, -9.89_67, -12.62_80], [-68.65_78, -12.71_91, -10.50_65]] , device=_lowercase ) else: # cast to same type __UpperCamelCase = logits.dtype assert torch.allclose(original_logits.to(_lowercase ) , _lowercase , atol=1e-2 ) print('Looks ok!' ) print('Generating a caption...' 
) __UpperCamelCase = '' __UpperCamelCase = tokenizer(_lowercase , return_tensors='pt' ).input_ids.to(_lowercase ) __UpperCamelCase = original_model.generate({'image': original_pixel_values} ) __UpperCamelCase = hf_model.generate( _lowercase , _lowercase , do_sample=_lowercase , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , ) print('Original generation:' , _lowercase ) __UpperCamelCase = input_ids.shape[1] __UpperCamelCase = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=_lowercase ) __UpperCamelCase = [text.strip() for text in output_text] print('HF generation:' , _lowercase ) if pytorch_dump_folder_path is not None: processor.save_pretrained(_lowercase ) hf_model.save_pretrained(_lowercase ) if push_to_hub: processor.push_to_hub(f'''nielsr/{model_name}''' ) hf_model.push_to_hub(f'''nielsr/{model_name}''' ) if __name__ == "__main__": __snake_case = argparse.ArgumentParser() __snake_case = [ '''blip2-opt-2.7b''', '''blip2-opt-6.7b''', '''blip2-opt-2.7b-coco''', '''blip2-opt-6.7b-coco''', '''blip2-flan-t5-xl''', '''blip2-flan-t5-xl-coco''', '''blip2-flan-t5-xxl''', ] parser.add_argument( '''--model_name''', default='''blip2-opt-2.7b''', choices=choices, type=str, help='''Path to hf config.json of model to convert''', ) parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether to push the model and processor to the hub after converting''', ) __snake_case = parser.parse_args() convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
1
1
import unittest
from typing import Dict, List, Optional, Union

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import BridgeTowerImageProcessor


class BridgeTowerImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        size_divisor: int = 32,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        do_center_crop: bool = True,
        image_mean: Optional[Union[float, List[float]]] = [0.48145466, 0.4578275, 0.40821073],
        image_std: Optional[Union[float, List[float]]] = [0.26862954, 0.26130258, 0.27577711],
        do_pad: bool = True,
        batch_size=7,
        min_resolution=30,
        max_resolution=400,
        num_channels=3,
    ):
        self.parent = parent
        self.do_resize = do_resize
        self.size = size if size is not None else {"shortest_edge": 288}
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "size_divisor": self.size_divisor,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """
        Computes the expected output height and width: the shortest edge is scaled to `size`, the result is
        capped at (1333 / 800) * size, and both dimensions are floored to a multiple of `size_divisor`.
        """
        if not batched:
            size = self.size["shortest_edge"]
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            scale = size / min(w, h)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size

            max_size = int((1333 / 800) * size)
            if max(newh, neww) > max_size:
                scale = max_size / max(newh, neww)
                newh = newh * scale
                neww = neww * scale

            newh, neww = int(newh + 0.5), int(neww + 0.5)
            expected_height, expected_width = (
                newh // self.size_divisor * self.size_divisor,
                neww // self.size_divisor * self.size_divisor,
            )
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width


@require_torch
@require_vision
class BridgeTowerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BridgeTowerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BridgeTowerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
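# A short usage sketch of the image processor exercised by the tests above: feed one
# random RGB image and inspect the processed shape. No checkpoint download is needed
# because the processor is built from its constructor defaults; the divisibility
# comment is an assumption based on the size_divisor resizing logic tested above.
import numpy as np
from PIL import Image

from transformers import BridgeTowerImageProcessor

image_processor = BridgeTowerImageProcessor()
random_image = Image.fromarray(np.random.randint(0, 256, (360, 500, 3), dtype=np.uint8))

pixel_values = image_processor(random_image, return_tensors="pt").pixel_values
print(pixel_values.shape)  # (batch, channels, height, width); height/width rounded to size_divisor multiples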
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple

import numpy as np
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask

import transformers
from transformers import (
    AutoConfig,
    AutoModelForTokenClassification,
    AutoTokenizer,
    DataCollatorWithPadding,
    EvalPrediction,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    set_seed,
)
from transformers.trainer_utils import is_main_process


logger = logging.getLogger(__name__)


@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    task_type: Optional[str] = field(
        default="NER", metadata={"help": "Task type to fine tune in training (e.g. NER, POS, etc)"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."}
    )
    labels: Optional[str] = field(
        default=None,
        metadata={"help": "Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."},
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )


def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    module = import_module("tasks")
    try:
        token_classification_task_clazz = getattr(module, model_args.task_type)
        token_classification_task: TokenClassificationTask = token_classification_task_clazz()
    except AttributeError:
        raise ValueError(
            f"Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
            f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}"
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Prepare CONLL-2003 task
    labels = token_classification_task.get_labels(data_args.labels)
    label_map: Dict[int, str] = dict(enumerate(labels))
    num_labels = len(labels)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        id2label=label_map,
        label2id={label: i for i, label in enumerate(labels)},
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast,
    )
    model = AutoModelForTokenClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # Get datasets
    train_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def align_predictions(predictions: np.ndarray, label_ids: np.ndarray) -> Tuple[List[int], List[int]]:
        preds = np.argmax(predictions, axis=2)
        batch_size, seq_len = preds.shape
        out_label_list = [[] for _ in range(batch_size)]
        preds_list = [[] for _ in range(batch_size)]

        for i in range(batch_size):
            for j in range(seq_len):
                if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
                    out_label_list[i].append(label_map[label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])

        return preds_list, out_label_list

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds_list, out_label_list = align_predictions(p.predictions, p.label_ids)
        return {
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_process_zero():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

            results.update(result)

    # Predict
    if training_args.do_predict:
        test_dataset = TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.test,
        )

        predictions, label_ids, metrics = trainer.predict(test_dataset)
        preds_list, _ = align_predictions(predictions, label_ids)

        output_test_results_file = os.path.join(training_args.output_dir, "test_results.txt")
        if trainer.is_world_process_zero():
            with open(output_test_results_file, "w") as writer:
                for key, value in metrics.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

        # Save predictions
        output_test_predictions_file = os.path.join(training_args.output_dir, "test_predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_test_predictions_file, "w") as writer:
                with open(os.path.join(data_args.data_dir, "test.txt"), "r") as f:
                    token_classification_task.write_predictions_to_file(writer, f, preds_list)

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
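# A standalone sketch of what align_predictions above does: logits are argmax-ed to
# label ids, and positions whose gold label equals CrossEntropyLoss's ignore index
# (-100, used for padding and subword continuations) are dropped before the seqeval
# metrics see the sequences. The tiny arrays and label map below are made up for
# illustration only.
import numpy as np

demo_label_map = {0: "O", 1: "B-PER", 2: "I-PER"}
ignore_index = -100

# (batch=1, seq_len=4, num_labels=3) fake logits; the last position is padding
demo_predictions = np.array([[[2.0, 0.1, 0.1], [0.1, 3.0, 0.2], [0.0, 0.1, 2.5], [0.3, 0.2, 0.1]]])
demo_label_ids = np.array([[0, 1, 2, ignore_index]])

demo_preds = np.argmax(demo_predictions, axis=2)
demo_preds_list = [
    [demo_label_map[demo_preds[i][j]] for j in range(demo_preds.shape[1]) if demo_label_ids[i, j] != ignore_index]
    for i in range(demo_preds.shape[0])
]
print(demo_preds_list)  # [['O', 'B-PER', 'I-PER']] -- the padded position is skipped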
import unittest

from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
        SqueezeBertForMaskedLM,
        SqueezeBertForMultipleChoice,
        SqueezeBertForQuestionAnswering,
        SqueezeBertForSequenceClassification,
        SqueezeBertForTokenClassification,
        SqueezeBertModel,
    )


class SqueezeBertModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=64,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        q_groups=2,
        k_groups=2,
        v_groups=2,
        post_attention_groups=2,
        intermediate_groups=4,
        output_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.q_groups = q_groups
        self.k_groups = k_groups
        self.v_groups = v_groups
        self.post_attention_groups = post_attention_groups
        self.intermediate_groups = intermediate_groups
        self.output_groups = output_groups

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return SqueezeBertConfig(
            embedding_size=self.hidden_size,
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            attention_probs_dropout_prob=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            q_groups=self.q_groups,
            k_groups=self.k_groups,
            v_groups=self.v_groups,
            post_attention_groups=self.post_attention_groups,
            intermediate_groups=self.intermediate_groups,
            output_groups=self.output_groups,
        )

    def create_and_check_squeezebert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_squeezebert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_squeezebert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_squeezebert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = SqueezeBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_squeezebert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = SqueezeBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_squeezebert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = SqueezeBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class SqueezeBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            SqueezeBertModel,
            SqueezeBertForMaskedLM,
            SqueezeBertForMultipleChoice,
            SqueezeBertForQuestionAnswering,
            SqueezeBertForSequenceClassification,
            SqueezeBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SqueezeBertModel,
            "fill-mask": SqueezeBertForMaskedLM,
            "question-answering": SqueezeBertForQuestionAnswering,
            "text-classification": SqueezeBertForSequenceClassification,
            "token-classification": SqueezeBertForTokenClassification,
            "zero-shot": SqueezeBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = True
    test_head_masking = False

    def setUp(self):
        self.model_tester = SqueezeBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=SqueezeBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_squeezebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_model(*config_and_inputs)

    def test_squeezebert_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_masked_lm(*config_and_inputs)

    def test_squeezebert_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_question_answering(*config_and_inputs)

    def test_squeezebert_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_sequence_classification(*config_and_inputs)

    def test_squeezebert_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_token_classification(*config_and_inputs)

    def test_squeezebert_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SqueezeBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_sentencepiece
@require_tokenizers
@require_torch
class SqueezeBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_classification_head(self):
        model = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli")

        input_ids = torch.tensor([[1, 29414, 232, 328, 740, 1140, 12695, 69, 13, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 3))
        self.assertEqual(output.shape, expected_shape)
        expected_tensor = torch.tensor([[0.6401, -0.0349, -0.6041]])
        self.assertTrue(torch.allclose(output, expected_tensor, atol=1e-4))
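# A hedged end-to-end sketch matching the integration test above: run the publicly
# released MNLI checkpoint on a premise/hypothesis pair and map the argmax logit to a
# label via config.id2label. Assumptions: the squeezebert/squeezebert-mnli repo also
# ships tokenizer files, and its config carries MNLI label names; the first run
# downloads the checkpoint.
import torch

from transformers import AutoTokenizer, SqueezeBertForSequenceClassification

mnli_tokenizer = AutoTokenizer.from_pretrained("squeezebert/squeezebert-mnli")
mnli_model = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli")
mnli_model.eval()

pair = mnli_tokenizer(
    "A soccer game with multiple males playing.", "Some men are playing a sport.", return_tensors="pt"
)
with torch.no_grad():
    logits = mnli_model(**pair).logits
print(mnli_model.config.id2label[logits.argmax(-1).item()])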
#
# This is a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes        # name
# #SBATCH --nodes=2                    # nodes
# #SBATCH --ntasks-per-node=1          # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10           # number of cores per tasks
# #SBATCH --gres=gpu:4                 # number of gpus
# #SBATCH --time 0:05:00               # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out           # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
#  --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
#  --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
#  torch-distributed-gpu-test.py'
#

import fcntl
import os
import socket

import torch
import torch.distributed as dist


def printflock(*msgs):
    """solves the multi-process interleaved print problem by flock-ing this script's own file"""
    with open(__file__, "r") as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)


local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
hostname = socket.gethostname()

gpu = f"[{hostname}-{local_rank}]"

try:
    # test distributed
    dist.init_process_group("nccl")
    dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
    dist.barrier()

    # test cuda is available and can allocate memory
    torch.cuda.is_available()
    torch.ones(1).cuda(local_rank)

    # global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()

    printflock(f"{gpu} is OK (global rank: {rank}/{world_size})")

    dist.barrier()
    if rank == 0:
        printflock(f"pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}")

except Exception:
    printflock(f"{gpu} is broken")
    raise
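# A minimal sketch (assumptions: a single machine, no SLURM) of driving the same
# init_process_group / all_reduce / barrier check programmatically with
# torch.multiprocessing.spawn instead of torch.distributed.run. The "gloo" backend is
# used so the sketch also runs on CPU-only boxes; swap in "nccl" plus per-rank
# torch.cuda.set_device(rank) to exercise GPUs.
import os

import torch
import torch.distributed as dist
import torch.multiprocessing as mp


def _worker(rank, world_size):
    os.environ["MASTER_ADDR"] = "127.0.0.1"
    os.environ["MASTER_PORT"] = "29500"
    dist.init_process_group("gloo", rank=rank, world_size=world_size)
    t = torch.ones(1)
    dist.all_reduce(t, op=dist.ReduceOp.SUM)
    assert t.item() == world_size  # every rank contributed its ones(1)
    dist.barrier()
    dist.destroy_process_group()


if __name__ == "__main__":
    mp.spawn(_worker, args=(2,), nprocs=2)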