code
stringlengths
86
54.5k
code_codestyle
int64
0
371
style_context
stringlengths
87
49.2k
style_context_codestyle
int64
0
349
label
int64
0
1
'''simple docstring''' from collections import Counter import numpy as np from sklearn import datasets from sklearn.model_selection import train_test_split snake_case__ = datasets.load_iris() snake_case__ = np.array(data["""data"""]) snake_case__ = np.array(data["""target"""]) snake_case__ = data["""target_names"""] snake_case__ , snake_case__ , snake_case__ , snake_case__ = train_test_split(X, y) def snake_case__ ( lowerCamelCase__ : Any , lowerCamelCase__ : List[str] ) -> Optional[int]: return np.linalg.norm(np.array(lowerCamelCase__ ) - np.array(lowerCamelCase__ ) ) def snake_case__ ( lowerCamelCase__ : int , lowerCamelCase__ : Dict , lowerCamelCase__ : int , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Optional[int]=5 ) -> Optional[int]: A_ : Optional[int] = zip(lowerCamelCase__ , lowerCamelCase__ ) # List of distances of all points from the point to be classified A_ : str = [] for data_point in data: A_ : int = euclidean_distance(data_point[0] , lowerCamelCase__ ) distances.append((distance, data_point[1]) ) # Choosing 'k' points with the least distances. A_ : int = [i[1] for i in sorted(lowerCamelCase__ )[:k]] # Most commonly occurring class among them # is the class into which the point is classified A_ : Union[str, Any] = Counter(lowerCamelCase__ ).most_common(1 )[0][0] return classes[result] if __name__ == "__main__": print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
360
'''simple docstring''' import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoImageProcessor, ViTImageProcessor from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test sys.path.append(str(Path(__file__).parent.parent / """utils""")) from test_module.custom_image_processing import CustomImageProcessor # noqa E402 snake_case__ = get_tests_dir("""fixtures""") class UpperCamelCase_ (unittest.TestCase ): """simple docstring""" def _a ( self : List[str] ): """simple docstring""" A_ : List[Any] = mock.Mock() A_ : List[str] = 500 A_ : Tuple = {} A_ : int = HTTPError A_ : Optional[Any] = {} # Download this model to make sure it's in the cache. A_ : Tuple = ViTImageProcessor.from_pretrained('''hf-internal-testing/tiny-random-vit''' ) # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch('''requests.Session.request''' , return_value=_lowerCamelCase ) as mock_head: A_ : List[Any] = ViTImageProcessor.from_pretrained('''hf-internal-testing/tiny-random-vit''' ) # This check we did call the fake head request mock_head.assert_called() def _a ( self : Tuple ): """simple docstring""" A_ : Tuple = ViTImageProcessor.from_pretrained( '''https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json''' ) def _a ( self : Dict ): """simple docstring""" with self.assertRaises(_lowerCamelCase ): # config is in subfolder, the following should not work without specifying the subfolder A_ : Any = AutoImageProcessor.from_pretrained('''hf-internal-testing/stable-diffusion-all-variants''' ) A_ : Tuple = AutoImageProcessor.from_pretrained( '''hf-internal-testing/stable-diffusion-all-variants''' , subfolder='''feature_extractor''' ) self.assertIsNotNone(_lowerCamelCase ) @is_staging_test class UpperCamelCase_ (unittest.TestCase ): """simple docstring""" @classmethod def _a ( cls : Tuple ): """simple docstring""" A_ : int = TOKEN HfFolder.save_token(_lowerCamelCase ) @classmethod def _a ( cls : str ): """simple docstring""" try: delete_repo(token=cls._token , repo_id='''test-image-processor''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''valid_org/test-image-processor-org''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''test-dynamic-image-processor''' ) except HTTPError: pass def _a ( self : List[Any] ): """simple docstring""" A_ : Dict = ViTImageProcessor.from_pretrained(_lowerCamelCase ) image_processor.push_to_hub('''test-image-processor''' , use_auth_token=self._token ) A_ : Optional[int] = ViTImageProcessor.from_pretrained(f'{USER}/test-image-processor' ) for k, v in image_processor.__dict__.items(): self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) ) # Reset repo delete_repo(token=self._token , repo_id='''test-image-processor''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained( _lowerCamelCase , repo_id='''test-image-processor''' , push_to_hub=_lowerCamelCase , use_auth_token=self._token ) A_ : List[Any] = ViTImageProcessor.from_pretrained(f'{USER}/test-image-processor' ) for k, v in image_processor.__dict__.items(): self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) ) def _a ( self : Optional[Any] ): """simple docstring""" A_ : int = ViTImageProcessor.from_pretrained(_lowerCamelCase ) image_processor.push_to_hub('''valid_org/test-image-processor''' , use_auth_token=self._token ) A_ : List[str] = ViTImageProcessor.from_pretrained('''valid_org/test-image-processor''' ) for k, v in image_processor.__dict__.items(): self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) ) # Reset repo delete_repo(token=self._token , repo_id='''valid_org/test-image-processor''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained( _lowerCamelCase , repo_id='''valid_org/test-image-processor-org''' , push_to_hub=_lowerCamelCase , use_auth_token=self._token ) A_ : Any = ViTImageProcessor.from_pretrained('''valid_org/test-image-processor-org''' ) for k, v in image_processor.__dict__.items(): self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) ) def _a ( self : Optional[Any] ): """simple docstring""" CustomImageProcessor.register_for_auto_class() A_ : Any = CustomImageProcessor.from_pretrained(_lowerCamelCase ) image_processor.push_to_hub('''test-dynamic-image-processor''' , use_auth_token=self._token ) # This has added the proper auto_map field to the config self.assertDictEqual( image_processor.auto_map , {'''AutoImageProcessor''': '''custom_image_processing.CustomImageProcessor'''} , ) A_ : str = AutoImageProcessor.from_pretrained( f'{USER}/test-dynamic-image-processor' , trust_remote_code=_lowerCamelCase ) # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module self.assertEqual(new_image_processor.__class__.__name__ , '''CustomImageProcessor''' )
4
0
'''simple docstring''' from maths.prime_factors import prime_factors def snake_case__ ( lowerCamelCase__ : int ) -> int: if not isinstance(lowerCamelCase__ , lowerCamelCase__ ): A_ : Dict = f'Input value of [number={number}] must be an integer' raise TypeError(lowerCamelCase__ ) if number < 1: raise ValueError('''Input must be a positive integer''' ) return -1 if len(prime_factors(lowerCamelCase__ ) ) % 2 else 1 if __name__ == "__main__": import doctest doctest.testmod()
361
'''simple docstring''' # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import torch from ..models.clipseg import CLIPSegForImageSegmentation from ..utils import is_vision_available, requires_backends from .base import PipelineTool if is_vision_available(): from PIL import Image class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = ( 'This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image.' 'It takes two arguments named `image` which should be the original image, and `label` which should be a text ' 'describing the elements what should be identified in the segmentation mask. The tool returns the mask.' ) _lowerCAmelCase = 'CIDAS/clipseg-rd64-refined' _lowerCAmelCase = 'image_segmenter' _lowerCAmelCase = CLIPSegForImageSegmentation _lowerCAmelCase = ['image', 'text'] _lowerCAmelCase = ['image'] def __init__( self : Optional[int] , *_lowerCamelCase : Optional[int] , **_lowerCamelCase : Union[str, Any] ): """simple docstring""" requires_backends(self , ['''vision'''] ) super().__init__(*_lowerCamelCase , **_lowerCamelCase ) def _a ( self : List[str] , _lowerCamelCase : "Image" , _lowerCamelCase : str ): """simple docstring""" return self.pre_processor(text=[label] , images=[image] , padding=_lowerCamelCase , return_tensors='''pt''' ) def _a ( self : Union[str, Any] , _lowerCamelCase : Optional[int] ): """simple docstring""" with torch.no_grad(): A_ : Optional[int] = self.model(**_lowerCamelCase ).logits return logits def _a ( self : List[str] , _lowerCamelCase : Optional[int] ): """simple docstring""" A_ : int = outputs.cpu().detach().numpy() A_ : Tuple = 0 A_ : List[str] = 1 return Image.fromarray((array * 255).astype(np.uinta ) )
4
0
'''simple docstring''' import re from ..models.auto import AutoProcessor from ..models.vision_encoder_decoder import VisionEncoderDecoderModel from ..utils import is_vision_available from .base import PipelineTool if is_vision_available(): from PIL import Image class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = 'naver-clova-ix/donut-base-finetuned-docvqa' _lowerCAmelCase = ( 'This is a tool that answers a question about an document (pdf). It takes an input named `document` which ' 'should be the document containing the information, as well as a `question` that is the question about the ' 'document. It returns a text that contains the answer to the question.' ) _lowerCAmelCase = 'document_qa' _lowerCAmelCase = AutoProcessor _lowerCAmelCase = VisionEncoderDecoderModel _lowerCAmelCase = ['image', 'text'] _lowerCAmelCase = ['text'] def __init__( self : Optional[int] , *_lowerCamelCase : Any , **_lowerCamelCase : Dict ): """simple docstring""" if not is_vision_available(): raise ValueError('''Pillow must be installed to use the DocumentQuestionAnsweringTool.''' ) super().__init__(*_lowerCamelCase , **_lowerCamelCase ) def _a ( self : str , _lowerCamelCase : "Image" , _lowerCamelCase : str ): """simple docstring""" A_ : Tuple = '''<s_docvqa><s_question>{user_input}</s_question><s_answer>''' A_ : List[str] = task_prompt.replace('''{user_input}''' , _lowerCamelCase ) A_ : str = self.pre_processor.tokenizer( _lowerCamelCase , add_special_tokens=_lowerCamelCase , return_tensors='''pt''' ).input_ids A_ : Optional[Any] = self.pre_processor(_lowerCamelCase , return_tensors='''pt''' ).pixel_values return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values} def _a ( self : Optional[Any] , _lowerCamelCase : Union[str, Any] ): """simple docstring""" return self.model.generate( inputs['''pixel_values'''].to(self.device ) , decoder_input_ids=inputs['''decoder_input_ids'''].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=_lowerCamelCase , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=_lowerCamelCase , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=_lowerCamelCase , ).sequences def _a ( self : Optional[Any] , _lowerCamelCase : Optional[Any] ): """simple docstring""" A_ : Optional[int] = self.pre_processor.batch_decode(_lowerCamelCase )[0] A_ : int = sequence.replace(self.pre_processor.tokenizer.eos_token , '''''' ) A_ : Tuple = sequence.replace(self.pre_processor.tokenizer.pad_token , '''''' ) A_ : str = re.sub(R'''<.*?>''' , '''''' , _lowerCamelCase , count=1 ).strip() # remove first task start token A_ : Any = self.pre_processor.tokenajson(_lowerCamelCase ) return sequence["answer"]
362
'''simple docstring''' from collections.abc import Sequence def snake_case__ ( lowerCamelCase__ : Sequence[float] , lowerCamelCase__ : bool = False ) -> float: if not arr: return 0 A_ : Union[str, Any] = 0 if allow_empty_subarrays else float('''-inf''' ) A_ : str = 0.0 for num in arr: A_ : Any = max(0 if allow_empty_subarrays else num , curr_sum + num ) A_ : Tuple = max(lowerCamelCase__ , lowerCamelCase__ ) return max_sum if __name__ == "__main__": from doctest import testmod testmod() snake_case__ = [-2, 1, -3, 4, -1, 2, 1, -5, 4] print(F'{max_subarray_sum(nums) = }')
4
0
'''simple docstring''' import argparse import torch from ...utils import logging from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert logging.set_verbosity_info() def snake_case__ ( lowerCamelCase__ : str , lowerCamelCase__ : Tuple , lowerCamelCase__ : Optional[int] ) -> int: # Initialise PyTorch model A_ : int = AlbertConfig.from_json_file(lowerCamelCase__ ) print(f'Building PyTorch model from configuration: {config}' ) A_ : List[Any] = AlbertForPreTraining(lowerCamelCase__ ) # Load weights from tf checkpoint load_tf_weights_in_albert(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) # Save pytorch-model print(f'Save PyTorch model to {pytorch_dump_path}' ) torch.save(model.state_dict() , lowerCamelCase__ ) if __name__ == "__main__": snake_case__ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path.""" ) parser.add_argument( """--albert_config_file""", default=None, type=str, required=True, help=( """The config json file corresponding to the pre-trained ALBERT model. \n""" """This specifies the model architecture.""" ), ) parser.add_argument( """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) snake_case__ = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
363
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging snake_case__ = logging.get_logger(__name__) snake_case__ = { """facebook/s2t-wav2vec2-large-en-de""": ( """https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json""" ), # See all Speech2Text models at https://huggingface.co/models?filter=speech2text2 } class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = 'speech_to_text_2' _lowerCAmelCase = ['past_key_values'] _lowerCAmelCase = {'num_attention_heads': 'decoder_attention_heads', 'hidden_size': 'd_model'} def __init__( self : Optional[Any] , _lowerCamelCase : Optional[Any]=10000 , _lowerCamelCase : List[Any]=6 , _lowerCamelCase : int=2048 , _lowerCamelCase : Dict=4 , _lowerCamelCase : str=0.0 , _lowerCamelCase : int=True , _lowerCamelCase : int="relu" , _lowerCamelCase : Any=256 , _lowerCamelCase : List[Any]=0.1 , _lowerCamelCase : Tuple=0.0 , _lowerCamelCase : Union[str, Any]=0.0 , _lowerCamelCase : Optional[Any]=0.02 , _lowerCamelCase : int=2 , _lowerCamelCase : List[str]=True , _lowerCamelCase : str=1 , _lowerCamelCase : List[Any]=0 , _lowerCamelCase : Optional[int]=2 , _lowerCamelCase : Tuple=1024 , **_lowerCamelCase : int , ): """simple docstring""" A_ : Optional[int] = vocab_size A_ : Tuple = d_model A_ : List[str] = decoder_ffn_dim A_ : str = decoder_layers A_ : Any = decoder_attention_heads A_ : int = dropout A_ : str = attention_dropout A_ : Optional[int] = activation_dropout A_ : str = activation_function A_ : List[Any] = init_std A_ : Union[str, Any] = decoder_layerdrop A_ : Any = use_cache A_ : Optional[Any] = decoder_layers A_ : Optional[int] = scale_embedding # scale factor will be sqrt(d_model) if True A_ : Optional[Any] = max_target_positions super().__init__( pad_token_id=_lowerCamelCase , bos_token_id=_lowerCamelCase , eos_token_id=_lowerCamelCase , decoder_start_token_id=_lowerCamelCase , **_lowerCamelCase , )
4
0
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_funnel import FunnelTokenizer snake_case__ = logging.get_logger(__name__) snake_case__ = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""} snake_case__ = [ """small""", """small-base""", """medium""", """medium-base""", """intermediate""", """intermediate-base""", """large""", """large-base""", """xlarge""", """xlarge-base""", ] snake_case__ = { """vocab_file""": { """funnel-transformer/small""": """https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt""", """funnel-transformer/small-base""": """https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt""", """funnel-transformer/medium""": """https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt""", """funnel-transformer/medium-base""": ( """https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt""" ), """funnel-transformer/intermediate""": ( """https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt""" ), """funnel-transformer/intermediate-base""": ( """https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt""" ), """funnel-transformer/large""": """https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt""", """funnel-transformer/large-base""": """https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt""", """funnel-transformer/xlarge""": """https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt""", """funnel-transformer/xlarge-base""": ( """https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt""" ), }, """tokenizer_file""": { """funnel-transformer/small""": """https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json""", """funnel-transformer/small-base""": ( """https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json""" ), """funnel-transformer/medium""": """https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json""", """funnel-transformer/medium-base""": ( """https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json""" ), """funnel-transformer/intermediate""": ( """https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json""" ), """funnel-transformer/intermediate-base""": ( """https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json""" ), """funnel-transformer/large""": """https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json""", """funnel-transformer/large-base""": ( """https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json""" ), """funnel-transformer/xlarge""": """https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json""", """funnel-transformer/xlarge-base""": ( """https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json""" ), }, } snake_case__ = {F'funnel-transformer/{name}': 5_12 for name in _model_names} snake_case__ = {F'funnel-transformer/{name}': {"""do_lower_case""": True} for name in _model_names} class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = VOCAB_FILES_NAMES _lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP _lowerCAmelCase = PRETRAINED_INIT_CONFIGURATION _lowerCAmelCase = FunnelTokenizer _lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowerCAmelCase = 2 def __init__( self : Union[str, Any] , _lowerCamelCase : List[Any]=None , _lowerCamelCase : Tuple=None , _lowerCamelCase : Any=True , _lowerCamelCase : Union[str, Any]="<unk>" , _lowerCamelCase : str="<sep>" , _lowerCamelCase : Union[str, Any]="<pad>" , _lowerCamelCase : Optional[Any]="<cls>" , _lowerCamelCase : List[Any]="<mask>" , _lowerCamelCase : Union[str, Any]="<s>" , _lowerCamelCase : List[Any]="</s>" , _lowerCamelCase : List[Any]=True , _lowerCamelCase : int=True , _lowerCamelCase : List[Any]=None , _lowerCamelCase : Union[str, Any]="##" , **_lowerCamelCase : List[str] , ): """simple docstring""" super().__init__( _lowerCamelCase , tokenizer_file=_lowerCamelCase , do_lower_case=_lowerCamelCase , unk_token=_lowerCamelCase , sep_token=_lowerCamelCase , pad_token=_lowerCamelCase , cls_token=_lowerCamelCase , mask_token=_lowerCamelCase , bos_token=_lowerCamelCase , eos_token=_lowerCamelCase , clean_text=_lowerCamelCase , tokenize_chinese_chars=_lowerCamelCase , strip_accents=_lowerCamelCase , wordpieces_prefix=_lowerCamelCase , **_lowerCamelCase , ) A_ : Union[str, Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('''lowercase''' , _lowerCamelCase ) != do_lower_case or normalizer_state.get('''strip_accents''' , _lowerCamelCase ) != strip_accents or normalizer_state.get('''handle_chinese_chars''' , _lowerCamelCase ) != tokenize_chinese_chars ): A_ : str = getattr(_lowerCamelCase , normalizer_state.pop('''type''' ) ) A_ : Optional[Any] = do_lower_case A_ : List[Any] = strip_accents A_ : Any = tokenize_chinese_chars A_ : Any = normalizer_class(**_lowerCamelCase ) A_ : Union[str, Any] = do_lower_case def _a ( self : int , _lowerCamelCase : Any , _lowerCamelCase : Optional[Any]=None ): """simple docstring""" A_ : str = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def _a ( self : Union[str, Any] , _lowerCamelCase : List[int] , _lowerCamelCase : Optional[List[int]] = None ): """simple docstring""" A_ : int = [self.sep_token_id] A_ : List[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0] return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def _a ( self : List[str] , _lowerCamelCase : str , _lowerCamelCase : Optional[str] = None ): """simple docstring""" A_ : Optional[int] = self._tokenizer.model.save(_lowerCamelCase , name=_lowerCamelCase ) return tuple(_lowerCamelCase )
364
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING snake_case__ = logging.get_logger(__name__) snake_case__ = { """microsoft/table-transformer-detection""": ( """https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json""" ), } class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = 'table-transformer' _lowerCAmelCase = ['past_key_values'] _lowerCAmelCase = { 'hidden_size': 'd_model', 'num_attention_heads': 'encoder_attention_heads', } def __init__( self : Any , _lowerCamelCase : Union[str, Any]=True , _lowerCamelCase : Dict=None , _lowerCamelCase : int=3 , _lowerCamelCase : Any=100 , _lowerCamelCase : List[Any]=6 , _lowerCamelCase : Tuple=2048 , _lowerCamelCase : Any=8 , _lowerCamelCase : Dict=6 , _lowerCamelCase : Tuple=2048 , _lowerCamelCase : int=8 , _lowerCamelCase : Optional[int]=0.0 , _lowerCamelCase : List[Any]=0.0 , _lowerCamelCase : List[Any]=True , _lowerCamelCase : Optional[int]="relu" , _lowerCamelCase : Union[str, Any]=256 , _lowerCamelCase : Any=0.1 , _lowerCamelCase : Tuple=0.0 , _lowerCamelCase : Optional[int]=0.0 , _lowerCamelCase : str=0.02 , _lowerCamelCase : Tuple=1.0 , _lowerCamelCase : Dict=False , _lowerCamelCase : str="sine" , _lowerCamelCase : str="resnet50" , _lowerCamelCase : Any=True , _lowerCamelCase : List[str]=False , _lowerCamelCase : Any=1 , _lowerCamelCase : int=5 , _lowerCamelCase : Tuple=2 , _lowerCamelCase : Optional[int]=1 , _lowerCamelCase : Any=1 , _lowerCamelCase : Dict=5 , _lowerCamelCase : str=2 , _lowerCamelCase : Union[str, Any]=0.1 , **_lowerCamelCase : int , ): """simple docstring""" if backbone_config is not None and use_timm_backbone: raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' ) if not use_timm_backbone: if backbone_config is None: logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' ) A_ : int = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] ) elif isinstance(_lowerCamelCase , _lowerCamelCase ): A_ : str = backbone_config.get('''model_type''' ) A_ : Optional[int] = CONFIG_MAPPING[backbone_model_type] A_ : List[str] = config_class.from_dict(_lowerCamelCase ) # set timm attributes to None A_ ,A_ ,A_ : Union[str, Any] = None, None, None A_ : Optional[Any] = use_timm_backbone A_ : Optional[int] = backbone_config A_ : Optional[Any] = num_channels A_ : Dict = num_queries A_ : str = d_model A_ : List[str] = encoder_ffn_dim A_ : int = encoder_layers A_ : Optional[Any] = encoder_attention_heads A_ : List[str] = decoder_ffn_dim A_ : Any = decoder_layers A_ : List[str] = decoder_attention_heads A_ : Tuple = dropout A_ : Optional[Any] = attention_dropout A_ : Any = activation_dropout A_ : List[Any] = activation_function A_ : Dict = init_std A_ : Any = init_xavier_std A_ : List[Any] = encoder_layerdrop A_ : int = decoder_layerdrop A_ : Any = encoder_layers A_ : List[str] = auxiliary_loss A_ : List[Any] = position_embedding_type A_ : Optional[Any] = backbone A_ : Tuple = use_pretrained_backbone A_ : List[Any] = dilation # Hungarian matcher A_ : List[str] = class_cost A_ : str = bbox_cost A_ : Union[str, Any] = giou_cost # Loss coefficients A_ : Any = mask_loss_coefficient A_ : Optional[int] = dice_loss_coefficient A_ : Dict = bbox_loss_coefficient A_ : int = giou_loss_coefficient A_ : int = eos_coefficient super().__init__(is_encoder_decoder=_lowerCamelCase , **_lowerCamelCase ) @property def _a ( self : List[Any] ): """simple docstring""" return self.encoder_attention_heads @property def _a ( self : Any ): """simple docstring""" return self.d_model class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = version.parse('1.11' ) @property def _a ( self : Tuple ): """simple docstring""" return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ('''pixel_mask''', {0: '''batch'''}), ] ) @property def _a ( self : Optional[int] ): """simple docstring""" return 1E-5 @property def _a ( self : str ): """simple docstring""" return 12
4
0
'''simple docstring''' from argparse import ArgumentParser, Namespace from ..utils import logging from . import BaseTransformersCLICommand def snake_case__ ( lowerCamelCase__ : Namespace ) -> Union[str, Any]: return ConvertCommand( args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name ) snake_case__ = """ transformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions. """ class UpperCamelCase_ (a__ ): """simple docstring""" @staticmethod def _a ( _lowerCamelCase : ArgumentParser ): """simple docstring""" A_ : List[str] = parser.add_parser( '''convert''' , help='''CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.''' , ) train_parser.add_argument('''--model_type''' , type=_lowerCamelCase , required=_lowerCamelCase , help='''Model\'s type.''' ) train_parser.add_argument( '''--tf_checkpoint''' , type=_lowerCamelCase , required=_lowerCamelCase , help='''TensorFlow checkpoint path or folder.''' ) train_parser.add_argument( '''--pytorch_dump_output''' , type=_lowerCamelCase , required=_lowerCamelCase , help='''Path to the PyTorch saved model output.''' ) train_parser.add_argument('''--config''' , type=_lowerCamelCase , default='''''' , help='''Configuration file path or folder.''' ) train_parser.add_argument( '''--finetuning_task_name''' , type=_lowerCamelCase , default=_lowerCamelCase , help='''Optional fine-tuning task name if the TF model was a finetuned model.''' , ) train_parser.set_defaults(func=_lowerCamelCase ) def __init__( self : Dict , _lowerCamelCase : str , _lowerCamelCase : str , _lowerCamelCase : str , _lowerCamelCase : str , _lowerCamelCase : str , *_lowerCamelCase : Optional[Any] , ): """simple docstring""" A_ : Dict = logging.get_logger('''transformers-cli/converting''' ) self._logger.info(f'Loading model {model_type}' ) A_ : Tuple = model_type A_ : List[Any] = tf_checkpoint A_ : Any = pytorch_dump_output A_ : Union[str, Any] = config A_ : Dict = finetuning_task_name def _a ( self : Any ): """simple docstring""" if self._model_type == "albert": try: from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(_lowerCamelCase ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "bert": try: from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(_lowerCamelCase ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "funnel": try: from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(_lowerCamelCase ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "t5": try: from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch except ImportError: raise ImportError(_lowerCamelCase ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "gpt": from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import ( convert_openai_checkpoint_to_pytorch, ) convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "transfo_xl": try: from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import ( convert_transfo_xl_checkpoint_to_pytorch, ) except ImportError: raise ImportError(_lowerCamelCase ) if "ckpt" in self._tf_checkpoint.lower(): A_ : List[Any] = self._tf_checkpoint A_ : Optional[int] = '''''' else: A_ : Union[str, Any] = self._tf_checkpoint A_ : int = '''''' convert_transfo_xl_checkpoint_to_pytorch( _lowerCamelCase , self._config , self._pytorch_dump_output , _lowerCamelCase ) elif self._model_type == "gpt2": try: from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import ( convert_gpta_checkpoint_to_pytorch, ) except ImportError: raise ImportError(_lowerCamelCase ) convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "xlnet": try: from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import ( convert_xlnet_checkpoint_to_pytorch, ) except ImportError: raise ImportError(_lowerCamelCase ) convert_xlnet_checkpoint_to_pytorch( self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name ) elif self._model_type == "xlm": from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import ( convert_xlm_checkpoint_to_pytorch, ) convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output ) elif self._model_type == "lxmert": from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import ( convert_lxmert_checkpoint_to_pytorch, ) convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output ) elif self._model_type == "rembert": from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import ( convert_rembert_tf_checkpoint_to_pytorch, ) convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) else: raise ValueError( '''--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]''' )
365
'''simple docstring''' import inspect import unittest from transformers import BitConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class UpperCamelCase_ : """simple docstring""" def __init__( self : Optional[Any] , _lowerCamelCase : int , _lowerCamelCase : List[str]=3 , _lowerCamelCase : Any=32 , _lowerCamelCase : Union[str, Any]=3 , _lowerCamelCase : int=10 , _lowerCamelCase : Union[str, Any]=[8, 16, 32, 64] , _lowerCamelCase : Dict=[1, 1, 2, 1] , _lowerCamelCase : Union[str, Any]=True , _lowerCamelCase : Optional[int]=True , _lowerCamelCase : Any="relu" , _lowerCamelCase : Optional[Any]=3 , _lowerCamelCase : Optional[Any]=None , _lowerCamelCase : Dict=["stage2", "stage3", "stage4"] , _lowerCamelCase : Union[str, Any]=[2, 3, 4] , _lowerCamelCase : Tuple=1 , ): """simple docstring""" A_ : List[str] = parent A_ : List[str] = batch_size A_ : Union[str, Any] = image_size A_ : Tuple = num_channels A_ : Any = embeddings_size A_ : int = hidden_sizes A_ : Optional[Any] = depths A_ : List[Any] = is_training A_ : Optional[int] = use_labels A_ : int = hidden_act A_ : Tuple = num_labels A_ : Union[str, Any] = scope A_ : List[Any] = len(_lowerCamelCase ) A_ : Union[str, Any] = out_features A_ : List[Any] = out_indices A_ : Dict = num_groups def _a ( self : Optional[int] ): """simple docstring""" A_ : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A_ : Union[str, Any] = None if self.use_labels: A_ : Any = ids_tensor([self.batch_size] , self.num_labels ) A_ : Any = self.get_config() return config, pixel_values, labels def _a ( self : Union[str, Any] ): """simple docstring""" return BitConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , ) def _a ( self : List[Any] , _lowerCamelCase : List[str] , _lowerCamelCase : List[str] , _lowerCamelCase : Optional[Any] ): """simple docstring""" A_ : Any = BitModel(config=_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() A_ : int = model(_lowerCamelCase ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def _a ( self : Optional[int] , _lowerCamelCase : List[Any] , _lowerCamelCase : str , _lowerCamelCase : Optional[int] ): """simple docstring""" A_ : Dict = self.num_labels A_ : Optional[Any] = BitForImageClassification(_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() A_ : List[Any] = model(_lowerCamelCase , labels=_lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _a ( self : Any , _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : List[Any] ): """simple docstring""" A_ : List[Any] = BitBackbone(config=_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() A_ : int = model(_lowerCamelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None A_ : Optional[Any] = None A_ : int = BitBackbone(config=_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() A_ : Optional[int] = model(_lowerCamelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def _a ( self : List[Any] ): """simple docstring""" A_ : Union[str, Any] = self.prepare_config_and_inputs() A_ ,A_ ,A_ : Union[str, Any] = config_and_inputs A_ : str = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class UpperCamelCase_ (a__, a__, unittest.TestCase ): """simple docstring""" _lowerCAmelCase = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else () _lowerCAmelCase = ( {'feature-extraction': BitModel, 'image-classification': BitForImageClassification} if is_torch_available() else {} ) _lowerCAmelCase = False _lowerCAmelCase = False _lowerCAmelCase = False _lowerCAmelCase = False _lowerCAmelCase = False def _a ( self : Optional[Any] ): """simple docstring""" A_ : List[str] = BitModelTester(self ) A_ : Optional[Any] = ConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase ) def _a ( self : Optional[Any] ): """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _a ( self : List[Any] ): """simple docstring""" return @unittest.skip(reason='''Bit does not output attentions''' ) def _a ( self : str ): """simple docstring""" pass @unittest.skip(reason='''Bit does not use inputs_embeds''' ) def _a ( self : Union[str, Any] ): """simple docstring""" pass @unittest.skip(reason='''Bit does not support input and output embeddings''' ) def _a ( self : Any ): """simple docstring""" pass def _a ( self : List[Any] ): """simple docstring""" A_ ,A_ : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A_ : Dict = model_class(_lowerCamelCase ) A_ : Dict = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A_ : int = [*signature.parameters.keys()] A_ : Union[str, Any] = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , _lowerCamelCase ) def _a ( self : Optional[Any] ): """simple docstring""" A_ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCamelCase ) def _a ( self : Optional[Any] ): """simple docstring""" A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*_lowerCamelCase ) def _a ( self : Tuple ): """simple docstring""" A_ ,A_ : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A_ : str = model_class(config=_lowerCamelCase ) for name, module in model.named_modules(): if isinstance(_lowerCamelCase , (nn.BatchNormad, nn.GroupNorm) ): self.assertTrue( torch.all(module.weight == 1 ) , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , ) self.assertTrue( torch.all(module.bias == 0 ) , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , ) def _a ( self : int ): """simple docstring""" def check_hidden_states_output(_lowerCamelCase : Union[str, Any] , _lowerCamelCase : Dict , _lowerCamelCase : int ): A_ : Union[str, Any] = model_class(_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() with torch.no_grad(): A_ : Union[str, Any] = model(**self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) ) A_ : int = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states A_ : List[Any] = self.model_tester.num_stages self.assertEqual(len(_lowerCamelCase ) , expected_num_stages + 1 ) # Bit's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) A_ ,A_ : str = self.model_tester.prepare_config_and_inputs_for_common() A_ : Tuple = ['''preactivation''', '''bottleneck'''] for model_class in self.all_model_classes: for layer_type in layers_type: A_ : Tuple = layer_type A_ : Optional[Any] = True check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] A_ : List[str] = True check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) @unittest.skip(reason='''Bit does not use feedforward chunking''' ) def _a ( self : Tuple ): """simple docstring""" pass def _a ( self : str ): """simple docstring""" A_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_lowerCamelCase ) @slow def _a ( self : Union[str, Any] ): """simple docstring""" for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A_ : List[Any] = BitModel.from_pretrained(_lowerCamelCase ) self.assertIsNotNone(_lowerCamelCase ) def snake_case__ ( ) -> Optional[int]: A_ : Optional[int] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class UpperCamelCase_ (unittest.TestCase ): """simple docstring""" @cached_property def _a ( self : List[Any] ): """simple docstring""" return ( BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def _a ( self : Dict ): """simple docstring""" A_ : Optional[int] = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(_lowerCamelCase ) A_ : Union[str, Any] = self.default_image_processor A_ : Optional[int] = prepare_img() A_ : int = image_processor(images=_lowerCamelCase , return_tensors='''pt''' ).to(_lowerCamelCase ) # forward pass with torch.no_grad(): A_ : Union[str, Any] = model(**_lowerCamelCase ) # verify the logits A_ : Dict = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , _lowerCamelCase ) A_ : Tuple = torch.tensor([[-0.65_26, -0.52_63, -1.43_98]] ).to(_lowerCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCamelCase , atol=1E-4 ) ) @require_torch class UpperCamelCase_ (a__, unittest.TestCase ): """simple docstring""" _lowerCAmelCase = (BitBackbone,) if is_torch_available() else () _lowerCAmelCase = BitConfig _lowerCAmelCase = False def _a ( self : List[str] ): """simple docstring""" A_ : Union[str, Any] = BitModelTester(self )
4
0
'''simple docstring''' def snake_case__ ( lowerCamelCase__ : list[list[int]] , lowerCamelCase__ : int , lowerCamelCase__ : int , lowerCamelCase__ : set ) -> int: A_ : Optional[Any] = len(lowerCamelCase__ ), len(grid[0] ) if ( min(lowerCamelCase__ , lowerCamelCase__ ) < 0 or row == row_length or col == col_length or (row, col) in visit or grid[row][col] == 1 ): return 0 if row == row_length - 1 and col == col_length - 1: return 1 visit.add((row, col) ) A_ : str = 0 count += depth_first_search(lowerCamelCase__ , row + 1 , lowerCamelCase__ , lowerCamelCase__ ) count += depth_first_search(lowerCamelCase__ , row - 1 , lowerCamelCase__ , lowerCamelCase__ ) count += depth_first_search(lowerCamelCase__ , lowerCamelCase__ , col + 1 , lowerCamelCase__ ) count += depth_first_search(lowerCamelCase__ , lowerCamelCase__ , col - 1 , lowerCamelCase__ ) visit.remove((row, col) ) return count if __name__ == "__main__": import doctest doctest.testmod()
366
'''simple docstring''' import pprint import requests snake_case__ = """https://zenquotes.io/api""" def snake_case__ ( ) -> list: return requests.get(API_ENDPOINT_URL + '''/today''' ).json() def snake_case__ ( ) -> list: return requests.get(API_ENDPOINT_URL + '''/random''' ).json() if __name__ == "__main__": snake_case__ = random_quotes() pprint.pprint(response)
4
0
'''simple docstring''' def snake_case__ ( lowerCamelCase__ : str ) -> str: return " ".join(input_str.split()[::-1] ) if __name__ == "__main__": import doctest doctest.testmod()
367
'''simple docstring''' from __future__ import annotations class UpperCamelCase_ : """simple docstring""" def __init__( self : Optional[int] , _lowerCamelCase : int ): """simple docstring""" A_ : Union[str, Any] = order # a_{0} ... a_{k} A_ : Union[str, Any] = [1.0] + [0.0] * order # b_{0} ... b_{k} A_ : int = [1.0] + [0.0] * order # x[n-1] ... x[n-k] A_ : str = [0.0] * self.order # y[n-1] ... y[n-k] A_ : Optional[Any] = [0.0] * self.order def _a ( self : Dict , _lowerCamelCase : list[float] , _lowerCamelCase : list[float] ): """simple docstring""" if len(_lowerCamelCase ) < self.order: A_ : Any = [1.0, *a_coeffs] if len(_lowerCamelCase ) != self.order + 1: A_ : List[Any] = ( f'Expected a_coeffs to have {self.order + 1} elements ' f'for {self.order}-order filter, got {len(_lowerCamelCase )}' ) raise ValueError(_lowerCamelCase ) if len(_lowerCamelCase ) != self.order + 1: A_ : Union[str, Any] = ( f'Expected b_coeffs to have {self.order + 1} elements ' f'for {self.order}-order filter, got {len(_lowerCamelCase )}' ) raise ValueError(_lowerCamelCase ) A_ : Tuple = a_coeffs A_ : str = b_coeffs def _a ( self : Tuple , _lowerCamelCase : float ): """simple docstring""" A_ : Any = 0.0 # Start at index 1 and do index 0 at the end. for i in range(1 , self.order + 1 ): result += ( self.b_coeffs[i] * self.input_history[i - 1] - self.a_coeffs[i] * self.output_history[i - 1] ) A_ : str = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0] A_ : Optional[Any] = self.input_history[:-1] A_ : List[str] = self.output_history[:-1] A_ : Tuple = sample A_ : Tuple = result return result
4
0
'''simple docstring''' from __future__ import annotations from collections.abc import MutableSequence class UpperCamelCase_ : """simple docstring""" def __init__( self : int , _lowerCamelCase : int , _lowerCamelCase : MutableSequence[float] ): """simple docstring""" if len(_lowerCamelCase ) != degree + 1: raise ValueError( '''The number of coefficients should be equal to the degree + 1.''' ) A_ : list[float] = list(_lowerCamelCase ) A_ : Optional[Any] = degree def __add__( self : List[str] , _lowerCamelCase : Polynomial ): """simple docstring""" if self.degree > polynomial_a.degree: A_ : Union[str, Any] = self.coefficients[:] for i in range(polynomial_a.degree + 1 ): coefficients[i] += polynomial_a.coefficients[i] return Polynomial(self.degree , _lowerCamelCase ) else: A_ : Tuple = polynomial_a.coefficients[:] for i in range(self.degree + 1 ): coefficients[i] += self.coefficients[i] return Polynomial(polynomial_a.degree , _lowerCamelCase ) def __sub__( self : str , _lowerCamelCase : Polynomial ): """simple docstring""" return self + polynomial_a * Polynomial(0 , [-1] ) def __neg__( self : Any ): """simple docstring""" return Polynomial(self.degree , [-c for c in self.coefficients] ) def __mul__( self : Optional[int] , _lowerCamelCase : Polynomial ): """simple docstring""" A_ : list[float] = [0] * (self.degree + polynomial_a.degree + 1) for i in range(self.degree + 1 ): for j in range(polynomial_a.degree + 1 ): coefficients[i + j] += ( self.coefficients[i] * polynomial_a.coefficients[j] ) return Polynomial(self.degree + polynomial_a.degree , _lowerCamelCase ) def _a ( self : str , _lowerCamelCase : int | float ): """simple docstring""" A_ : int | float = 0 for i in range(self.degree + 1 ): result += self.coefficients[i] * (substitution**i) return result def __str__( self : Optional[Any] ): """simple docstring""" A_ : List[Any] = '''''' for i in range(self.degree , -1 , -1 ): if self.coefficients[i] == 0: continue elif self.coefficients[i] > 0: if polynomial: polynomial += " + " else: polynomial += " - " if i == 0: polynomial += str(abs(self.coefficients[i] ) ) elif i == 1: polynomial += str(abs(self.coefficients[i] ) ) + "x" else: polynomial += str(abs(self.coefficients[i] ) ) + "x^" + str(_lowerCamelCase ) return polynomial def __repr__( self : Any ): """simple docstring""" return self.__str__() def _a ( self : Dict ): """simple docstring""" A_ : list[float] = [0] * self.degree for i in range(self.degree ): A_ : int = self.coefficients[i + 1] * (i + 1) return Polynomial(self.degree - 1 , _lowerCamelCase ) def _a ( self : Union[str, Any] , _lowerCamelCase : int | float = 0 ): """simple docstring""" A_ : list[float] = [0] * (self.degree + 2) A_ : str = constant for i in range(self.degree + 1 ): A_ : List[str] = self.coefficients[i] / (i + 1) return Polynomial(self.degree + 1 , _lowerCamelCase ) def __eq__( self : List[Any] , _lowerCamelCase : object ): """simple docstring""" if not isinstance(_lowerCamelCase , _lowerCamelCase ): return False if self.degree != polynomial_a.degree: return False for i in range(self.degree + 1 ): if self.coefficients[i] != polynomial_a.coefficients[i]: return False return True def __ne__( self : Dict , _lowerCamelCase : object ): """simple docstring""" return not self.__eq__(_lowerCamelCase )
368
'''simple docstring''' class UpperCamelCase_ : """simple docstring""" def __init__( self : Optional[Any] , _lowerCamelCase : Union[str, Any] ): """simple docstring""" A_ : Union[str, Any] = val A_ : Tuple = None A_ : Any = None def _a ( self : Tuple , _lowerCamelCase : List[Any] ): """simple docstring""" if self.val: if val < self.val: if self.left is None: A_ : int = Node(_lowerCamelCase ) else: self.left.insert(_lowerCamelCase ) elif val > self.val: if self.right is None: A_ : List[str] = Node(_lowerCamelCase ) else: self.right.insert(_lowerCamelCase ) else: A_ : Any = val def snake_case__ ( lowerCamelCase__ : Any , lowerCamelCase__ : Optional[int] ) -> str: # Recursive traversal if root: inorder(root.left , lowerCamelCase__ ) res.append(root.val ) inorder(root.right , lowerCamelCase__ ) def snake_case__ ( lowerCamelCase__ : Optional[int] ) -> Tuple: # Build BST if len(lowerCamelCase__ ) == 0: return arr A_ : Dict = Node(arr[0] ) for i in range(1 , len(lowerCamelCase__ ) ): root.insert(arr[i] ) # Traverse BST in order. A_ : Tuple = [] inorder(lowerCamelCase__ , lowerCamelCase__ ) return res if __name__ == "__main__": print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
4
0
'''simple docstring''' import datasets from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py snake_case__ = """\ @INPROCEEDINGS{Papineni02bleu:a, author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu}, title = {BLEU: a Method for Automatic Evaluation of Machine Translation}, booktitle = {}, year = {2002}, pages = {311--318} } @inproceedings{lin-och-2004-orange, title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\", author = \"Lin, Chin-Yew and Och, Franz Josef\", booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\", month = \"aug 23{--}aug 27\", year = \"2004\", address = \"Geneva, Switzerland\", publisher = \"COLING\", url = \"https://www.aclweb.org/anthology/C04-1072\", pages = \"501--507\", } """ snake_case__ = """\ BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another. Quality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation, the better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and remains one of the most popular automated and inexpensive metrics. Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations. Those scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness are not taken into account[citation needed]. BLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1 representing more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional reference translations will increase the BLEU score. """ snake_case__ = """ Computes BLEU score of translated segments against one or more references. Args: predictions: list of translations to score. Each translation should be tokenized into a list of tokens. references: list of lists of references for each translation. Each reference should be tokenized into a list of tokens. max_order: Maximum n-gram order to use when computing BLEU score. smooth: Whether or not to apply Lin et al. 2004 smoothing. Returns: 'bleu': bleu score, 'precisions': geometric mean of n-gram precisions, 'brevity_penalty': brevity penalty, 'length_ratio': ratio of lengths, 'translation_length': translation_length, 'reference_length': reference_length Examples: >>> predictions = [ ... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample ... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample ... ] >>> references = [ ... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references) ... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference) ... ] >>> bleu = datasets.load_metric(\"bleu\") >>> results = bleu.compute(predictions=predictions, references=references) >>> print(results[\"bleu\"]) 1.0 """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION ) class UpperCamelCase_ (datasets.Metric ): """simple docstring""" def _a ( self : Union[str, Any] ): """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ), '''references''': datasets.Sequence( datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ) , id='''references''' ), } ) , codebase_urls=['''https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py'''] , reference_urls=[ '''https://en.wikipedia.org/wiki/BLEU''', '''https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213''', ] , ) def _a ( self : Union[str, Any] , _lowerCamelCase : Tuple , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Optional[int]=4 , _lowerCamelCase : int=False ): """simple docstring""" A_ : Optional[Any] = compute_bleu( reference_corpus=_lowerCamelCase , translation_corpus=_lowerCamelCase , max_order=_lowerCamelCase , smooth=_lowerCamelCase ) (A_) : Optional[Any] = score return { "bleu": bleu, "precisions": precisions, "brevity_penalty": bp, "length_ratio": ratio, "translation_length": translation_length, "reference_length": reference_length, }
369
'''simple docstring''' def snake_case__ ( lowerCamelCase__ : list ) -> list: if len(lowerCamelCase__ ) <= 1: return [tuple(lowerCamelCase__ )] A_ : List[str] = [] def generate(lowerCamelCase__ : int , lowerCamelCase__ : list ): if k == 1: res.append(tuple(arr[:] ) ) return generate(k - 1 , lowerCamelCase__ ) for i in range(k - 1 ): if k % 2 == 0: # k is even A_ ,A_ : Optional[int] = arr[k - 1], arr[i] else: # k is odd A_ ,A_ : Union[str, Any] = arr[k - 1], arr[0] generate(k - 1 , lowerCamelCase__ ) generate(len(lowerCamelCase__ ) , lowerCamelCase__ ) return res if __name__ == "__main__": snake_case__ = input("""Enter numbers separated by a comma:\n""").strip() snake_case__ = [int(item) for item in user_input.split(""",""")] print(heaps(arr))
4
0
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices snake_case__ = logging.get_logger(__name__) snake_case__ = { """microsoft/resnet-50""": """https://huggingface.co/microsoft/resnet-50/blob/main/config.json""", } class UpperCamelCase_ (a__, a__ ): """simple docstring""" _lowerCAmelCase = 'resnet' _lowerCAmelCase = ['basic', 'bottleneck'] def __init__( self : Union[str, Any] , _lowerCamelCase : List[str]=3 , _lowerCamelCase : str=64 , _lowerCamelCase : str=[256, 512, 1024, 2048] , _lowerCamelCase : Optional[Any]=[3, 4, 6, 3] , _lowerCamelCase : Any="bottleneck" , _lowerCamelCase : List[Any]="relu" , _lowerCamelCase : List[Any]=False , _lowerCamelCase : str=None , _lowerCamelCase : List[str]=None , **_lowerCamelCase : Tuple , ): """simple docstring""" super().__init__(**_lowerCamelCase ) if layer_type not in self.layer_types: raise ValueError(f'layer_type={layer_type} is not one of {",".join(self.layer_types )}' ) A_ : int = num_channels A_ : Dict = embedding_size A_ : Union[str, Any] = hidden_sizes A_ : Dict = depths A_ : str = layer_type A_ : int = hidden_act A_ : str = downsample_in_first_stage A_ : Dict = ['''stem'''] + [f'stage{idx}' for idx in range(1 , len(_lowerCamelCase ) + 1 )] A_ : Any = get_aligned_output_features_output_indices( out_features=_lowerCamelCase , out_indices=_lowerCamelCase , stage_names=self.stage_names ) class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = version.parse('1.11' ) @property def _a ( self : int ): """simple docstring""" return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def _a ( self : Optional[int] ): """simple docstring""" return 1E-3
370
'''simple docstring''' import unittest from queue import Empty from threading import Thread from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available from transformers.testing_utils import CaptureStdout, require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers import AutoModelForCausalLM @require_torch class UpperCamelCase_ (unittest.TestCase ): """simple docstring""" def _a ( self : Dict ): """simple docstring""" A_ : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) A_ : Tuple = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowerCamelCase ) A_ : Dict = -1 A_ : List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCamelCase ) A_ : Any = model.generate(_lowerCamelCase , max_new_tokens=10 , do_sample=_lowerCamelCase ) A_ : List[str] = tokenizer.decode(greedy_ids[0] ) with CaptureStdout() as cs: A_ : List[str] = TextStreamer(_lowerCamelCase ) model.generate(_lowerCamelCase , max_new_tokens=10 , do_sample=_lowerCamelCase , streamer=_lowerCamelCase ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer A_ : Dict = cs.out[:-1] self.assertEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : Tuple ): """simple docstring""" A_ : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) A_ : List[str] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowerCamelCase ) A_ : Dict = -1 A_ : List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCamelCase ) A_ : Optional[int] = model.generate(_lowerCamelCase , max_new_tokens=10 , do_sample=_lowerCamelCase ) A_ : str = tokenizer.decode(greedy_ids[0] ) A_ : int = TextIteratorStreamer(_lowerCamelCase ) A_ : List[Any] = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer} A_ : List[Any] = Thread(target=model.generate , kwargs=_lowerCamelCase ) thread.start() A_ : List[Any] = '''''' for new_text in streamer: streamer_text += new_text self.assertEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : int ): """simple docstring""" A_ : List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) A_ : List[Any] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowerCamelCase ) A_ : List[str] = -1 A_ : Any = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCamelCase ) A_ : Tuple = model.generate(_lowerCamelCase , max_new_tokens=10 , do_sample=_lowerCamelCase ) A_ : Tuple = greedy_ids[:, input_ids.shape[1] :] A_ : Tuple = tokenizer.decode(new_greedy_ids[0] ) with CaptureStdout() as cs: A_ : Any = TextStreamer(_lowerCamelCase , skip_prompt=_lowerCamelCase ) model.generate(_lowerCamelCase , max_new_tokens=10 , do_sample=_lowerCamelCase , streamer=_lowerCamelCase ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer A_ : Any = cs.out[:-1] self.assertEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : List[Any] ): """simple docstring""" A_ : List[Any] = AutoTokenizer.from_pretrained('''distilgpt2''' ) A_ : Tuple = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(_lowerCamelCase ) A_ : List[Any] = -1 A_ : Union[str, Any] = torch.ones((1, 5) , device=_lowerCamelCase ).long() * model.config.bos_token_id with CaptureStdout() as cs: A_ : List[Any] = TextStreamer(_lowerCamelCase , skip_special_tokens=_lowerCamelCase ) model.generate(_lowerCamelCase , max_new_tokens=1 , do_sample=_lowerCamelCase , streamer=_lowerCamelCase ) # The prompt contains a special token, so the streamer should not print it. As such, the output text, when # re-tokenized, must only contain one token A_ : List[str] = cs.out[:-1] # Remove the final "\n" A_ : List[Any] = tokenizer(_lowerCamelCase , return_tensors='''pt''' ) self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) ) def _a ( self : Union[str, Any] ): """simple docstring""" A_ : str = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) A_ : str = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowerCamelCase ) A_ : Union[str, Any] = -1 A_ : Union[str, Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCamelCase ) A_ : List[str] = TextIteratorStreamer(_lowerCamelCase , timeout=0.0_01 ) A_ : str = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer} A_ : List[str] = Thread(target=model.generate , kwargs=_lowerCamelCase ) thread.start() # The streamer will timeout after 0.001 seconds, so an exception will be raised with self.assertRaises(_lowerCamelCase ): A_ : str = '''''' for new_text in streamer: streamer_text += new_text
4
0
'''simple docstring''' import numpy as np def snake_case__ ( lowerCamelCase__ : np.ndarray , lowerCamelCase__ : float ) -> np.ndarray: return np.where(vector > 0 , lowerCamelCase__ , (alpha * (np.exp(lowerCamelCase__ ) - 1)) ) if __name__ == "__main__": import doctest doctest.testmod()
371
'''simple docstring''' import heapq def snake_case__ ( lowerCamelCase__ : dict ) -> set[int]: A_ : list[list] = [] # for each node and his adjacency list add them and the rank of the node to queue # using heapq module the queue will be filled like a Priority Queue # heapq works with a min priority queue, so I used -1*len(v) to build it for key, value in graph.items(): # O(log(n)) heapq.heappush(lowerCamelCase__ , [-1 * len(lowerCamelCase__ ), (key, value)] ) # chosen_vertices = set of chosen vertices A_ : str = set() # while queue isn't empty and there are still edges # (queue[0][0] is the rank of the node with max rank) while queue and queue[0][0] != 0: # extract vertex with max rank from queue and add it to chosen_vertices A_ : Tuple = heapq.heappop(lowerCamelCase__ )[1][0] chosen_vertices.add(lowerCamelCase__ ) # Remove all arcs adjacent to argmax for elem in queue: # if v haven't adjacent node, skip if elem[0] == 0: continue # if argmax is reachable from elem # remove argmax from elem's adjacent list and update his rank if argmax in elem[1][1]: A_ : List[str] = elem[1][1].index(lowerCamelCase__ ) del elem[1][1][index] elem[0] += 1 # re-order the queue heapq.heapify(lowerCamelCase__ ) return chosen_vertices if __name__ == "__main__": import doctest doctest.testmod() snake_case__ = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]} print(F'Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}')
4
0
'''simple docstring''' import warnings from ...configuration_utils import PretrainedConfig from ...utils import logging snake_case__ = logging.get_logger(__name__) snake_case__ = { """RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json""", } class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = 'mvp' _lowerCAmelCase = ['past_key_values'] _lowerCAmelCase = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'} def __init__( self : Dict , _lowerCamelCase : Dict=50267 , _lowerCamelCase : Union[str, Any]=1024 , _lowerCamelCase : Optional[int]=12 , _lowerCamelCase : str=4096 , _lowerCamelCase : str=16 , _lowerCamelCase : int=12 , _lowerCamelCase : Union[str, Any]=4096 , _lowerCamelCase : Any=16 , _lowerCamelCase : List[str]=0.0 , _lowerCamelCase : Union[str, Any]=0.0 , _lowerCamelCase : Optional[Any]="gelu" , _lowerCamelCase : Tuple=1024 , _lowerCamelCase : Optional[Any]=0.1 , _lowerCamelCase : Tuple=0.0 , _lowerCamelCase : List[Any]=0.0 , _lowerCamelCase : Union[str, Any]=0.02 , _lowerCamelCase : int=0.0 , _lowerCamelCase : List[str]=False , _lowerCamelCase : int=True , _lowerCamelCase : Tuple=1 , _lowerCamelCase : Union[str, Any]=0 , _lowerCamelCase : Optional[Any]=2 , _lowerCamelCase : List[Any]=True , _lowerCamelCase : int=2 , _lowerCamelCase : int=2 , _lowerCamelCase : Tuple=False , _lowerCamelCase : Dict=100 , _lowerCamelCase : int=800 , **_lowerCamelCase : int , ): """simple docstring""" A_ : List[Any] = vocab_size A_ : Dict = max_position_embeddings A_ : str = d_model A_ : int = encoder_ffn_dim A_ : Optional[Any] = encoder_layers A_ : Dict = encoder_attention_heads A_ : Optional[Any] = decoder_ffn_dim A_ : Dict = decoder_layers A_ : str = decoder_attention_heads A_ : List[str] = dropout A_ : str = attention_dropout A_ : int = activation_dropout A_ : List[str] = activation_function A_ : Dict = init_std A_ : List[Any] = encoder_layerdrop A_ : int = decoder_layerdrop A_ : int = classifier_dropout A_ : int = use_cache A_ : Any = encoder_layers A_ : List[Any] = scale_embedding # scale factor will be sqrt(d_model) if True A_ : List[Any] = use_prompt A_ : int = prompt_length A_ : Optional[Any] = prompt_mid_dim super().__init__( pad_token_id=_lowerCamelCase , bos_token_id=_lowerCamelCase , eos_token_id=_lowerCamelCase , is_encoder_decoder=_lowerCamelCase , decoder_start_token_id=_lowerCamelCase , forced_eos_token_id=_lowerCamelCase , **_lowerCamelCase , ) if self.forced_bos_token_id is None and kwargs.get('''force_bos_token_to_be_generated''' , _lowerCamelCase ): A_ : List[str] = self.bos_token_id warnings.warn( f'Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. ' '''The config can simply be saved and uploaded again to be fixed.''' )
350
'''simple docstring''' import argparse from collections import OrderedDict from pathlib import Path import torch from huggingface_hub import hf_hub_download from PIL import Image from torchvision.transforms import functional as F from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection from transformers.utils import logging logging.set_verbosity_info() snake_case__ = logging.get_logger(__name__) # here we list all keys to be renamed (original name on the left, our name on the right) snake_case__ = [] for i in range(6): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (F'transformer.encoder.layers.{i}.self_attn.out_proj.weight', F'encoder.layers.{i}.self_attn.out_proj.weight') ) rename_keys.append( (F'transformer.encoder.layers.{i}.self_attn.out_proj.bias', F'encoder.layers.{i}.self_attn.out_proj.bias') ) rename_keys.append((F'transformer.encoder.layers.{i}.linear1.weight', F'encoder.layers.{i}.fc1.weight')) rename_keys.append((F'transformer.encoder.layers.{i}.linear1.bias', F'encoder.layers.{i}.fc1.bias')) rename_keys.append((F'transformer.encoder.layers.{i}.linear2.weight', F'encoder.layers.{i}.fc2.weight')) rename_keys.append((F'transformer.encoder.layers.{i}.linear2.bias', F'encoder.layers.{i}.fc2.bias')) rename_keys.append( (F'transformer.encoder.layers.{i}.norm1.weight', F'encoder.layers.{i}.self_attn_layer_norm.weight') ) rename_keys.append((F'transformer.encoder.layers.{i}.norm1.bias', F'encoder.layers.{i}.self_attn_layer_norm.bias')) rename_keys.append((F'transformer.encoder.layers.{i}.norm2.weight', F'encoder.layers.{i}.final_layer_norm.weight')) rename_keys.append((F'transformer.encoder.layers.{i}.norm2.bias', F'encoder.layers.{i}.final_layer_norm.bias')) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( (F'transformer.decoder.layers.{i}.self_attn.out_proj.weight', F'decoder.layers.{i}.self_attn.out_proj.weight') ) rename_keys.append( (F'transformer.decoder.layers.{i}.self_attn.out_proj.bias', F'decoder.layers.{i}.self_attn.out_proj.bias') ) rename_keys.append( ( F'transformer.decoder.layers.{i}.multihead_attn.out_proj.weight', F'decoder.layers.{i}.encoder_attn.out_proj.weight', ) ) rename_keys.append( ( F'transformer.decoder.layers.{i}.multihead_attn.out_proj.bias', F'decoder.layers.{i}.encoder_attn.out_proj.bias', ) ) rename_keys.append((F'transformer.decoder.layers.{i}.linear1.weight', F'decoder.layers.{i}.fc1.weight')) rename_keys.append((F'transformer.decoder.layers.{i}.linear1.bias', F'decoder.layers.{i}.fc1.bias')) rename_keys.append((F'transformer.decoder.layers.{i}.linear2.weight', F'decoder.layers.{i}.fc2.weight')) rename_keys.append((F'transformer.decoder.layers.{i}.linear2.bias', F'decoder.layers.{i}.fc2.bias')) rename_keys.append( (F'transformer.decoder.layers.{i}.norm1.weight', F'decoder.layers.{i}.self_attn_layer_norm.weight') ) rename_keys.append((F'transformer.decoder.layers.{i}.norm1.bias', F'decoder.layers.{i}.self_attn_layer_norm.bias')) rename_keys.append( (F'transformer.decoder.layers.{i}.norm2.weight', F'decoder.layers.{i}.encoder_attn_layer_norm.weight') ) rename_keys.append( (F'transformer.decoder.layers.{i}.norm2.bias', F'decoder.layers.{i}.encoder_attn_layer_norm.bias') ) rename_keys.append((F'transformer.decoder.layers.{i}.norm3.weight', F'decoder.layers.{i}.final_layer_norm.weight')) rename_keys.append((F'transformer.decoder.layers.{i}.norm3.bias', F'decoder.layers.{i}.final_layer_norm.bias')) # convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads rename_keys.extend( [ ("""input_proj.weight""", """input_projection.weight"""), ("""input_proj.bias""", """input_projection.bias"""), ("""query_embed.weight""", """query_position_embeddings.weight"""), ("""transformer.encoder.norm.weight""", """encoder.layernorm.weight"""), ("""transformer.encoder.norm.bias""", """encoder.layernorm.bias"""), ("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""), ("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""), ("""class_embed.weight""", """class_labels_classifier.weight"""), ("""class_embed.bias""", """class_labels_classifier.bias"""), ("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""), ("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""), ("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""), ("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""), ("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""), ("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""), ] ) def snake_case__ ( lowerCamelCase__ : List[Any] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : List[Any] ) -> Optional[Any]: A_ : Tuple = state_dict.pop(lowerCamelCase__ ) A_ : Optional[Any] = val def snake_case__ ( lowerCamelCase__ : Dict ) -> Any: A_ : int = OrderedDict() for key, value in state_dict.items(): if "backbone.0.body" in key: A_ : int = key.replace('''backbone.0.body''' , '''backbone.conv_encoder.model''' ) A_ : List[str] = value else: A_ : Optional[int] = value return new_state_dict def snake_case__ ( lowerCamelCase__ : Union[str, Any] ) -> Optional[Any]: A_ : Any = '''''' # first: transformer encoder for i in range(6 ): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) A_ : Tuple = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight' ) A_ : Dict = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias' ) # next, add query, keys and values (in that order) to the state dict A_ : str = in_proj_weight[:2_5_6, :] A_ : Optional[Any] = in_proj_bias[:2_5_6] A_ : Dict = in_proj_weight[2_5_6:5_1_2, :] A_ : Tuple = in_proj_bias[2_5_6:5_1_2] A_ : Tuple = in_proj_weight[-2_5_6:, :] A_ : Optional[int] = in_proj_bias[-2_5_6:] # next: transformer decoder (which is a bit more complex because it also includes cross-attention) for i in range(6 ): # read in weights + bias of input projection layer of self-attention A_ : Union[str, Any] = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight' ) A_ : Dict = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias' ) # next, add query, keys and values (in that order) to the state dict A_ : List[str] = in_proj_weight[:2_5_6, :] A_ : int = in_proj_bias[:2_5_6] A_ : Any = in_proj_weight[2_5_6:5_1_2, :] A_ : List[str] = in_proj_bias[2_5_6:5_1_2] A_ : Union[str, Any] = in_proj_weight[-2_5_6:, :] A_ : Optional[Any] = in_proj_bias[-2_5_6:] # read in weights + bias of input projection layer of cross-attention A_ : Tuple = state_dict.pop( f'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight' ) A_ : Optional[Any] = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias' ) # next, add query, keys and values (in that order) of cross-attention to the state dict A_ : Dict = in_proj_weight_cross_attn[:2_5_6, :] A_ : Tuple = in_proj_bias_cross_attn[:2_5_6] A_ : int = in_proj_weight_cross_attn[2_5_6:5_1_2, :] A_ : List[str] = in_proj_bias_cross_attn[2_5_6:5_1_2] A_ : Any = in_proj_weight_cross_attn[-2_5_6:, :] A_ : Any = in_proj_bias_cross_attn[-2_5_6:] def snake_case__ ( lowerCamelCase__ : List[str] , lowerCamelCase__ : Tuple ) -> Dict: A_ ,A_ : int = image.size A_ : Tuple = max(lowerCamelCase__ , lowerCamelCase__ ) A_ : Optional[Any] = 8_0_0 if '''detection''' in checkpoint_url else 1_0_0_0 A_ : Union[str, Any] = target_max_size / current_max_size A_ : Any = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) ) return resized_image def snake_case__ ( lowerCamelCase__ : Tuple ) -> str: A_ : Any = F.to_tensor(lowerCamelCase__ ) A_ : Optional[Any] = F.normalize(lowerCamelCase__ , mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ) return image @torch.no_grad() def snake_case__ ( lowerCamelCase__ : List[Any] , lowerCamelCase__ : int , lowerCamelCase__ : int ) -> str: logger.info('''Converting model...''' ) # load original state dict A_ : Tuple = torch.hub.load_state_dict_from_url(lowerCamelCase__ , map_location='''cpu''' ) # rename keys for src, dest in rename_keys: rename_key(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) A_ : str = rename_backbone_keys(lowerCamelCase__ ) # query, key and value matrices need special treatment read_in_q_k_v(lowerCamelCase__ ) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them A_ : List[Any] = '''model.''' for key in state_dict.copy().keys(): if not key.startswith('''class_labels_classifier''' ) and not key.startswith('''bbox_predictor''' ): A_ : List[Any] = state_dict.pop(lowerCamelCase__ ) A_ : str = val # create HuggingFace model and load state dict A_ : Union[str, Any] = TableTransformerConfig( backbone='''resnet18''' , mask_loss_coefficient=1 , dice_loss_coefficient=1 , ce_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.4 , class_cost=1 , bbox_cost=5 , giou_cost=2 , ) if "detection" in checkpoint_url: A_ : Dict = 1_5 A_ : Dict = 2 A_ : int = {0: '''table''', 1: '''table rotated'''} A_ : List[str] = idalabel A_ : Optional[int] = {v: k for k, v in idalabel.items()} else: A_ : Union[str, Any] = 1_2_5 A_ : Optional[Any] = 6 A_ : Optional[Any] = { 0: '''table''', 1: '''table column''', 2: '''table row''', 3: '''table column header''', 4: '''table projected row header''', 5: '''table spanning cell''', } A_ : int = idalabel A_ : Tuple = {v: k for k, v in idalabel.items()} A_ : Optional[Any] = DetrImageProcessor( format='''coco_detection''' , max_size=8_0_0 if '''detection''' in checkpoint_url else 1_0_0_0 ) A_ : int = TableTransformerForObjectDetection(lowerCamelCase__ ) model.load_state_dict(lowerCamelCase__ ) model.eval() # verify our conversion A_ : Optional[int] = '''example_pdf.png''' if '''detection''' in checkpoint_url else '''example_table.png''' A_ : Union[str, Any] = hf_hub_download(repo_id='''nielsr/example-pdf''' , repo_type='''dataset''' , filename=lowerCamelCase__ ) A_ : Tuple = Image.open(lowerCamelCase__ ).convert('''RGB''' ) A_ : int = normalize(resize(lowerCamelCase__ , lowerCamelCase__ ) ).unsqueeze(0 ) A_ : str = model(lowerCamelCase__ ) if "detection" in checkpoint_url: A_ : str = (1, 1_5, 3) A_ : int = torch.tensor( [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]] ) A_ : Tuple = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]] ) else: A_ : Optional[int] = (1, 1_2_5, 7) A_ : Dict = torch.tensor( [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]] ) A_ : Any = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]] ) assert outputs.logits.shape == expected_shape assert torch.allclose(outputs.logits[0, :3, :3] , lowerCamelCase__ , atol=1e-4 ) assert torch.allclose(outputs.pred_boxes[0, :3, :3] , lowerCamelCase__ , atol=1e-4 ) print('''Looks ok!''' ) if pytorch_dump_folder_path is not None: # Save model and image processor logger.info(f'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' ) Path(lowerCamelCase__ ).mkdir(exist_ok=lowerCamelCase__ ) model.save_pretrained(lowerCamelCase__ ) image_processor.save_pretrained(lowerCamelCase__ ) if push_to_hub: # Push model to HF hub logger.info('''Pushing model to the hub...''' ) A_ : List[Any] = ( '''microsoft/table-transformer-detection''' if '''detection''' in checkpoint_url else '''microsoft/table-transformer-structure-recognition''' ) model.push_to_hub(lowerCamelCase__ ) image_processor.push_to_hub(lowerCamelCase__ ) if __name__ == "__main__": snake_case__ = argparse.ArgumentParser() parser.add_argument( """--checkpoint_url""", default="""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""", type=str, choices=[ """https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""", """https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth""", ], help="""URL of the Table Transformer checkpoint you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) snake_case__ = parser.parse_args() convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
4
0
'''simple docstring''' import argparse from typing import List import evaluate import numpy as np import torch from datasets import DatasetDict, load_dataset # New Code # # We'll be using StratifiedKFold for this example from sklearn.model_selection import StratifiedKFold from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to perform Cross Validation, # and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To help focus on the differences in the code, building `DataLoaders` # was refactored into its own function. # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## snake_case__ = 16 snake_case__ = 32 def snake_case__ ( lowerCamelCase__ : Accelerator , lowerCamelCase__ : DatasetDict , lowerCamelCase__ : List[int] , lowerCamelCase__ : List[int] , lowerCamelCase__ : int = 1_6 ) -> Tuple: A_ : str = AutoTokenizer.from_pretrained('''bert-base-cased''' ) A_ : Any = DatasetDict( { '''train''': dataset['''train'''].select(lowerCamelCase__ ), '''validation''': dataset['''train'''].select(lowerCamelCase__ ), '''test''': dataset['''validation'''], } ) def tokenize_function(lowerCamelCase__ : Union[str, Any] ): # max_length=None => use the model max length (it's actually the default) A_ : Dict = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=lowerCamelCase__ , max_length=lowerCamelCase__ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): A_ : Optional[Any] = datasets.map( lowerCamelCase__ , batched=lowerCamelCase__ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library A_ : Optional[Any] = tokenized_datasets.rename_column('''label''' , '''labels''' ) def collate_fn(lowerCamelCase__ : Any ): # On TPU it's best to pad everything to the same length or training will be very slow. A_ : Optional[Any] = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": A_ : int = 1_6 elif accelerator.mixed_precision != "no": A_ : str = 8 else: A_ : Tuple = None return tokenizer.pad( lowerCamelCase__ , padding='''longest''' , max_length=lowerCamelCase__ , pad_to_multiple_of=lowerCamelCase__ , return_tensors='''pt''' , ) # Instantiate dataloaders. A_ : Union[str, Any] = DataLoader( tokenized_datasets['''train'''] , shuffle=lowerCamelCase__ , collate_fn=lowerCamelCase__ , batch_size=lowerCamelCase__ ) A_ : Optional[int] = DataLoader( tokenized_datasets['''validation'''] , shuffle=lowerCamelCase__ , collate_fn=lowerCamelCase__ , batch_size=lowerCamelCase__ ) A_ : Dict = DataLoader( tokenized_datasets['''test'''] , shuffle=lowerCamelCase__ , collate_fn=lowerCamelCase__ , batch_size=lowerCamelCase__ ) return train_dataloader, eval_dataloader, test_dataloader def snake_case__ ( lowerCamelCase__ : List[Any] , lowerCamelCase__ : int ) -> Optional[int]: # New Code # A_ : List[Any] = [] # Download the dataset A_ : Tuple = load_dataset('''glue''' , '''mrpc''' ) # Create our splits A_ : Tuple = StratifiedKFold(n_splits=int(args.num_folds ) ) # Initialize accelerator A_ : int = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs A_ : Tuple = config['''lr'''] A_ : int = int(config['''num_epochs'''] ) A_ : Optional[int] = int(config['''seed'''] ) A_ : Tuple = int(config['''batch_size'''] ) A_ : Optional[int] = evaluate.load('''glue''' , '''mrpc''' ) # If the batch size is too big we use gradient accumulation A_ : Any = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: A_ : int = batch_size // MAX_GPU_BATCH_SIZE A_ : Any = MAX_GPU_BATCH_SIZE set_seed(lowerCamelCase__ ) # New Code # # Create our folds: A_ : List[Any] = kfold.split(np.zeros(datasets['''train'''].num_rows ) , datasets['''train''']['''label'''] ) A_ : List[str] = [] # Iterate over them for i, (train_idxs, valid_idxs) in enumerate(lowerCamelCase__ ): A_ : str = get_fold_dataloaders( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) A_ : int = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=lowerCamelCase__ ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). A_ : Optional[int] = model.to(accelerator.device ) # Instantiate optimizer A_ : List[str] = AdamW(params=model.parameters() , lr=lowerCamelCase__ ) # Instantiate scheduler A_ : Optional[int] = get_linear_schedule_with_warmup( optimizer=lowerCamelCase__ , num_warmup_steps=1_0_0 , num_training_steps=(len(lowerCamelCase__ ) * num_epochs) // gradient_accumulation_steps , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. A_ : Optional[int] = accelerator.prepare( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) # Now we train the model for epoch in range(lowerCamelCase__ ): model.train() for step, batch in enumerate(lowerCamelCase__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) A_ : Tuple = model(**lowerCamelCase__ ) A_ : Optional[Any] = outputs.loss A_ : Optional[int] = loss / gradient_accumulation_steps accelerator.backward(lowerCamelCase__ ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(lowerCamelCase__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): A_ : Optional[Any] = model(**lowerCamelCase__ ) A_ : List[Any] = outputs.logits.argmax(dim=-1 ) A_ : Optional[int] = accelerator.gather_for_metrics((predictions, batch['''labels''']) ) metric.add_batch( predictions=lowerCamelCase__ , references=lowerCamelCase__ , ) A_ : Optional[int] = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f'epoch {epoch}:' , lowerCamelCase__ ) # New Code # # We also run predictions on the test set at the very end A_ : Union[str, Any] = [] for step, batch in enumerate(lowerCamelCase__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): A_ : List[str] = model(**lowerCamelCase__ ) A_ : Any = outputs.logits A_ : Union[str, Any] = accelerator.gather_for_metrics((predictions, batch['''labels''']) ) fold_predictions.append(predictions.cpu() ) if i == 0: # We need all of the test predictions test_references.append(references.cpu() ) # Use accelerator.print to print only on the main process. test_predictions.append(torch.cat(lowerCamelCase__ , dim=0 ) ) # We now need to release all our memory and get rid of the current model, optimizer, etc accelerator.free_memory() # New Code # # Finally we check the accuracy of our folded results: A_ : Tuple = torch.cat(lowerCamelCase__ , dim=0 ) A_ : Dict = torch.stack(lowerCamelCase__ , dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 ) A_ : str = metric.compute(predictions=lowerCamelCase__ , references=lowerCamelCase__ ) accelerator.print('''Average test metrics from all folds:''' , lowerCamelCase__ ) def snake_case__ ( ) -> int: A_ : Union[str, Any] = argparse.ArgumentParser(description='''Simple example of training script.''' ) parser.add_argument( '''--mixed_precision''' , type=lowerCamelCase__ , default=lowerCamelCase__ , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose''' '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.''' '''and an Nvidia Ampere GPU.''' , ) parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' ) # New Code # parser.add_argument('''--num_folds''' , type=lowerCamelCase__ , default=3 , help='''The number of splits to perform across the dataset''' ) A_ : int = parser.parse_args() A_ : Dict = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 4_2, '''batch_size''': 1_6} training_function(lowerCamelCase__ , lowerCamelCase__ ) if __name__ == "__main__": main()
351
'''simple docstring''' import logging import os from dataclasses import dataclass from typing import List, Optional, Union import tqdm from filelock import FileLock from transformers import ( BartTokenizer, BartTokenizerFast, DataProcessor, PreTrainedTokenizer, RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, is_tf_available, is_torch_available, ) snake_case__ = logging.getLogger(__name__) @dataclass(frozen=a__ ) class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = 42 _lowerCAmelCase = 42 _lowerCAmelCase = None _lowerCAmelCase = None _lowerCAmelCase = None @dataclass(frozen=a__ ) class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = 42 _lowerCAmelCase = None _lowerCAmelCase = None _lowerCAmelCase = None _lowerCAmelCase = None if is_torch_available(): import torch from torch.utils.data import Dataset class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = 42 def __init__( self : Optional[int] , _lowerCamelCase : str , _lowerCamelCase : PreTrainedTokenizer , _lowerCamelCase : str , _lowerCamelCase : Optional[int] = None , _lowerCamelCase : List[Any]=False , _lowerCamelCase : bool = False , ): """simple docstring""" A_ : Optional[int] = hans_processors[task]() A_ : int = os.path.join( _lowerCamelCase , '''cached_{}_{}_{}_{}'''.format( '''dev''' if evaluate else '''train''' , tokenizer.__class__.__name__ , str(_lowerCamelCase ) , _lowerCamelCase , ) , ) A_ : Dict = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) A_ ,A_ : List[str] = label_list[2], label_list[1] A_ : Optional[int] = label_list # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. A_ : str = cached_features_file + '''.lock''' with FileLock(_lowerCamelCase ): if os.path.exists(_lowerCamelCase ) and not overwrite_cache: logger.info(f'Loading features from cached file {cached_features_file}' ) A_ : List[str] = torch.load(_lowerCamelCase ) else: logger.info(f'Creating features from dataset file at {data_dir}' ) A_ : Optional[int] = ( processor.get_dev_examples(_lowerCamelCase ) if evaluate else processor.get_train_examples(_lowerCamelCase ) ) logger.info('''Training examples: %s''' , len(_lowerCamelCase ) ) A_ : Optional[int] = hans_convert_examples_to_features(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) logger.info('''Saving features into cached file %s''' , _lowerCamelCase ) torch.save(self.features , _lowerCamelCase ) def __len__( self : List[str] ): """simple docstring""" return len(self.features ) def __getitem__( self : List[str] , _lowerCamelCase : Optional[int] ): """simple docstring""" return self.features[i] def _a ( self : str ): """simple docstring""" return self.label_list if is_tf_available(): import tensorflow as tf class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = 42 def __init__( self : Optional[int] , _lowerCamelCase : str , _lowerCamelCase : PreTrainedTokenizer , _lowerCamelCase : str , _lowerCamelCase : Optional[int] = 128 , _lowerCamelCase : Dict=False , _lowerCamelCase : bool = False , ): """simple docstring""" A_ : Optional[int] = hans_processors[task]() A_ : Optional[int] = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) A_ ,A_ : Union[str, Any] = label_list[2], label_list[1] A_ : Tuple = label_list A_ : Optional[int] = processor.get_dev_examples(_lowerCamelCase ) if evaluate else processor.get_train_examples(_lowerCamelCase ) A_ : Tuple = hans_convert_examples_to_features(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) def gen(): for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='''convert examples to features''' ): if ex_index % 10000 == 0: logger.info('''Writing example %d of %d''' % (ex_index, len(_lowerCamelCase )) ) yield ( { "example_id": 0, "input_ids": ex.input_ids, "attention_mask": ex.attention_mask, "token_type_ids": ex.token_type_ids, }, ex.label, ) A_ : List[Any] = tf.data.Dataset.from_generator( _lowerCamelCase , ( { '''example_id''': tf.intaa, '''input_ids''': tf.intaa, '''attention_mask''': tf.intaa, '''token_type_ids''': tf.intaa, }, tf.intaa, ) , ( { '''example_id''': tf.TensorShape([] ), '''input_ids''': tf.TensorShape([None, None] ), '''attention_mask''': tf.TensorShape([None, None] ), '''token_type_ids''': tf.TensorShape([None, None] ), }, tf.TensorShape([] ), ) , ) def _a ( self : Any ): """simple docstring""" return self.dataset def __len__( self : Dict ): """simple docstring""" return len(self.features ) def __getitem__( self : Optional[int] , _lowerCamelCase : List[str] ): """simple docstring""" return self.features[i] def _a ( self : Tuple ): """simple docstring""" return self.label_list class UpperCamelCase_ (a__ ): """simple docstring""" def _a ( self : List[str] , _lowerCamelCase : Union[str, Any] ): """simple docstring""" return self._create_examples(self._read_tsv(os.path.join(_lowerCamelCase , '''heuristics_train_set.txt''' ) ) , '''train''' ) def _a ( self : List[str] , _lowerCamelCase : Tuple ): """simple docstring""" return self._create_examples(self._read_tsv(os.path.join(_lowerCamelCase , '''heuristics_evaluation_set.txt''' ) ) , '''dev''' ) def _a ( self : Any ): """simple docstring""" return ["contradiction", "entailment", "neutral"] def _a ( self : Optional[Any] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Any ): """simple docstring""" A_ : Tuple = [] for i, line in enumerate(_lowerCamelCase ): if i == 0: continue A_ : str = '''%s-%s''' % (set_type, line[0]) A_ : Optional[Any] = line[5] A_ : Union[str, Any] = line[6] A_ : List[str] = line[7][2:] if line[7].startswith('''ex''' ) else line[7] A_ : str = line[0] examples.append(InputExample(guid=_lowerCamelCase , text_a=_lowerCamelCase , text_b=_lowerCamelCase , label=_lowerCamelCase , pairID=_lowerCamelCase ) ) return examples def snake_case__ ( lowerCamelCase__ : List[InputExample] , lowerCamelCase__ : List[str] , lowerCamelCase__ : int , lowerCamelCase__ : PreTrainedTokenizer , ) -> int: A_ : Union[str, Any] = {label: i for i, label in enumerate(lowerCamelCase__ )} A_ : Optional[Any] = [] for ex_index, example in tqdm.tqdm(enumerate(lowerCamelCase__ ) , desc='''convert examples to features''' ): if ex_index % 1_0_0_0_0 == 0: logger.info('''Writing example %d''' % (ex_index) ) A_ : Optional[int] = tokenizer( example.text_a , example.text_b , add_special_tokens=lowerCamelCase__ , max_length=lowerCamelCase__ , padding='''max_length''' , truncation=lowerCamelCase__ , return_overflowing_tokens=lowerCamelCase__ , ) A_ : List[str] = label_map[example.label] if example.label in label_map else 0 A_ : Tuple = int(example.pairID ) features.append(InputFeatures(**lowerCamelCase__ , label=lowerCamelCase__ , pairID=lowerCamelCase__ ) ) for i, example in enumerate(examples[:5] ): logger.info('''*** Example ***''' ) logger.info(f'guid: {example}' ) logger.info(f'features: {features[i]}' ) return features snake_case__ = { """hans""": 3, } snake_case__ = { """hans""": HansProcessor, }
4
0
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices snake_case__ = logging.get_logger(__name__) snake_case__ = { """microsoft/swin-tiny-patch4-window7-224""": ( """https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json""" ), # See all Swin models at https://huggingface.co/models?filter=swin } class UpperCamelCase_ (a__, a__ ): """simple docstring""" _lowerCAmelCase = 'swin' _lowerCAmelCase = { 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers', } def __init__( self : Any , _lowerCamelCase : Optional[Any]=224 , _lowerCamelCase : List[str]=4 , _lowerCamelCase : Optional[Any]=3 , _lowerCamelCase : Tuple=96 , _lowerCamelCase : List[Any]=[2, 2, 6, 2] , _lowerCamelCase : List[str]=[3, 6, 12, 24] , _lowerCamelCase : List[Any]=7 , _lowerCamelCase : Optional[int]=4.0 , _lowerCamelCase : List[str]=True , _lowerCamelCase : List[str]=0.0 , _lowerCamelCase : Any=0.0 , _lowerCamelCase : Dict=0.1 , _lowerCamelCase : List[str]="gelu" , _lowerCamelCase : Tuple=False , _lowerCamelCase : Dict=0.02 , _lowerCamelCase : Optional[Any]=1E-5 , _lowerCamelCase : Any=32 , _lowerCamelCase : Tuple=None , _lowerCamelCase : Any=None , **_lowerCamelCase : str , ): """simple docstring""" super().__init__(**_lowerCamelCase ) A_ : Optional[int] = image_size A_ : Optional[int] = patch_size A_ : Optional[int] = num_channels A_ : Any = embed_dim A_ : List[Any] = depths A_ : Any = len(_lowerCamelCase ) A_ : List[Any] = num_heads A_ : Tuple = window_size A_ : Tuple = mlp_ratio A_ : Dict = qkv_bias A_ : List[str] = hidden_dropout_prob A_ : List[str] = attention_probs_dropout_prob A_ : Any = drop_path_rate A_ : List[Any] = hidden_act A_ : Tuple = use_absolute_embeddings A_ : int = layer_norm_eps A_ : Optional[Any] = initializer_range A_ : Union[str, Any] = encoder_stride # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model A_ : str = int(embed_dim * 2 ** (len(_lowerCamelCase ) - 1) ) A_ : str = ['''stem'''] + [f'stage{idx}' for idx in range(1 , len(_lowerCamelCase ) + 1 )] A_ : Optional[Any] = get_aligned_output_features_output_indices( out_features=_lowerCamelCase , out_indices=_lowerCamelCase , stage_names=self.stage_names ) class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = version.parse('1.11' ) @property def _a ( self : str ): """simple docstring""" return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def _a ( self : Union[str, Any] ): """simple docstring""" return 1E-4
352
'''simple docstring''' import io import itertools import json from dataclasses import dataclass from typing import Optional import pyarrow as pa import pyarrow.json as paj import datasets from datasets.table import table_cast from datasets.utils.file_utils import readline snake_case__ = datasets.utils.logging.get_logger(__name__) @dataclass class UpperCamelCase_ (datasets.BuilderConfig ): """simple docstring""" _lowerCAmelCase = None _lowerCAmelCase = "utf-8" _lowerCAmelCase = None _lowerCAmelCase = None _lowerCAmelCase = True # deprecated _lowerCAmelCase = None # deprecated _lowerCAmelCase = 1_0 << 2_0 # 10MB _lowerCAmelCase = None class UpperCamelCase_ (datasets.ArrowBasedBuilder ): """simple docstring""" _lowerCAmelCase = JsonConfig def _a ( self : int ): """simple docstring""" if self.config.block_size is not None: logger.warning('''The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead''' ) A_ : List[Any] = self.config.block_size if self.config.use_threads is not True: logger.warning( '''The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.''' ) if self.config.newlines_in_values is not None: raise ValueError('''The JSON loader parameter `newlines_in_values` is no longer supported''' ) return datasets.DatasetInfo(features=self.config.features ) def _a ( self : Any , _lowerCamelCase : List[str] ): """simple docstring""" if not self.config.data_files: raise ValueError(f'At least one data file must be specified, but got data_files={self.config.data_files}' ) A_ : int = dl_manager.download_and_extract(self.config.data_files ) if isinstance(_lowerCamelCase , (str, list, tuple) ): A_ : Union[str, Any] = data_files if isinstance(_lowerCamelCase , _lowerCamelCase ): A_ : List[str] = [files] A_ : List[Any] = [dl_manager.iter_files(_lowerCamelCase ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )] A_ : Tuple = [] for split_name, files in data_files.items(): if isinstance(_lowerCamelCase , _lowerCamelCase ): A_ : int = [files] A_ : Union[str, Any] = [dl_manager.iter_files(_lowerCamelCase ) for file in files] splits.append(datasets.SplitGenerator(name=_lowerCamelCase , gen_kwargs={'''files''': files} ) ) return splits def _a ( self : int , _lowerCamelCase : pa.Table ): """simple docstring""" if self.config.features is not None: # adding missing columns for column_name in set(self.config.features ) - set(pa_table.column_names ): A_ : Optional[int] = self.config.features.arrow_schema.field(_lowerCamelCase ).type A_ : Optional[int] = pa_table.append_column(_lowerCamelCase , pa.array([None] * len(_lowerCamelCase ) , type=_lowerCamelCase ) ) # more expensive cast to support nested structures with keys in a different order # allows str <-> int/float or str to Audio for example A_ : str = table_cast(_lowerCamelCase , self.config.features.arrow_schema ) return pa_table def _a ( self : List[str] , _lowerCamelCase : int ): """simple docstring""" for file_idx, file in enumerate(itertools.chain.from_iterable(_lowerCamelCase ) ): # If the file is one json object and if we need to look at the list of items in one specific field if self.config.field is not None: with open(_lowerCamelCase , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f: A_ : int = json.load(_lowerCamelCase ) # We keep only the field we are interested in A_ : List[str] = dataset[self.config.field] # We accept two format: a list of dicts or a dict of lists if isinstance(_lowerCamelCase , (list, tuple) ): A_ : int = set().union(*[row.keys() for row in dataset] ) A_ : List[str] = {col: [row.get(_lowerCamelCase ) for row in dataset] for col in keys} else: A_ : Tuple = dataset A_ : Dict = pa.Table.from_pydict(_lowerCamelCase ) yield file_idx, self._cast_table(_lowerCamelCase ) # If the file has one json object per line else: with open(_lowerCamelCase , '''rb''' ) as f: A_ : int = 0 # Use block_size equal to the chunk size divided by 32 to leverage multithreading # Set a default minimum value of 16kB if the chunk size is really small A_ : int = max(self.config.chunksize // 32 , 16 << 10 ) A_ : int = ( self.config.encoding_errors if self.config.encoding_errors is not None else '''strict''' ) while True: A_ : Any = f.read(self.config.chunksize ) if not batch: break # Finish current line try: batch += f.readline() except (AttributeError, io.UnsupportedOperation): batch += readline(_lowerCamelCase ) # PyArrow only accepts utf-8 encoded bytes if self.config.encoding != "utf-8": A_ : Optional[Any] = batch.decode(self.config.encoding , errors=_lowerCamelCase ).encode('''utf-8''' ) try: while True: try: A_ : List[Any] = paj.read_json( io.BytesIO(_lowerCamelCase ) , read_options=paj.ReadOptions(block_size=_lowerCamelCase ) ) break except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e: if ( isinstance(_lowerCamelCase , pa.ArrowInvalid ) and "straddling" not in str(_lowerCamelCase ) or block_size > len(_lowerCamelCase ) ): raise else: # Increase the block size in case it was too small. # The block size will be reset for the next file. logger.debug( f'Batch of {len(_lowerCamelCase )} bytes couldn\'t be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.' ) block_size *= 2 except pa.ArrowInvalid as e: try: with open( _lowerCamelCase , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f: A_ : Optional[Any] = json.load(_lowerCamelCase ) except json.JSONDecodeError: logger.error(f'Failed to read file \'{file}\' with error {type(_lowerCamelCase )}: {e}' ) raise e # If possible, parse the file as a list of json objects and exit the loop if isinstance(_lowerCamelCase , _lowerCamelCase ): # list is the only sequence type supported in JSON try: A_ : Optional[int] = set().union(*[row.keys() for row in dataset] ) A_ : Tuple = {col: [row.get(_lowerCamelCase ) for row in dataset] for col in keys} A_ : int = pa.Table.from_pydict(_lowerCamelCase ) except (pa.ArrowInvalid, AttributeError) as e: logger.error(f'Failed to read file \'{file}\' with error {type(_lowerCamelCase )}: {e}' ) raise ValueError(f'Not able to read records in the JSON file at {file}.' ) from None yield file_idx, self._cast_table(_lowerCamelCase ) break else: logger.error(f'Failed to read file \'{file}\' with error {type(_lowerCamelCase )}: {e}' ) raise ValueError( f'Not able to read records in the JSON file at {file}. ' f'You should probably indicate the field of the JSON file containing your records. ' f'This JSON file contain the following fields: {str(list(dataset.keys() ) )}. ' f'Select the correct one and provide it as `field=\'XXX\'` to the dataset loading method. ' ) from None # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(_lowerCamelCase ) batch_idx += 1
4
0
import unicodedata from dataclasses import dataclass from typing import Optional, Union import numpy as np from transformers.data.data_collator import DataCollatorMixin from transformers.file_utils import PaddingStrategy from transformers.tokenization_utils_base import PreTrainedTokenizerBase def snake_case__ ( lowerCamelCase__ : List[str] , lowerCamelCase__ : int , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Tuple ) -> Tuple: if isinstance(lowerCamelCase__ , lowerCamelCase__ ): A_ : Tuple = np.full((len(lowerCamelCase__ ), sequence_length, 2) , lowerCamelCase__ ) else: A_ : Optional[Any] = np.full((len(lowerCamelCase__ ), sequence_length) , lowerCamelCase__ ) for i, tensor in enumerate(lowerCamelCase__ ): if padding_side == "right": if isinstance(lowerCamelCase__ , lowerCamelCase__ ): A_ : Union[str, Any] = tensor[:sequence_length] else: A_ : str = tensor[:sequence_length] else: if isinstance(lowerCamelCase__ , lowerCamelCase__ ): A_ : str = tensor[:sequence_length] else: A_ : Union[str, Any] = tensor[:sequence_length] return out_tensor.tolist() def snake_case__ ( lowerCamelCase__ : List[str] ) -> Any: A_ : int = ord(lowerCamelCase__ ) if (cp >= 3_3 and cp <= 4_7) or (cp >= 5_8 and cp <= 6_4) or (cp >= 9_1 and cp <= 9_6) or (cp >= 1_2_3 and cp <= 1_2_6): return True A_ : List[Any] = unicodedata.category(lowerCamelCase__ ) if cat.startswith('''P''' ): return True return False @dataclass class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = 4_2 _lowerCAmelCase = True _lowerCAmelCase = None _lowerCAmelCase = None _lowerCAmelCase = -1_0_0 _lowerCAmelCase = 'pt' def _a ( self : Tuple , _lowerCamelCase : List[Any] ): """simple docstring""" import torch A_ : Any = '''label''' if '''label''' in features[0].keys() else '''labels''' A_ : int = [feature[label_name] for feature in features] if label_name in features[0].keys() else None A_ : List[str] = self.tokenizer.pad( _lowerCamelCase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' if labels is None else None , ) if labels is None: return batch A_ : Dict = torch.tensor(batch['''entity_ids'''] ).shape[1] A_ : List[str] = self.tokenizer.padding_side if padding_side == "right": A_ : Dict = [ list(_lowerCamelCase ) + [self.label_pad_token_id] * (sequence_length - len(_lowerCamelCase )) for label in labels ] else: A_ : str = [ [self.label_pad_token_id] * (sequence_length - len(_lowerCamelCase )) + list(_lowerCamelCase ) for label in labels ] A_ : List[Any] = [feature['''ner_tags'''] for feature in features] A_ : Dict = padding_tensor(_lowerCamelCase , -1 , _lowerCamelCase , _lowerCamelCase ) A_ : Any = [feature['''original_entity_spans'''] for feature in features] A_ : Dict = padding_tensor(_lowerCamelCase , (-1, -1) , _lowerCamelCase , _lowerCamelCase ) A_ : Union[str, Any] = {k: torch.tensor(_lowerCamelCase , dtype=torch.intaa ) for k, v in batch.items()} return batch
353
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices snake_case__ = logging.get_logger(__name__) snake_case__ = { """microsoft/swin-tiny-patch4-window7-224""": ( """https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json""" ), # See all Swin models at https://huggingface.co/models?filter=swin } class UpperCamelCase_ (a__, a__ ): """simple docstring""" _lowerCAmelCase = 'swin' _lowerCAmelCase = { 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers', } def __init__( self : Any , _lowerCamelCase : Optional[Any]=224 , _lowerCamelCase : List[str]=4 , _lowerCamelCase : Optional[Any]=3 , _lowerCamelCase : Tuple=96 , _lowerCamelCase : List[Any]=[2, 2, 6, 2] , _lowerCamelCase : List[str]=[3, 6, 12, 24] , _lowerCamelCase : List[Any]=7 , _lowerCamelCase : Optional[int]=4.0 , _lowerCamelCase : List[str]=True , _lowerCamelCase : List[str]=0.0 , _lowerCamelCase : Any=0.0 , _lowerCamelCase : Dict=0.1 , _lowerCamelCase : List[str]="gelu" , _lowerCamelCase : Tuple=False , _lowerCamelCase : Dict=0.02 , _lowerCamelCase : Optional[Any]=1E-5 , _lowerCamelCase : Any=32 , _lowerCamelCase : Tuple=None , _lowerCamelCase : Any=None , **_lowerCamelCase : str , ): """simple docstring""" super().__init__(**_lowerCamelCase ) A_ : Optional[int] = image_size A_ : Optional[int] = patch_size A_ : Optional[int] = num_channels A_ : Any = embed_dim A_ : List[Any] = depths A_ : Any = len(_lowerCamelCase ) A_ : List[Any] = num_heads A_ : Tuple = window_size A_ : Tuple = mlp_ratio A_ : Dict = qkv_bias A_ : List[str] = hidden_dropout_prob A_ : List[str] = attention_probs_dropout_prob A_ : Any = drop_path_rate A_ : List[Any] = hidden_act A_ : Tuple = use_absolute_embeddings A_ : int = layer_norm_eps A_ : Optional[Any] = initializer_range A_ : Union[str, Any] = encoder_stride # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model A_ : str = int(embed_dim * 2 ** (len(_lowerCamelCase ) - 1) ) A_ : str = ['''stem'''] + [f'stage{idx}' for idx in range(1 , len(_lowerCamelCase ) + 1 )] A_ ,A_ : Optional[Any] = get_aligned_output_features_output_indices( out_features=_lowerCamelCase , out_indices=_lowerCamelCase , stage_names=self.stage_names ) class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = version.parse('1.11' ) @property def _a ( self : str ): """simple docstring""" return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def _a ( self : Union[str, Any] ): """simple docstring""" return 1E-4
4
0
'''simple docstring''' import random import unittest import numpy as np import transformers from transformers import is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax if is_flax_available(): import os import jax.numpy as jnp from jax import jit from transformers import AutoTokenizer, FlaxAutoModelForCausalLM from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model snake_case__ = """0.12""" # assumed parallelism: 8 if is_torch_available(): import torch def snake_case__ ( lowerCamelCase__ : int , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Optional[int]=None ) -> Optional[int]: if rng is None: A_ : Any = random.Random() A_ : Optional[Any] = 1 for dim in shape: total_dims *= dim A_ : Tuple = [] for _ in range(lowerCamelCase__ ): values.append(rng.randint(0 , vocab_size - 1 ) ) A_ : str = np.array(lowerCamelCase__ , dtype=jnp.intaa ).reshape(lowerCamelCase__ ) return output def snake_case__ ( lowerCamelCase__ : Dict , lowerCamelCase__ : str=None ) -> int: A_ : Union[str, Any] = ids_tensor(lowerCamelCase__ , vocab_size=2 , rng=lowerCamelCase__ ) # make sure that at least one token is attended to for each batch A_ : List[Any] = 1 return attn_mask @require_flax class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase : int = None _lowerCAmelCase : Optional[Any] = () def _a ( self : Tuple ): """simple docstring""" A_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() # cut to half length & take max batch_size 3 A_ : Any = 2 A_ : int = inputs['''input_ids'''].shape[-1] // 2 A_ : Optional[int] = inputs['''input_ids'''][:max_batch_size, :sequence_length] A_ : int = jnp.ones_like(_lowerCamelCase ) A_ : Dict = attention_mask[:max_batch_size, :sequence_length] # generate max 5 tokens A_ : Optional[Any] = input_ids.shape[-1] + 5 if config.eos_token_id is not None and config.pad_token_id is None: # hack to allow generate for models such as GPT2 as is done in `generate()` A_ : Tuple = config.eos_token_id return config, input_ids, attention_mask, max_length @is_pt_flax_cross_test def _a ( self : Tuple ): """simple docstring""" A_ : Union[str, Any] = self._get_input_ids_and_config() A_ : List[Any] = False A_ : Union[str, Any] = max_length A_ : List[Any] = 0 for model_class in self.all_generative_model_classes: A_ : Tuple = model_class(_lowerCamelCase ) A_ : Any = model_class.__name__[4:] # Skip the "Flax" at the beginning A_ : Dict = getattr(_lowerCamelCase , _lowerCamelCase ) A_ : str = pt_model_class(_lowerCamelCase ).eval() A_ : int = load_flax_weights_in_pytorch_model(_lowerCamelCase , flax_model.params ) A_ : Union[str, Any] = flax_model.generate(_lowerCamelCase ).sequences A_ : Optional[int] = pt_model.generate(torch.tensor(_lowerCamelCase , dtype=torch.long ) ) if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]: A_ : List[Any] = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]] self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist() ) def _a ( self : Any ): """simple docstring""" A_ : str = self._get_input_ids_and_config() A_ : Optional[int] = False A_ : List[Any] = max_length for model_class in self.all_generative_model_classes: A_ : List[Any] = model_class(_lowerCamelCase ) A_ : str = model.generate(_lowerCamelCase ).sequences self.assertEqual(generation_outputs.shape[-1] , _lowerCamelCase ) A_ : List[Any] = jit(model.generate ) A_ : Optional[Any] = jit_generate(_lowerCamelCase ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def _a ( self : Any ): """simple docstring""" A_ : List[Any] = self._get_input_ids_and_config() A_ : str = True A_ : Union[str, Any] = max_length for model_class in self.all_generative_model_classes: A_ : Union[str, Any] = model_class(_lowerCamelCase ) A_ : Optional[int] = model.generate(_lowerCamelCase ).sequences self.assertEqual(generation_outputs.shape[-1] , _lowerCamelCase ) A_ : str = jit(model.generate ) A_ : List[Any] = jit_generate(_lowerCamelCase ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def _a ( self : Union[str, Any] ): """simple docstring""" A_ : Tuple = self._get_input_ids_and_config() A_ : int = False A_ : str = max_length A_ : Any = 2 for model_class in self.all_generative_model_classes: A_ : Union[str, Any] = model_class(_lowerCamelCase ) A_ : Dict = model.generate(_lowerCamelCase ).sequences self.assertEqual(generation_outputs.shape[-1] , _lowerCamelCase ) A_ : int = jit(model.generate ) A_ : Union[str, Any] = jit_generate(_lowerCamelCase ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def _a ( self : Optional[Any] ): """simple docstring""" A_ : Dict = self._get_input_ids_and_config() A_ : Optional[int] = False A_ : List[str] = max_length A_ : int = 2 A_ : Union[str, Any] = 2 for model_class in self.all_generative_model_classes: A_ : Any = model_class(_lowerCamelCase ) A_ : str = model.generate(_lowerCamelCase ).sequences self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences ) def _a ( self : str ): """simple docstring""" A_ : Optional[int] = self._get_input_ids_and_config() A_ : Union[str, Any] = True A_ : Optional[Any] = max_length A_ : Any = 0.8 A_ : str = 10 A_ : Dict = 0.3 A_ : int = 1 A_ : Union[str, Any] = 8 A_ : str = 9 for model_class in self.all_generative_model_classes: A_ : Union[str, Any] = model_class(_lowerCamelCase ) A_ : Optional[Any] = model.generate(_lowerCamelCase ).sequences self.assertEqual(generation_outputs.shape[-1] , _lowerCamelCase ) A_ : Any = jit(model.generate ) A_ : int = jit_generate(_lowerCamelCase ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def _a ( self : List[str] ): """simple docstring""" A_ : str = self._get_input_ids_and_config() A_ : Optional[int] = max_length A_ : Tuple = 1 A_ : List[str] = 8 A_ : Optional[int] = 9 for model_class in self.all_generative_model_classes: A_ : Union[str, Any] = model_class(_lowerCamelCase ) A_ : str = model.generate(_lowerCamelCase ).sequences self.assertEqual(generation_outputs.shape[-1] , _lowerCamelCase ) A_ : Optional[int] = jit(model.generate ) A_ : Optional[int] = jit_generate(_lowerCamelCase ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def _a ( self : int ): """simple docstring""" A_ : List[Any] = self._get_input_ids_and_config() A_ : Tuple = max_length A_ : Dict = 2 A_ : Dict = 1 A_ : List[str] = 8 A_ : int = 9 for model_class in self.all_generative_model_classes: A_ : Tuple = model_class(_lowerCamelCase ) A_ : int = model.generate(_lowerCamelCase ).sequences self.assertEqual(generation_outputs.shape[-1] , _lowerCamelCase ) A_ : List[str] = jit(model.generate ) A_ : int = jit_generate(_lowerCamelCase ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def _a ( self : List[str] ): """simple docstring""" A_ : str = self._get_input_ids_and_config() # pad attention mask on the left A_ : str = attention_mask.at[(0, 0)].set(0 ) A_ : Optional[Any] = False A_ : List[Any] = max_length for model_class in self.all_generative_model_classes: A_ : List[str] = model_class(_lowerCamelCase ) A_ : Any = model.generate(_lowerCamelCase , attention_mask=_lowerCamelCase ).sequences self.assertEqual(generation_outputs.shape[-1] , _lowerCamelCase ) A_ : Optional[Any] = jit(model.generate ) A_ : Tuple = jit_generate(_lowerCamelCase , attention_mask=_lowerCamelCase ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def _a ( self : int ): """simple docstring""" A_ : int = self._get_input_ids_and_config() # pad attention mask on the left A_ : Dict = attention_mask.at[(0, 0)].set(0 ) A_ : int = True A_ : str = max_length for model_class in self.all_generative_model_classes: A_ : List[Any] = model_class(_lowerCamelCase ) A_ : Union[str, Any] = model.generate(_lowerCamelCase , attention_mask=_lowerCamelCase ).sequences self.assertEqual(generation_outputs.shape[-1] , _lowerCamelCase ) A_ : Optional[Any] = jit(model.generate ) A_ : List[str] = jit_generate(_lowerCamelCase , attention_mask=_lowerCamelCase ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def _a ( self : Any ): """simple docstring""" A_ : Tuple = self._get_input_ids_and_config() # pad attention mask on the left A_ : str = attention_mask.at[(0, 0)].set(0 ) A_ : Union[str, Any] = 2 A_ : Union[str, Any] = max_length for model_class in self.all_generative_model_classes: A_ : List[Any] = model_class(_lowerCamelCase ) A_ : Dict = model.generate(_lowerCamelCase , attention_mask=_lowerCamelCase ).sequences self.assertEqual(generation_outputs.shape[-1] , _lowerCamelCase ) A_ : List[Any] = jit(model.generate ) A_ : Optional[int] = jit_generate(_lowerCamelCase , attention_mask=_lowerCamelCase ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) @require_flax class UpperCamelCase_ (unittest.TestCase ): """simple docstring""" def _a ( self : Optional[int] ): """simple docstring""" A_ : Any = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-bert''' ) A_ : Union[str, Any] = FlaxAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' ) A_ : Any = '''Hello world''' A_ : int = tokenizer(_lowerCamelCase , return_tensors='''np''' ).input_ids # typos are quickly detected (the correct argument is `do_sample`) with self.assertRaisesRegex(_lowerCamelCase , '''do_samples''' ): model.generate(_lowerCamelCase , do_samples=_lowerCamelCase ) # arbitrary arguments that will not be used anywhere are also not accepted with self.assertRaisesRegex(_lowerCamelCase , '''foo''' ): A_ : Optional[int] = {'''foo''': '''bar'''} model.generate(_lowerCamelCase , **_lowerCamelCase )
354
'''simple docstring''' from __future__ import annotations def snake_case__ ( lowerCamelCase__ : list[int] , lowerCamelCase__ : int ) -> list[int]: A_ : int = 0 A_ : str = len(lowerCamelCase__ ) - 1 while i < j: if nums[i] + nums[j] == target: return [i, j] elif nums[i] + nums[j] < target: A_ : Tuple = i + 1 else: A_ : List[str] = j - 1 return [] if __name__ == "__main__": import doctest doctest.testmod() print(F'{two_pointer([2, 7, 11, 15], 9) = }')
4
0
'''simple docstring''' import logging import os from dataclasses import dataclass, field from typing import Dict, Optional import numpy as np from utils_multiple_choice import MultipleChoiceDataset, Split, processors import transformers from transformers import ( AutoConfig, AutoModelForMultipleChoice, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process snake_case__ = logging.getLogger(__name__) def snake_case__ ( lowerCamelCase__ : List[Any] , lowerCamelCase__ : Any ) -> Any: return (preds == labels).mean() @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = field( metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} ) _lowerCAmelCase = field( default=a__, metadata={'help': 'Pretrained config name or path if not the same as model_name'} ) _lowerCAmelCase = field( default=a__, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} ) _lowerCAmelCase = field( default=a__, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'}, ) @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(processors.keys() )} ) _lowerCAmelCase = field(metadata={'help': 'Should contain the data files for the task.'} ) _lowerCAmelCase = field( default=1_2_8, metadata={ 'help': ( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) }, ) _lowerCAmelCase = field( default=a__, metadata={'help': 'Overwrite the cached training and evaluation sets'} ) def snake_case__ ( ) -> Optional[int]: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. A_ : List[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) A_ : Union[str, Any] = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f'Output directory ({training_args.output_dir}) already exists and is not empty. Use' ''' --overwrite_output_dir to overcome.''' ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info('''Training/evaluation parameters %s''' , lowerCamelCase__ ) # Set seed set_seed(training_args.seed ) try: A_ : Optional[Any] = processors[data_args.task_name]() A_ : str = processor.get_labels() A_ : List[str] = len(lowerCamelCase__ ) except KeyError: raise ValueError('''Task not found: %s''' % (data_args.task_name) ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. A_ : Any = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=lowerCamelCase__ , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , ) A_ : int = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) A_ : Optional[Any] = AutoModelForMultipleChoice.from_pretrained( model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=lowerCamelCase__ , cache_dir=model_args.cache_dir , ) # Get datasets A_ : Tuple = ( MultipleChoiceDataset( data_dir=data_args.data_dir , tokenizer=lowerCamelCase__ , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , ) if training_args.do_train else None ) A_ : List[Any] = ( MultipleChoiceDataset( data_dir=data_args.data_dir , tokenizer=lowerCamelCase__ , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , ) if training_args.do_eval else None ) def compute_metrics(lowerCamelCase__ : EvalPrediction ) -> Dict: A_ : Optional[Any] = np.argmax(p.predictions , axis=1 ) return {"acc": simple_accuracy(lowerCamelCase__ , p.label_ids )} # Data collator A_ : str = DataCollatorWithPadding(lowerCamelCase__ , pad_to_multiple_of=8 ) if training_args.fpaa else None # Initialize our Trainer A_ : Dict = Trainer( model=lowerCamelCase__ , args=lowerCamelCase__ , train_dataset=lowerCamelCase__ , eval_dataset=lowerCamelCase__ , compute_metrics=lowerCamelCase__ , data_collator=lowerCamelCase__ , ) # Training if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_master(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation A_ : int = {} if training_args.do_eval: logger.info('''*** Evaluate ***''' ) A_ : str = trainer.evaluate() A_ : str = os.path.join(training_args.output_dir , '''eval_results.txt''' ) if trainer.is_world_master(): with open(lowerCamelCase__ , '''w''' ) as writer: logger.info('''***** Eval results *****''' ) for key, value in result.items(): logger.info(''' %s = %s''' , lowerCamelCase__ , lowerCamelCase__ ) writer.write('''%s = %s\n''' % (key, value) ) results.update(lowerCamelCase__ ) return results def snake_case__ ( lowerCamelCase__ : Optional[int] ) -> int: # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
355
'''simple docstring''' def snake_case__ ( lowerCamelCase__ : list[int] , lowerCamelCase__ : list[int] , lowerCamelCase__ : int ) -> bool: return not any( neighbour == 1 and colored_vertices[i] == color for i, neighbour in enumerate(lowerCamelCase__ ) ) def snake_case__ ( lowerCamelCase__ : list[list[int]] , lowerCamelCase__ : int , lowerCamelCase__ : list[int] , lowerCamelCase__ : int ) -> bool: # Base Case if index == len(lowerCamelCase__ ): return True # Recursive Step for i in range(lowerCamelCase__ ): if valid_coloring(graph[index] , lowerCamelCase__ , lowerCamelCase__ ): # Color current vertex A_ : int = i # Validate coloring if util_color(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , index + 1 ): return True # Backtrack A_ : str = -1 return False def snake_case__ ( lowerCamelCase__ : list[list[int]] , lowerCamelCase__ : int ) -> list[int]: A_ : List[str] = [-1] * len(lowerCamelCase__ ) if util_color(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , 0 ): return colored_vertices return []
4
0
'''simple docstring''' import inspect import os import unittest import torch import accelerate from accelerate import Accelerator from accelerate.test_utils import execute_subprocess_async, require_multi_gpu from accelerate.utils import patch_environment class UpperCamelCase_ (unittest.TestCase ): """simple docstring""" def _a ( self : List[Any] ): """simple docstring""" A_ : List[str] = inspect.getfile(accelerate.test_utils ) A_ : List[str] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_script.py'''] ) A_ : Dict = os.path.sep.join( mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_distributed_data_loop.py'''] ) A_ : Optional[Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_ops.py'''] ) @require_multi_gpu def _a ( self : Union[str, Any] ): """simple docstring""" print(f'Found {torch.cuda.device_count()} devices.' ) A_ : List[str] = ['''torchrun''', f'--nproc_per_node={torch.cuda.device_count()}', self.test_file_path] with patch_environment(omp_num_threads=1 ): execute_subprocess_async(_lowerCamelCase , env=os.environ.copy() ) @require_multi_gpu def _a ( self : Optional[Any] ): """simple docstring""" print(f'Found {torch.cuda.device_count()} devices.' ) A_ : Optional[Any] = ['''torchrun''', f'--nproc_per_node={torch.cuda.device_count()}', self.operation_file_path] print(f'Command: {cmd}' ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(_lowerCamelCase , env=os.environ.copy() ) @require_multi_gpu def _a ( self : Optional[int] ): """simple docstring""" A_ : Dict = ['''torchrun''', f'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__ )] with patch_environment(omp_num_threads=1 ): execute_subprocess_async(_lowerCamelCase , env=os.environ.copy() ) @require_multi_gpu def _a ( self : str ): """simple docstring""" print(f'Found {torch.cuda.device_count()} devices, using 2 devices only' ) A_ : List[Any] = ['''torchrun''', f'--nproc_per_node={torch.cuda.device_count()}', self.data_loop_file_path] with patch_environment(omp_num_threads=1 , cuda_visible_devices='''0,1''' ): execute_subprocess_async(_lowerCamelCase , env=os.environ.copy() ) if __name__ == "__main__": snake_case__ = Accelerator() snake_case__ = (accelerator.state.process_index + 2, 10) snake_case__ = torch.randint(0, 10, shape).to(accelerator.device) snake_case__ = """""" snake_case__ = accelerator.pad_across_processes(tensor) if tensora.shape[0] != accelerator.state.num_processes + 1: error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0." if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor): error_msg += "Tensors have different values." if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0): error_msg += "Padding was not done with the right value (0)." snake_case__ = accelerator.pad_across_processes(tensor, pad_first=True) if tensora.shape[0] != accelerator.state.num_processes + 1: error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0." snake_case__ = accelerator.state.num_processes - accelerator.state.process_index - 1 if not torch.equal(tensora[index:], tensor): error_msg += "Tensors have different values." if not torch.all(tensora[:index] == 0): error_msg += "Padding was not done with the right value (0)." # Raise error at the end to make sure we don't stop at the first failure. if len(error_msg) > 0: raise ValueError(error_msg)
356
'''simple docstring''' from __future__ import annotations from PIL import Image # Define glider example snake_case__ = [ [0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], ] # Define blinker example snake_case__ = [[0, 1, 0], [0, 1, 0], [0, 1, 0]] def snake_case__ ( lowerCamelCase__ : list[list[int]] ) -> list[list[int]]: A_ : str = [] for i in range(len(lowerCamelCase__ ) ): A_ : Optional[Any] = [] for j in range(len(cells[i] ) ): # Get the number of live neighbours A_ : Optional[int] = 0 if i > 0 and j > 0: neighbour_count += cells[i - 1][j - 1] if i > 0: neighbour_count += cells[i - 1][j] if i > 0 and j < len(cells[i] ) - 1: neighbour_count += cells[i - 1][j + 1] if j > 0: neighbour_count += cells[i][j - 1] if j < len(cells[i] ) - 1: neighbour_count += cells[i][j + 1] if i < len(lowerCamelCase__ ) - 1 and j > 0: neighbour_count += cells[i + 1][j - 1] if i < len(lowerCamelCase__ ) - 1: neighbour_count += cells[i + 1][j] if i < len(lowerCamelCase__ ) - 1 and j < len(cells[i] ) - 1: neighbour_count += cells[i + 1][j + 1] # Rules of the game of life (excerpt from Wikipedia): # 1. Any live cell with two or three live neighbours survives. # 2. Any dead cell with three live neighbours becomes a live cell. # 3. All other live cells die in the next generation. # Similarly, all other dead cells stay dead. A_ : List[str] = cells[i][j] == 1 if ( (alive and 2 <= neighbour_count <= 3) or not alive and neighbour_count == 3 ): next_generation_row.append(1 ) else: next_generation_row.append(0 ) next_generation.append(lowerCamelCase__ ) return next_generation def snake_case__ ( lowerCamelCase__ : list[list[int]] , lowerCamelCase__ : int ) -> list[Image.Image]: A_ : List[Any] = [] for _ in range(lowerCamelCase__ ): # Create output image A_ : Optional[int] = Image.new('''RGB''' , (len(cells[0] ), len(lowerCamelCase__ )) ) A_ : int = img.load() # Save cells to image for x in range(len(lowerCamelCase__ ) ): for y in range(len(cells[0] ) ): A_ : Optional[Any] = 2_5_5 - cells[y][x] * 2_5_5 A_ : str = (colour, colour, colour) # Save image images.append(lowerCamelCase__ ) A_ : Optional[int] = new_generation(lowerCamelCase__ ) return images if __name__ == "__main__": snake_case__ = generate_images(GLIDER, 16) images[0].save("""out.gif""", save_all=True, append_images=images[1:])
4
0
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging snake_case__ = logging.get_logger(__name__) snake_case__ = { """facebook/s2t-wav2vec2-large-en-de""": ( """https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json""" ), # See all Speech2Text models at https://huggingface.co/models?filter=speech2text2 } class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = 'speech_to_text_2' _lowerCAmelCase = ['past_key_values'] _lowerCAmelCase = {'num_attention_heads': 'decoder_attention_heads', 'hidden_size': 'd_model'} def __init__( self : Optional[Any] , _lowerCamelCase : Optional[Any]=10000 , _lowerCamelCase : List[Any]=6 , _lowerCamelCase : int=2048 , _lowerCamelCase : Dict=4 , _lowerCamelCase : str=0.0 , _lowerCamelCase : int=True , _lowerCamelCase : int="relu" , _lowerCamelCase : Any=256 , _lowerCamelCase : List[Any]=0.1 , _lowerCamelCase : Tuple=0.0 , _lowerCamelCase : Union[str, Any]=0.0 , _lowerCamelCase : Optional[Any]=0.02 , _lowerCamelCase : int=2 , _lowerCamelCase : List[str]=True , _lowerCamelCase : str=1 , _lowerCamelCase : List[Any]=0 , _lowerCamelCase : Optional[int]=2 , _lowerCamelCase : Tuple=1024 , **_lowerCamelCase : int , ): """simple docstring""" A_ : Optional[int] = vocab_size A_ : Tuple = d_model A_ : List[str] = decoder_ffn_dim A_ : str = decoder_layers A_ : Any = decoder_attention_heads A_ : int = dropout A_ : str = attention_dropout A_ : Optional[int] = activation_dropout A_ : str = activation_function A_ : List[Any] = init_std A_ : Union[str, Any] = decoder_layerdrop A_ : Any = use_cache A_ : Optional[Any] = decoder_layers A_ : Optional[int] = scale_embedding # scale factor will be sqrt(d_model) if True A_ : Optional[Any] = max_target_positions super().__init__( pad_token_id=_lowerCamelCase , bos_token_id=_lowerCamelCase , eos_token_id=_lowerCamelCase , decoder_start_token_id=_lowerCamelCase , **_lowerCamelCase , )
357
'''simple docstring''' import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import BertTokenizer, BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import AlignProcessor, EfficientNetImageProcessor @require_vision class UpperCamelCase_ (unittest.TestCase ): """simple docstring""" def _a ( self : Union[str, Any] ): """simple docstring""" A_ : Any = tempfile.mkdtemp() A_ : List[Any] = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] A_ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) A_ : Tuple = { '''do_resize''': True, '''size''': 20, '''do_center_crop''': True, '''crop_size''': 18, '''do_normalize''': True, '''image_mean''': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73], '''image_std''': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11], } A_ : List[Any] = os.path.join(self.tmpdirname , _lowerCamelCase ) with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp: json.dump(_lowerCamelCase , _lowerCamelCase ) def _a ( self : Dict , **_lowerCamelCase : Tuple ): """simple docstring""" return BertTokenizer.from_pretrained(self.tmpdirname , **_lowerCamelCase ) def _a ( self : Optional[int] , **_lowerCamelCase : Optional[int] ): """simple docstring""" return BertTokenizerFast.from_pretrained(self.tmpdirname , **_lowerCamelCase ) def _a ( self : Optional[Any] , **_lowerCamelCase : Tuple ): """simple docstring""" return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **_lowerCamelCase ) def _a ( self : Tuple ): """simple docstring""" shutil.rmtree(self.tmpdirname ) def _a ( self : int ): """simple docstring""" A_ : Union[str, Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] A_ : Any = [Image.fromarray(np.moveaxis(_lowerCamelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs def _a ( self : int ): """simple docstring""" A_ : Tuple = self.get_tokenizer() A_ : Tuple = self.get_rust_tokenizer() A_ : Dict = self.get_image_processor() A_ : List[Any] = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase ) processor_slow.save_pretrained(self.tmpdirname ) A_ : str = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=_lowerCamelCase ) A_ : Any = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase ) processor_fast.save_pretrained(self.tmpdirname ) A_ : List[Any] = AlignProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , _lowerCamelCase ) self.assertIsInstance(processor_fast.tokenizer , _lowerCamelCase ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , _lowerCamelCase ) self.assertIsInstance(processor_fast.image_processor , _lowerCamelCase ) def _a ( self : List[Any] ): """simple docstring""" A_ : List[str] = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) A_ : Optional[int] = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) A_ : Tuple = self.get_image_processor(do_normalize=_lowerCamelCase , padding_value=1.0 ) A_ : List[str] = AlignProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_lowerCamelCase , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , _lowerCamelCase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , _lowerCamelCase ) def _a ( self : Union[str, Any] ): """simple docstring""" A_ : Dict = self.get_image_processor() A_ : Any = self.get_tokenizer() A_ : List[str] = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase ) A_ : Any = self.prepare_image_inputs() A_ : List[Any] = image_processor(_lowerCamelCase , return_tensors='''np''' ) A_ : str = processor(images=_lowerCamelCase , return_tensors='''np''' ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 ) def _a ( self : Dict ): """simple docstring""" A_ : str = self.get_image_processor() A_ : List[str] = self.get_tokenizer() A_ : Optional[int] = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase ) A_ : int = '''lower newer''' A_ : str = processor(text=_lowerCamelCase ) A_ : Dict = tokenizer(_lowerCamelCase , padding='''max_length''' , max_length=64 ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def _a ( self : str ): """simple docstring""" A_ : Optional[int] = self.get_image_processor() A_ : Optional[Any] = self.get_tokenizer() A_ : List[str] = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase ) A_ : List[Any] = '''lower newer''' A_ : Optional[int] = self.prepare_image_inputs() A_ : List[Any] = processor(text=_lowerCamelCase , images=_lowerCamelCase ) self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] ) # test if it raises when no input is passed with pytest.raises(_lowerCamelCase ): processor() def _a ( self : List[str] ): """simple docstring""" A_ : Optional[Any] = self.get_image_processor() A_ : Optional[int] = self.get_tokenizer() A_ : List[Any] = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase ) A_ : str = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] A_ : str = processor.batch_decode(_lowerCamelCase ) A_ : Union[str, Any] = tokenizer.batch_decode(_lowerCamelCase ) self.assertListEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : Tuple ): """simple docstring""" A_ : str = self.get_image_processor() A_ : Tuple = self.get_tokenizer() A_ : Any = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase ) A_ : str = '''lower newer''' A_ : List[str] = self.prepare_image_inputs() A_ : Tuple = processor(text=_lowerCamelCase , images=_lowerCamelCase ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
4
0
'''simple docstring''' snake_case__ = {str(digit): digit**5 for digit in range(10)} def snake_case__ ( lowerCamelCase__ : int ) -> int: return sum(DIGITS_FIFTH_POWER[digit] for digit in str(lowerCamelCase__ ) ) def snake_case__ ( ) -> int: return sum( number for number in range(1_0_0_0 , 1_0_0_0_0_0_0 ) if number == digits_fifth_powers_sum(lowerCamelCase__ ) ) if __name__ == "__main__": print(solution())
358
'''simple docstring''' import json import os from pathlib import Path from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple, Union import sentencepiece from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging snake_case__ = logging.get_logger(__name__) snake_case__ = """▁""" snake_case__ = { """vocab_file""": """vocab.json""", """spm_file""": """sentencepiece.bpe.model""", } snake_case__ = { """vocab_file""": { """facebook/s2t-small-librispeech-asr""": ( """https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json""" ), }, """spm_file""": { """facebook/s2t-small-librispeech-asr""": ( """https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model""" ) }, } snake_case__ = { """facebook/s2t-small-librispeech-asr""": 10_24, } snake_case__ = ["""pt""", """fr""", """ru""", """nl""", """ro""", """it""", """es""", """de"""] snake_case__ = {"""mustc""": MUSTC_LANGS} class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = VOCAB_FILES_NAMES _lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP _lowerCAmelCase = MAX_MODEL_INPUT_SIZES _lowerCAmelCase = ['input_ids', 'attention_mask'] _lowerCAmelCase = [] def __init__( self : Optional[int] , _lowerCamelCase : List[str] , _lowerCamelCase : List[str] , _lowerCamelCase : str="<s>" , _lowerCamelCase : Union[str, Any]="</s>" , _lowerCamelCase : Dict="<pad>" , _lowerCamelCase : str="<unk>" , _lowerCamelCase : Union[str, Any]=False , _lowerCamelCase : int=False , _lowerCamelCase : Any=None , _lowerCamelCase : Any=None , _lowerCamelCase : Optional[Dict[str, Any]] = None , **_lowerCamelCase : Optional[int] , ): """simple docstring""" A_ : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=_lowerCamelCase , eos_token=_lowerCamelCase , unk_token=_lowerCamelCase , pad_token=_lowerCamelCase , do_upper_case=_lowerCamelCase , do_lower_case=_lowerCamelCase , tgt_lang=_lowerCamelCase , lang_codes=_lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCamelCase , ) A_ : Optional[int] = do_upper_case A_ : Tuple = do_lower_case A_ : Tuple = load_json(_lowerCamelCase ) A_ : Tuple = {v: k for k, v in self.encoder.items()} A_ : List[Any] = spm_file A_ : List[str] = load_spm(_lowerCamelCase , self.sp_model_kwargs ) if lang_codes is not None: A_ : Any = lang_codes A_ : Optional[Any] = LANGUAGES[lang_codes] A_ : Optional[Any] = [f'<lang:{lang}>' for lang in self.langs] A_ : Union[str, Any] = {lang: self.sp_model.PieceToId(f'<lang:{lang}>' ) for lang in self.langs} A_ : Optional[int] = self.lang_tokens A_ : int = tgt_lang if tgt_lang is not None else self.langs[0] self.set_tgt_lang_special_tokens(self._tgt_lang ) else: A_ : Dict = {} @property def _a ( self : Tuple ): """simple docstring""" return len(self.encoder ) @property def _a ( self : int ): """simple docstring""" return self._tgt_lang @tgt_lang.setter def _a ( self : List[str] , _lowerCamelCase : Any ): """simple docstring""" A_ : int = new_tgt_lang self.set_tgt_lang_special_tokens(_lowerCamelCase ) def _a ( self : Tuple , _lowerCamelCase : str ): """simple docstring""" A_ : List[str] = self.lang_code_to_id[tgt_lang] A_ : Optional[Any] = [lang_code_id] def _a ( self : Optional[Any] , _lowerCamelCase : str ): """simple docstring""" return self.sp_model.encode(_lowerCamelCase , out_type=_lowerCamelCase ) def _a ( self : List[Any] , _lowerCamelCase : int ): """simple docstring""" return self.encoder.get(_lowerCamelCase , self.encoder[self.unk_token] ) def _a ( self : int , _lowerCamelCase : int ): """simple docstring""" return self.decoder.get(_lowerCamelCase , self.unk_token ) def _a ( self : int , _lowerCamelCase : List[str] ): """simple docstring""" A_ : List[Any] = [] A_ : Any = '''''' for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: A_ : Union[str, Any] = self.sp_model.decode(_lowerCamelCase ) out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " " A_ : Optional[Any] = [] else: current_sub_tokens.append(_lowerCamelCase ) A_ : Tuple = self.sp_model.decode(_lowerCamelCase ) out_string += decoded.upper() if self.do_upper_case else decoded return out_string.strip() def _a ( self : int , _lowerCamelCase : Dict , _lowerCamelCase : Any=None ): """simple docstring""" if token_ids_a is None: return self.prefix_tokens + token_ids_a + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id] def _a ( self : List[Any] , _lowerCamelCase : List[int] , _lowerCamelCase : Optional[List[int]] = None , _lowerCamelCase : bool = False ): """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_lowerCamelCase , token_ids_a=_lowerCamelCase , already_has_special_tokens=_lowerCamelCase ) A_ : Tuple = [1] * len(self.prefix_tokens ) A_ : Tuple = [1] if token_ids_a is None: return prefix_ones + ([0] * len(_lowerCamelCase )) + suffix_ones return prefix_ones + ([0] * len(_lowerCamelCase )) + ([0] * len(_lowerCamelCase )) + suffix_ones def _a ( self : Dict ): """simple docstring""" A_ : Union[str, Any] = self.encoder.copy() vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : Union[str, Any] ): """simple docstring""" A_ : Dict = self.__dict__.copy() A_ : List[Any] = None return state def __setstate__( self : List[str] , _lowerCamelCase : Dict ): """simple docstring""" A_ : Dict = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): A_ : Optional[int] = {} A_ : int = load_spm(self.spm_file , self.sp_model_kwargs ) def _a ( self : Optional[Any] , _lowerCamelCase : str , _lowerCamelCase : Optional[str] = None ): """simple docstring""" A_ : Dict = Path(_lowerCamelCase ) assert save_dir.is_dir(), f'{save_directory} should be a directory' A_ : Optional[int] = save_dir / ( (filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''vocab_file'''] ) A_ : Optional[int] = save_dir / ( (filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''spm_file'''] ) save_json(self.encoder , _lowerCamelCase ) if os.path.abspath(self.spm_file ) != os.path.abspath(_lowerCamelCase ) and os.path.isfile(self.spm_file ): copyfile(self.spm_file , _lowerCamelCase ) elif not os.path.isfile(self.spm_file ): with open(_lowerCamelCase , '''wb''' ) as fi: A_ : List[str] = self.sp_model.serialized_model_proto() fi.write(_lowerCamelCase ) return (str(_lowerCamelCase ), str(_lowerCamelCase )) def snake_case__ ( lowerCamelCase__ : str , lowerCamelCase__ : Dict[str, Any] ) -> sentencepiece.SentencePieceProcessor: A_ : Tuple = sentencepiece.SentencePieceProcessor(**lowerCamelCase__ ) spm.Load(str(lowerCamelCase__ ) ) return spm def snake_case__ ( lowerCamelCase__ : str ) -> Union[Dict, List]: with open(lowerCamelCase__ , '''r''' ) as f: return json.load(lowerCamelCase__ ) def snake_case__ ( lowerCamelCase__ : Any , lowerCamelCase__ : str ) -> None: with open(lowerCamelCase__ , '''w''' ) as f: json.dump(lowerCamelCase__ , lowerCamelCase__ , indent=2 )
4
0
'''simple docstring''' from __future__ import annotations import copy import inspect import json import math import os import tempfile import unittest from importlib import import_module import numpy as np from transformers import ViTMAEConfig from transformers.file_utils import cached_property, is_tf_available, is_vision_available from transformers.testing_utils import require_tf, require_vision, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFViTMAEForPreTraining, TFViTMAEModel if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class UpperCamelCase_ : """simple docstring""" def __init__( self : Union[str, Any] , _lowerCamelCase : Tuple , _lowerCamelCase : str=13 , _lowerCamelCase : Tuple=30 , _lowerCamelCase : List[Any]=2 , _lowerCamelCase : Any=3 , _lowerCamelCase : int=True , _lowerCamelCase : List[Any]=True , _lowerCamelCase : int=32 , _lowerCamelCase : int=2 , _lowerCamelCase : int=4 , _lowerCamelCase : List[str]=37 , _lowerCamelCase : Optional[Any]="gelu" , _lowerCamelCase : List[str]=0.1 , _lowerCamelCase : Tuple=0.1 , _lowerCamelCase : Tuple=10 , _lowerCamelCase : Optional[Any]=0.02 , _lowerCamelCase : List[str]=3 , _lowerCamelCase : Optional[Any]=0.6 , _lowerCamelCase : Union[str, Any]=None , ): """simple docstring""" A_ : Optional[int] = parent A_ : Any = batch_size A_ : Any = image_size A_ : Union[str, Any] = patch_size A_ : Tuple = num_channels A_ : Tuple = is_training A_ : Union[str, Any] = use_labels A_ : Optional[int] = hidden_size A_ : Tuple = num_hidden_layers A_ : str = num_attention_heads A_ : Dict = intermediate_size A_ : List[Any] = hidden_act A_ : Union[str, Any] = hidden_dropout_prob A_ : Any = attention_probs_dropout_prob A_ : Tuple = type_sequence_label_size A_ : Optional[int] = initializer_range A_ : List[Any] = mask_ratio A_ : Dict = scope # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above # (we add 1 for the [CLS] token) A_ : List[str] = (image_size // patch_size) ** 2 A_ : str = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) ) def _a ( self : str ): """simple docstring""" A_ : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A_ : List[Any] = None if self.use_labels: A_ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A_ : Union[str, Any] = self.get_config() return config, pixel_values, labels def _a ( self : Optional[int] ): """simple docstring""" return ViTMAEConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowerCamelCase , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , ) def _a ( self : Optional[Any] , _lowerCamelCase : Optional[int] , _lowerCamelCase : List[Any] , _lowerCamelCase : Tuple ): """simple docstring""" A_ : Union[str, Any] = TFViTMAEModel(config=_lowerCamelCase ) A_ : List[Any] = model(_lowerCamelCase , training=_lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _a ( self : Optional[Any] , _lowerCamelCase : str , _lowerCamelCase : Optional[int] , _lowerCamelCase : Union[str, Any] ): """simple docstring""" A_ : Any = TFViTMAEForPreTraining(_lowerCamelCase ) A_ : List[str] = model(_lowerCamelCase , training=_lowerCamelCase ) # expected sequence length = num_patches A_ : int = (self.image_size // self.patch_size) ** 2 A_ : int = self.patch_size**2 * self.num_channels self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) ) # test greyscale images A_ : str = 1 A_ : Dict = TFViTMAEForPreTraining(_lowerCamelCase ) A_ : Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) A_ : Optional[Any] = model(_lowerCamelCase , training=_lowerCamelCase ) A_ : Tuple = self.patch_size**2 self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) ) def _a ( self : int ): """simple docstring""" A_ : Tuple = self.prepare_config_and_inputs() (A_) : List[str] = config_and_inputs A_ : Tuple = {'''pixel_values''': pixel_values} return config, inputs_dict @require_tf class UpperCamelCase_ (a__, a__, unittest.TestCase ): """simple docstring""" _lowerCAmelCase = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else () _lowerCAmelCase = {'feature-extraction': TFViTMAEModel} if is_tf_available() else {} _lowerCAmelCase = False _lowerCAmelCase = False _lowerCAmelCase = False _lowerCAmelCase = False def _a ( self : Dict ): """simple docstring""" A_ : List[str] = TFViTMAEModelTester(self ) A_ : Optional[Any] = ConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase , hidden_size=37 ) def _a ( self : Tuple ): """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason='''ViTMAE does not use inputs_embeds''' ) def _a ( self : Any ): """simple docstring""" pass def _a ( self : Optional[Any] ): """simple docstring""" A_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A_ : Tuple = model_class(_lowerCamelCase ) self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) ) A_ : Union[str, Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_lowerCamelCase , tf.keras.layers.Layer ) ) def _a ( self : Any ): """simple docstring""" A_ : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A_ : int = model_class(_lowerCamelCase ) A_ : Any = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A_ : List[Any] = [*signature.parameters.keys()] A_ : int = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , _lowerCamelCase ) def _a ( self : str ): """simple docstring""" A_ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCamelCase ) def _a ( self : Any ): """simple docstring""" A_ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*_lowerCamelCase ) def _a ( self : Union[str, Any] ): """simple docstring""" np.random.seed(2 ) A_ : str = self.model_tester.prepare_config_and_inputs_for_common() A_ : Any = int((config.image_size // config.patch_size) ** 2 ) A_ : Any = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: A_ : List[str] = model_class(_lowerCamelCase ) A_ : Tuple = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) A_ : Tuple = model(_lowerCamelCase , noise=_lowerCamelCase ) A_ : List[str] = copy.deepcopy(self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) ) A_ : Any = model(**_lowerCamelCase , noise=_lowerCamelCase ) A_ : Optional[Any] = outputs_dict[0].numpy() A_ : Optional[Any] = outputs_keywords[0].numpy() self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1E-6 ) def _a ( self : List[Any] ): """simple docstring""" np.random.seed(2 ) A_ : str = self.model_tester.prepare_config_and_inputs_for_common() A_ : str = int((config.image_size // config.patch_size) ** 2 ) A_ : Dict = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) def prepare_numpy_arrays(_lowerCamelCase : Optional[Any] ): A_ : Any = {} for k, v in inputs_dict.items(): if tf.is_tensor(_lowerCamelCase ): A_ : Dict = v.numpy() else: A_ : Union[str, Any] = np.array(_lowerCamelCase ) return inputs_np_dict for model_class in self.all_model_classes: A_ : List[str] = model_class(_lowerCamelCase ) A_ : List[Any] = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) A_ : int = prepare_numpy_arrays(_lowerCamelCase ) A_ : str = model(_lowerCamelCase , noise=_lowerCamelCase ) A_ : Union[str, Any] = model(**_lowerCamelCase , noise=_lowerCamelCase ) self.assert_outputs_same(_lowerCamelCase , _lowerCamelCase ) def _a ( self : List[str] , _lowerCamelCase : Any , _lowerCamelCase : List[str] , _lowerCamelCase : Optional[Any] ): """simple docstring""" np.random.seed(2 ) A_ : List[Any] = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 ) A_ : int = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) A_ : List[Any] = tf.constant(_lowerCamelCase ) # Add `noise` argument. # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument A_ : str = tf_noise super().check_pt_tf_models(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) def _a ( self : Dict ): """simple docstring""" np.random.seed(2 ) A_ : int = self.model_tester.prepare_config_and_inputs_for_common() A_ : Dict = { module_member for model_class in self.all_model_classes for module in (import_module(model_class.__module__ ),) for module_member_name in dir(_lowerCamelCase ) if module_member_name.endswith('''MainLayer''' ) # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`. and module_member_name[: -len('''MainLayer''' )] == model_class.__name__[: -len('''Model''' )] for module_member in (getattr(_lowerCamelCase , _lowerCamelCase ),) if isinstance(_lowerCamelCase , _lowerCamelCase ) and tf.keras.layers.Layer in module_member.__bases__ and getattr(_lowerCamelCase , '''_keras_serializable''' , _lowerCamelCase ) } A_ : List[Any] = int((config.image_size // config.patch_size) ** 2 ) A_ : Union[str, Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) A_ : Any = tf.convert_to_tensor(_lowerCamelCase ) inputs_dict.update({'''noise''': noise} ) for main_layer_class in tf_main_layer_classes: A_ : Optional[Any] = main_layer_class(_lowerCamelCase ) A_ : List[Any] = { name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items() } A_ : Tuple = tf.keras.Model(_lowerCamelCase , outputs=main_layer(_lowerCamelCase ) ) A_ : Dict = model(_lowerCamelCase ) with tempfile.TemporaryDirectory() as tmpdirname: A_ : List[Any] = os.path.join(_lowerCamelCase , '''keras_model.h5''' ) model.save(_lowerCamelCase ) A_ : Any = tf.keras.models.load_model( _lowerCamelCase , custom_objects={main_layer_class.__name__: main_layer_class} ) assert isinstance(_lowerCamelCase , tf.keras.Model ) A_ : Optional[int] = model(_lowerCamelCase ) self.assert_outputs_same(_lowerCamelCase , _lowerCamelCase ) @slow def _a ( self : str ): """simple docstring""" np.random.seed(2 ) A_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() A_ : Union[str, Any] = int((config.image_size // config.patch_size) ** 2 ) A_ : int = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: A_ : Any = model_class(_lowerCamelCase ) A_ : List[Any] = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) A_ : Optional[Any] = model(_lowerCamelCase , noise=_lowerCamelCase ) if model_class.__name__ == "TFViTMAEModel": A_ : Dict = outputs.last_hidden_state.numpy() A_ : List[Any] = 0 else: A_ : Any = outputs.logits.numpy() A_ : List[Any] = 0 with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(_lowerCamelCase , saved_model=_lowerCamelCase ) A_ : List[Any] = model_class.from_pretrained(_lowerCamelCase ) A_ : Any = model(_lowerCamelCase , noise=_lowerCamelCase ) if model_class.__name__ == "TFViTMAEModel": A_ : List[Any] = after_outputs['''last_hidden_state'''].numpy() A_ : Optional[Any] = 0 else: A_ : Dict = after_outputs['''logits'''].numpy() A_ : Dict = 0 A_ : List[Any] = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(_lowerCamelCase , 1E-5 ) def _a ( self : Optional[Any] ): """simple docstring""" np.random.seed(2 ) A_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() A_ : int = int((config.image_size // config.patch_size) ** 2 ) A_ : Dict = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: A_ : List[Any] = model_class(_lowerCamelCase ) A_ : str = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) A_ : str = model(_lowerCamelCase , noise=_lowerCamelCase ) A_ : Optional[Any] = model.get_config() # make sure that returned config is jsonifiable, which is required by keras json.dumps(_lowerCamelCase ) A_ : Optional[int] = model_class.from_config(model.get_config() ) # make sure it also accepts a normal config A_ : Any = model_class.from_config(model.config ) A_ : int = new_model(_lowerCamelCase ) # Build model new_model.set_weights(model.get_weights() ) A_ : Dict = new_model(_lowerCamelCase , noise=_lowerCamelCase ) self.assert_outputs_same(_lowerCamelCase , _lowerCamelCase ) @unittest.skip( reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.''' ) def _a ( self : List[str] ): """simple docstring""" pass @unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load''' ) def _a ( self : List[Any] ): """simple docstring""" pass @slow def _a ( self : Optional[Any] ): """simple docstring""" A_ : Tuple = TFViTMAEModel.from_pretrained('''google/vit-base-patch16-224''' ) self.assertIsNotNone(_lowerCamelCase ) def snake_case__ ( ) -> List[str]: A_ : Optional[int] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_tf @require_vision class UpperCamelCase_ (unittest.TestCase ): """simple docstring""" @cached_property def _a ( self : int ): """simple docstring""" return ViTImageProcessor.from_pretrained('''facebook/vit-mae-base''' ) if is_vision_available() else None @slow def _a ( self : Optional[Any] ): """simple docstring""" np.random.seed(2 ) A_ : List[str] = TFViTMAEForPreTraining.from_pretrained('''facebook/vit-mae-base''' ) A_ : str = self.default_image_processor A_ : Dict = prepare_img() A_ : Union[str, Any] = image_processor(images=_lowerCamelCase , return_tensors='''tf''' ) # prepare a noise vector that will be also used for testing the TF model # (this way we can ensure that the PT and TF models operate on the same inputs) A_ : Tuple = ViTMAEConfig() A_ : List[str] = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 ) A_ : Optional[Any] = np.random.uniform(size=(1, num_patches) ) # forward pass A_ : Union[str, Any] = model(**_lowerCamelCase , noise=_lowerCamelCase ) # verify the logits A_ : Dict = tf.convert_to_tensor([1, 196, 768] ) self.assertEqual(outputs.logits.shape , _lowerCamelCase ) A_ : Dict = tf.convert_to_tensor( [[-0.05_48, -1.70_23, -0.93_25], [0.37_21, -0.56_70, -0.22_33], [0.82_35, -1.38_78, -0.35_24]] ) tf.debugging.assert_near(outputs.logits[0, :3, :3] , _lowerCamelCase , atol=1E-4 )
359
'''simple docstring''' import argparse import json import os import sys import tempfile import unittest from argparse import Namespace from dataclasses import dataclass, field from enum import Enum from pathlib import Path from typing import List, Literal, Optional import yaml from transformers import HfArgumentParser, TrainingArguments from transformers.hf_argparser import make_choice_type_function, string_to_bool # Since Python 3.10, we can use the builtin `|` operator for Union types # See PEP 604: https://peps.python.org/pep-0604 snake_case__ = sys.version_info >= (3, 10) def snake_case__ ( lowerCamelCase__ : Union[str, Any]=None , lowerCamelCase__ : str=None ) -> List[Any]: return field(default_factory=lambda: default , metadata=lowerCamelCase__ ) @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = 42 _lowerCAmelCase = 42 _lowerCAmelCase = 42 _lowerCAmelCase = 42 @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = 4_2 _lowerCAmelCase = field(default='toto', metadata={'help': 'help message'} ) @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = False _lowerCAmelCase = True _lowerCAmelCase = None class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = 'titi' _lowerCAmelCase = 'toto' class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = 'titi' _lowerCAmelCase = 'toto' _lowerCAmelCase = 4_2 @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = "toto" def _a ( self : Optional[Any] ): """simple docstring""" A_ : Optional[int] = BasicEnum(self.foo ) @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = "toto" def _a ( self : Tuple ): """simple docstring""" A_ : Optional[Any] = MixedTypeEnum(self.foo ) @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = None _lowerCAmelCase = field(default=a__, metadata={'help': 'help message'} ) _lowerCAmelCase = None _lowerCAmelCase = list_field(default=[] ) _lowerCAmelCase = list_field(default=[] ) @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = list_field(default=[] ) _lowerCAmelCase = list_field(default=[1, 2, 3] ) _lowerCAmelCase = list_field(default=['Hallo', 'Bonjour', 'Hello'] ) _lowerCAmelCase = list_field(default=[0.1, 0.2, 0.3] ) @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = field() _lowerCAmelCase = field() _lowerCAmelCase = field() def _a ( self : Tuple ): """simple docstring""" A_ : Tuple = BasicEnum(self.required_enum ) @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = 42 _lowerCAmelCase = field() _lowerCAmelCase = None _lowerCAmelCase = field(default='toto', metadata={'help': 'help message'} ) _lowerCAmelCase = list_field(default=['Hallo', 'Bonjour', 'Hello'] ) if is_python_no_less_than_3_10: @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = False _lowerCAmelCase = True _lowerCAmelCase = None @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = None _lowerCAmelCase = field(default=a__, metadata={'help': 'help message'} ) _lowerCAmelCase = None _lowerCAmelCase = list_field(default=[] ) _lowerCAmelCase = list_field(default=[] ) class UpperCamelCase_ (unittest.TestCase ): """simple docstring""" def _a ( self : List[str] , _lowerCamelCase : argparse.ArgumentParser , _lowerCamelCase : argparse.ArgumentParser ): """simple docstring""" self.assertEqual(len(a._actions ) , len(b._actions ) ) for x, y in zip(a._actions , b._actions ): A_ : Union[str, Any] = {k: v for k, v in vars(_lowerCamelCase ).items() if k != '''container'''} A_ : Optional[Any] = {k: v for k, v in vars(_lowerCamelCase ).items() if k != '''container'''} # Choices with mixed type have custom function as "type" # So we need to compare results directly for equality if xx.get('''choices''' , _lowerCamelCase ) and yy.get('''choices''' , _lowerCamelCase ): for expected_choice in yy["choices"] + xx["choices"]: self.assertEqual(xx['''type'''](_lowerCamelCase ) , yy['''type'''](_lowerCamelCase ) ) del xx["type"], yy["type"] self.assertEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : Optional[int] ): """simple docstring""" A_ : Union[str, Any] = HfArgumentParser(_lowerCamelCase ) A_ : Optional[Any] = argparse.ArgumentParser() expected.add_argument('''--foo''' , type=_lowerCamelCase , required=_lowerCamelCase ) expected.add_argument('''--bar''' , type=_lowerCamelCase , required=_lowerCamelCase ) expected.add_argument('''--baz''' , type=_lowerCamelCase , required=_lowerCamelCase ) expected.add_argument('''--flag''' , type=_lowerCamelCase , default=_lowerCamelCase , const=_lowerCamelCase , nargs='''?''' ) self.argparsersEqual(_lowerCamelCase , _lowerCamelCase ) A_ : Union[str, Any] = ['''--foo''', '''1''', '''--baz''', '''quux''', '''--bar''', '''0.5'''] ((A_) ,) : List[str] = parser.parse_args_into_dataclasses(_lowerCamelCase , look_for_args_file=_lowerCamelCase ) self.assertFalse(example.flag ) def _a ( self : Dict ): """simple docstring""" A_ : int = HfArgumentParser(_lowerCamelCase ) A_ : int = argparse.ArgumentParser() expected.add_argument('''--foo''' , default=42 , type=_lowerCamelCase ) expected.add_argument('''--baz''' , default='''toto''' , type=_lowerCamelCase , help='''help message''' ) self.argparsersEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : Dict ): """simple docstring""" A_ : Any = argparse.ArgumentParser() expected.add_argument('''--foo''' , type=_lowerCamelCase , default=_lowerCamelCase , const=_lowerCamelCase , nargs='''?''' ) expected.add_argument('''--baz''' , type=_lowerCamelCase , default=_lowerCamelCase , const=_lowerCamelCase , nargs='''?''' ) # A boolean no_* argument always has to come after its "default: True" regular counter-part # and its default must be set to False expected.add_argument('''--no_baz''' , action='''store_false''' , default=_lowerCamelCase , dest='''baz''' ) expected.add_argument('''--opt''' , type=_lowerCamelCase , default=_lowerCamelCase ) A_ : Dict = [WithDefaultBoolExample] if is_python_no_less_than_3_10: dataclass_types.append(_lowerCamelCase ) for dataclass_type in dataclass_types: A_ : Any = HfArgumentParser(_lowerCamelCase ) self.argparsersEqual(_lowerCamelCase , _lowerCamelCase ) A_ : List[Any] = parser.parse_args([] ) self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , baz=_lowerCamelCase , opt=_lowerCamelCase ) ) A_ : Optional[int] = parser.parse_args(['''--foo''', '''--no_baz'''] ) self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , baz=_lowerCamelCase , opt=_lowerCamelCase ) ) A_ : Union[str, Any] = parser.parse_args(['''--foo''', '''--baz'''] ) self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , baz=_lowerCamelCase , opt=_lowerCamelCase ) ) A_ : List[str] = parser.parse_args(['''--foo''', '''True''', '''--baz''', '''True''', '''--opt''', '''True'''] ) self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , baz=_lowerCamelCase , opt=_lowerCamelCase ) ) A_ : List[Any] = parser.parse_args(['''--foo''', '''False''', '''--baz''', '''False''', '''--opt''', '''False'''] ) self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , baz=_lowerCamelCase , opt=_lowerCamelCase ) ) def _a ( self : List[Any] ): """simple docstring""" A_ : str = HfArgumentParser(_lowerCamelCase ) A_ : Optional[int] = argparse.ArgumentParser() expected.add_argument( '''--foo''' , default='''toto''' , choices=['''titi''', '''toto''', 42] , type=make_choice_type_function(['''titi''', '''toto''', 42] ) , ) self.argparsersEqual(_lowerCamelCase , _lowerCamelCase ) A_ : str = parser.parse_args([] ) self.assertEqual(args.foo , '''toto''' ) A_ : List[Any] = parser.parse_args_into_dataclasses([] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.toto ) A_ : int = parser.parse_args(['''--foo''', '''titi'''] ) self.assertEqual(args.foo , '''titi''' ) A_ : Dict = parser.parse_args_into_dataclasses(['''--foo''', '''titi'''] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.titi ) A_ : Tuple = parser.parse_args(['''--foo''', '''42'''] ) self.assertEqual(args.foo , 42 ) A_ : List[str] = parser.parse_args_into_dataclasses(['''--foo''', '''42'''] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo ) def _a ( self : Optional[int] ): """simple docstring""" @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = "toto" A_ : List[str] = HfArgumentParser(_lowerCamelCase ) A_ : Tuple = argparse.ArgumentParser() expected.add_argument( '''--foo''' , default='''toto''' , choices=('''titi''', '''toto''', 42) , type=make_choice_type_function(['''titi''', '''toto''', 42] ) , ) self.argparsersEqual(_lowerCamelCase , _lowerCamelCase ) A_ : Tuple = parser.parse_args([] ) self.assertEqual(args.foo , '''toto''' ) A_ : List[str] = parser.parse_args(['''--foo''', '''titi'''] ) self.assertEqual(args.foo , '''titi''' ) A_ : int = parser.parse_args(['''--foo''', '''42'''] ) self.assertEqual(args.foo , 42 ) def _a ( self : Dict ): """simple docstring""" A_ : int = HfArgumentParser(_lowerCamelCase ) A_ : List[Any] = argparse.ArgumentParser() expected.add_argument('''--foo_int''' , nargs='''+''' , default=[] , type=_lowerCamelCase ) expected.add_argument('''--bar_int''' , nargs='''+''' , default=[1, 2, 3] , type=_lowerCamelCase ) expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=_lowerCamelCase ) expected.add_argument('''--foo_float''' , nargs='''+''' , default=[0.1, 0.2, 0.3] , type=_lowerCamelCase ) self.argparsersEqual(_lowerCamelCase , _lowerCamelCase ) A_ : Optional[int] = parser.parse_args([] ) self.assertEqual( _lowerCamelCase , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['''Hallo''', '''Bonjour''', '''Hello'''] , foo_float=[0.1, 0.2, 0.3] ) , ) A_ : str = parser.parse_args('''--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'''.split() ) self.assertEqual(_lowerCamelCase , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['''a''', '''b''', '''c'''] , foo_float=[0.1, 0.7] ) ) def _a ( self : Dict ): """simple docstring""" A_ : Optional[Any] = argparse.ArgumentParser() expected.add_argument('''--foo''' , default=_lowerCamelCase , type=_lowerCamelCase ) expected.add_argument('''--bar''' , default=_lowerCamelCase , type=_lowerCamelCase , help='''help message''' ) expected.add_argument('''--baz''' , default=_lowerCamelCase , type=_lowerCamelCase ) expected.add_argument('''--ces''' , nargs='''+''' , default=[] , type=_lowerCamelCase ) expected.add_argument('''--des''' , nargs='''+''' , default=[] , type=_lowerCamelCase ) A_ : Tuple = [OptionalExample] if is_python_no_less_than_3_10: dataclass_types.append(_lowerCamelCase ) for dataclass_type in dataclass_types: A_ : int = HfArgumentParser(_lowerCamelCase ) self.argparsersEqual(_lowerCamelCase , _lowerCamelCase ) A_ : List[Any] = parser.parse_args([] ) self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , bar=_lowerCamelCase , baz=_lowerCamelCase , ces=[] , des=[] ) ) A_ : Optional[Any] = parser.parse_args('''--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'''.split() ) self.assertEqual(_lowerCamelCase , Namespace(foo=12 , bar=3.14 , baz='''42''' , ces=['''a''', '''b''', '''c'''] , des=[1, 2, 3] ) ) def _a ( self : List[Any] ): """simple docstring""" A_ : List[Any] = HfArgumentParser(_lowerCamelCase ) A_ : Dict = argparse.ArgumentParser() expected.add_argument('''--required_list''' , nargs='''+''' , type=_lowerCamelCase , required=_lowerCamelCase ) expected.add_argument('''--required_str''' , type=_lowerCamelCase , required=_lowerCamelCase ) expected.add_argument( '''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=_lowerCamelCase , ) self.argparsersEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : Optional[Any] ): """simple docstring""" A_ : Union[str, Any] = HfArgumentParser(_lowerCamelCase ) A_ : List[Any] = argparse.ArgumentParser() expected.add_argument('''--foo''' , type=_lowerCamelCase , required=_lowerCamelCase ) expected.add_argument( '''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=_lowerCamelCase , ) expected.add_argument('''--opt''' , type=_lowerCamelCase , default=_lowerCamelCase ) expected.add_argument('''--baz''' , default='''toto''' , type=_lowerCamelCase , help='''help message''' ) expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=_lowerCamelCase ) self.argparsersEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : Tuple ): """simple docstring""" A_ : List[Any] = HfArgumentParser(_lowerCamelCase ) A_ : Union[str, Any] = { '''foo''': 12, '''bar''': 3.14, '''baz''': '''42''', '''flag''': True, } A_ : Optional[int] = parser.parse_dict(_lowerCamelCase )[0] A_ : str = BasicExample(**_lowerCamelCase ) self.assertEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : List[str] ): """simple docstring""" A_ : Any = HfArgumentParser(_lowerCamelCase ) A_ : List[str] = { '''foo''': 12, '''bar''': 3.14, '''baz''': '''42''', '''flag''': True, '''extra''': 42, } self.assertRaises(_lowerCamelCase , parser.parse_dict , _lowerCamelCase , allow_extra_keys=_lowerCamelCase ) def _a ( self : Optional[Any] ): """simple docstring""" A_ : Union[str, Any] = HfArgumentParser(_lowerCamelCase ) A_ : List[str] = { '''foo''': 12, '''bar''': 3.14, '''baz''': '''42''', '''flag''': True, } with tempfile.TemporaryDirectory() as tmp_dir: A_ : Tuple = os.path.join(_lowerCamelCase , '''temp_json''' ) os.mkdir(_lowerCamelCase ) with open(temp_local_path + '''.json''' , '''w+''' ) as f: json.dump(_lowerCamelCase , _lowerCamelCase ) A_ : List[str] = parser.parse_yaml_file(Path(temp_local_path + '''.json''' ) )[0] A_ : Optional[Any] = BasicExample(**_lowerCamelCase ) self.assertEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : int ): """simple docstring""" A_ : int = HfArgumentParser(_lowerCamelCase ) A_ : Tuple = { '''foo''': 12, '''bar''': 3.14, '''baz''': '''42''', '''flag''': True, } with tempfile.TemporaryDirectory() as tmp_dir: A_ : int = os.path.join(_lowerCamelCase , '''temp_yaml''' ) os.mkdir(_lowerCamelCase ) with open(temp_local_path + '''.yaml''' , '''w+''' ) as f: yaml.dump(_lowerCamelCase , _lowerCamelCase ) A_ : Optional[Any] = parser.parse_yaml_file(Path(temp_local_path + '''.yaml''' ) )[0] A_ : int = BasicExample(**_lowerCamelCase ) self.assertEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : Union[str, Any] ): """simple docstring""" A_ : Dict = HfArgumentParser(_lowerCamelCase ) self.assertIsNotNone(_lowerCamelCase )
4
0
'''simple docstring''' from ....configuration_utils import PretrainedConfig from ....utils import logging snake_case__ = logging.get_logger(__name__) snake_case__ = { """Visual-Attention-Network/van-base""": ( """https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json""" ), } class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = 'van' def __init__( self : List[str] , _lowerCamelCase : Dict=224 , _lowerCamelCase : str=3 , _lowerCamelCase : List[str]=[7, 3, 3, 3] , _lowerCamelCase : Tuple=[4, 2, 2, 2] , _lowerCamelCase : Union[str, Any]=[64, 128, 320, 512] , _lowerCamelCase : Optional[int]=[3, 3, 12, 3] , _lowerCamelCase : str=[8, 8, 4, 4] , _lowerCamelCase : Optional[int]="gelu" , _lowerCamelCase : Any=0.02 , _lowerCamelCase : str=1E-6 , _lowerCamelCase : Union[str, Any]=1E-2 , _lowerCamelCase : List[Any]=0.0 , _lowerCamelCase : Any=0.0 , **_lowerCamelCase : Optional[Any] , ): """simple docstring""" super().__init__(**_lowerCamelCase ) A_ : int = image_size A_ : int = num_channels A_ : Any = patch_sizes A_ : List[Any] = strides A_ : Tuple = hidden_sizes A_ : Any = depths A_ : Dict = mlp_ratios A_ : Optional[int] = hidden_act A_ : str = initializer_range A_ : Optional[Any] = layer_norm_eps A_ : List[str] = layer_scale_init_value A_ : Optional[int] = drop_path_rate A_ : Optional[Any] = dropout_rate
360
'''simple docstring''' import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoImageProcessor, ViTImageProcessor from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test sys.path.append(str(Path(__file__).parent.parent / """utils""")) from test_module.custom_image_processing import CustomImageProcessor # noqa E402 snake_case__ = get_tests_dir("""fixtures""") class UpperCamelCase_ (unittest.TestCase ): """simple docstring""" def _a ( self : List[str] ): """simple docstring""" A_ : List[Any] = mock.Mock() A_ : List[str] = 500 A_ : Tuple = {} A_ : int = HTTPError A_ : Optional[Any] = {} # Download this model to make sure it's in the cache. A_ : Tuple = ViTImageProcessor.from_pretrained('''hf-internal-testing/tiny-random-vit''' ) # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch('''requests.Session.request''' , return_value=_lowerCamelCase ) as mock_head: A_ : List[Any] = ViTImageProcessor.from_pretrained('''hf-internal-testing/tiny-random-vit''' ) # This check we did call the fake head request mock_head.assert_called() def _a ( self : Tuple ): """simple docstring""" A_ : Tuple = ViTImageProcessor.from_pretrained( '''https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json''' ) def _a ( self : Dict ): """simple docstring""" with self.assertRaises(_lowerCamelCase ): # config is in subfolder, the following should not work without specifying the subfolder A_ : Any = AutoImageProcessor.from_pretrained('''hf-internal-testing/stable-diffusion-all-variants''' ) A_ : Tuple = AutoImageProcessor.from_pretrained( '''hf-internal-testing/stable-diffusion-all-variants''' , subfolder='''feature_extractor''' ) self.assertIsNotNone(_lowerCamelCase ) @is_staging_test class UpperCamelCase_ (unittest.TestCase ): """simple docstring""" @classmethod def _a ( cls : Tuple ): """simple docstring""" A_ : int = TOKEN HfFolder.save_token(_lowerCamelCase ) @classmethod def _a ( cls : str ): """simple docstring""" try: delete_repo(token=cls._token , repo_id='''test-image-processor''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''valid_org/test-image-processor-org''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''test-dynamic-image-processor''' ) except HTTPError: pass def _a ( self : List[Any] ): """simple docstring""" A_ : Dict = ViTImageProcessor.from_pretrained(_lowerCamelCase ) image_processor.push_to_hub('''test-image-processor''' , use_auth_token=self._token ) A_ : Optional[int] = ViTImageProcessor.from_pretrained(f'{USER}/test-image-processor' ) for k, v in image_processor.__dict__.items(): self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) ) # Reset repo delete_repo(token=self._token , repo_id='''test-image-processor''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained( _lowerCamelCase , repo_id='''test-image-processor''' , push_to_hub=_lowerCamelCase , use_auth_token=self._token ) A_ : List[Any] = ViTImageProcessor.from_pretrained(f'{USER}/test-image-processor' ) for k, v in image_processor.__dict__.items(): self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) ) def _a ( self : Optional[Any] ): """simple docstring""" A_ : int = ViTImageProcessor.from_pretrained(_lowerCamelCase ) image_processor.push_to_hub('''valid_org/test-image-processor''' , use_auth_token=self._token ) A_ : List[str] = ViTImageProcessor.from_pretrained('''valid_org/test-image-processor''' ) for k, v in image_processor.__dict__.items(): self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) ) # Reset repo delete_repo(token=self._token , repo_id='''valid_org/test-image-processor''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained( _lowerCamelCase , repo_id='''valid_org/test-image-processor-org''' , push_to_hub=_lowerCamelCase , use_auth_token=self._token ) A_ : Any = ViTImageProcessor.from_pretrained('''valid_org/test-image-processor-org''' ) for k, v in image_processor.__dict__.items(): self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) ) def _a ( self : Optional[Any] ): """simple docstring""" CustomImageProcessor.register_for_auto_class() A_ : Any = CustomImageProcessor.from_pretrained(_lowerCamelCase ) image_processor.push_to_hub('''test-dynamic-image-processor''' , use_auth_token=self._token ) # This has added the proper auto_map field to the config self.assertDictEqual( image_processor.auto_map , {'''AutoImageProcessor''': '''custom_image_processing.CustomImageProcessor'''} , ) A_ : str = AutoImageProcessor.from_pretrained( f'{USER}/test-dynamic-image-processor' , trust_remote_code=_lowerCamelCase ) # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module self.assertEqual(new_image_processor.__class__.__name__ , '''CustomImageProcessor''' )
4
0
'''simple docstring''' import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_blenderbot import BlenderbotTokenizer if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation snake_case__ = logging.get_logger(__name__) snake_case__ = { """vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_config_file""": """tokenizer_config.json""", } snake_case__ = { """vocab_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"""}, """merges_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"""}, """tokenizer_config_file""": { """facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json""" }, } snake_case__ = {"""facebook/blenderbot-3B""": 1_28} class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = VOCAB_FILES_NAMES _lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP _lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowerCAmelCase = ['input_ids', 'attention_mask'] _lowerCAmelCase = BlenderbotTokenizer def __init__( self : Dict , _lowerCamelCase : List[str]=None , _lowerCamelCase : Tuple=None , _lowerCamelCase : Dict=None , _lowerCamelCase : List[str]="replace" , _lowerCamelCase : str="<s>" , _lowerCamelCase : List[str]="</s>" , _lowerCamelCase : Optional[int]="</s>" , _lowerCamelCase : List[str]="<s>" , _lowerCamelCase : Optional[Any]="<unk>" , _lowerCamelCase : str="<pad>" , _lowerCamelCase : Optional[int]="<mask>" , _lowerCamelCase : Dict=False , _lowerCamelCase : Union[str, Any]=True , **_lowerCamelCase : List[Any] , ): """simple docstring""" super().__init__( _lowerCamelCase , _lowerCamelCase , tokenizer_file=_lowerCamelCase , errors=_lowerCamelCase , bos_token=_lowerCamelCase , eos_token=_lowerCamelCase , sep_token=_lowerCamelCase , cls_token=_lowerCamelCase , unk_token=_lowerCamelCase , pad_token=_lowerCamelCase , mask_token=_lowerCamelCase , add_prefix_space=_lowerCamelCase , trim_offsets=_lowerCamelCase , **_lowerCamelCase , ) A_ : int = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('''add_prefix_space''' , _lowerCamelCase ) != add_prefix_space: A_ : str = getattr(_lowerCamelCase , pre_tok_state.pop('''type''' ) ) A_ : Any = add_prefix_space A_ : Optional[Any] = pre_tok_class(**_lowerCamelCase ) A_ : Tuple = add_prefix_space A_ : int = '''post_processor''' A_ : Union[str, Any] = getattr(self.backend_tokenizer , _lowerCamelCase , _lowerCamelCase ) if tokenizer_component_instance: A_ : Optional[int] = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: A_ : Dict = tuple(state['''sep'''] ) if "cls" in state: A_ : Dict = tuple(state['''cls'''] ) A_ : Optional[int] = False if state.get('''add_prefix_space''' , _lowerCamelCase ) != add_prefix_space: A_ : Any = add_prefix_space A_ : Union[str, Any] = True if state.get('''trim_offsets''' , _lowerCamelCase ) != trim_offsets: A_ : Dict = trim_offsets A_ : List[Any] = True if changes_to_apply: A_ : int = getattr(_lowerCamelCase , state.pop('''type''' ) ) A_ : List[Any] = component_class(**_lowerCamelCase ) setattr(self.backend_tokenizer , _lowerCamelCase , _lowerCamelCase ) @property # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot def _a ( self : Union[str, Any] ): """simple docstring""" if self._mask_token is None: if self.verbose: logger.error('''Using mask_token, but it is not set yet.''' ) return None return str(self._mask_token ) @mask_token.setter def _a ( self : Optional[Any] , _lowerCamelCase : Optional[int] ): """simple docstring""" A_ : Any = AddedToken(_lowerCamelCase , lstrip=_lowerCamelCase , rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase , _lowerCamelCase ) else value A_ : str = value def _a ( self : int , *_lowerCamelCase : str , **_lowerCamelCase : Any ): """simple docstring""" A_ : List[str] = kwargs.get('''is_split_into_words''' , _lowerCamelCase ) assert self.add_prefix_space or not is_split_into_words, ( f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True ' "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*_lowerCamelCase , **_lowerCamelCase ) def _a ( self : Optional[Any] , *_lowerCamelCase : Union[str, Any] , **_lowerCamelCase : Optional[int] ): """simple docstring""" A_ : Any = kwargs.get('''is_split_into_words''' , _lowerCamelCase ) assert self.add_prefix_space or not is_split_into_words, ( f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True ' "to use it with pretokenized inputs." ) return super()._encode_plus(*_lowerCamelCase , **_lowerCamelCase ) def _a ( self : List[str] , _lowerCamelCase : str , _lowerCamelCase : Optional[str] = None ): """simple docstring""" A_ : Tuple = self._tokenizer.model.save(_lowerCamelCase , name=_lowerCamelCase ) return tuple(_lowerCamelCase ) def _a ( self : Dict , _lowerCamelCase : List[int] , _lowerCamelCase : Optional[List[int]] = None ): """simple docstring""" A_ : int = [self.sep_token_id] A_ : List[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _a ( self : List[Any] , _lowerCamelCase : List[int] , _lowerCamelCase : Optional[List[int]] = None ): """simple docstring""" return token_ids_a + [self.eos_token_id] def _a ( self : List[Any] , _lowerCamelCase : "Conversation" ): """simple docstring""" A_ : Optional[int] = [] for is_user, text in conversation.iter_texts(): if is_user: # We need to space prefix as it's being done within blenderbot inputs.append(''' ''' + text ) else: # Generated responses should contain them already. inputs.append(_lowerCamelCase ) A_ : Optional[int] = ''' '''.join(_lowerCamelCase ) A_ : List[str] = self.encode(_lowerCamelCase ) if len(_lowerCamelCase ) > self.model_max_length: A_ : int = input_ids[-self.model_max_length :] logger.warning(f'Trimmed input from conversation as it was longer than {self.model_max_length} tokens.' ) return input_ids
361
'''simple docstring''' # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import torch from ..models.clipseg import CLIPSegForImageSegmentation from ..utils import is_vision_available, requires_backends from .base import PipelineTool if is_vision_available(): from PIL import Image class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = ( 'This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image.' 'It takes two arguments named `image` which should be the original image, and `label` which should be a text ' 'describing the elements what should be identified in the segmentation mask. The tool returns the mask.' ) _lowerCAmelCase = 'CIDAS/clipseg-rd64-refined' _lowerCAmelCase = 'image_segmenter' _lowerCAmelCase = CLIPSegForImageSegmentation _lowerCAmelCase = ['image', 'text'] _lowerCAmelCase = ['image'] def __init__( self : Optional[int] , *_lowerCamelCase : Optional[int] , **_lowerCamelCase : Union[str, Any] ): """simple docstring""" requires_backends(self , ['''vision'''] ) super().__init__(*_lowerCamelCase , **_lowerCamelCase ) def _a ( self : List[str] , _lowerCamelCase : "Image" , _lowerCamelCase : str ): """simple docstring""" return self.pre_processor(text=[label] , images=[image] , padding=_lowerCamelCase , return_tensors='''pt''' ) def _a ( self : Union[str, Any] , _lowerCamelCase : Optional[int] ): """simple docstring""" with torch.no_grad(): A_ : Optional[int] = self.model(**_lowerCamelCase ).logits return logits def _a ( self : List[str] , _lowerCamelCase : Optional[int] ): """simple docstring""" A_ : int = outputs.cpu().detach().numpy() A_ : Tuple = 0 A_ : List[str] = 1 return Image.fromarray((array * 255).astype(np.uinta ) )
4
0
'''simple docstring''' import csv from collections import defaultdict from dataclasses import dataclass, field from typing import List, Optional import matplotlib.pyplot as plt import numpy as np from matplotlib.ticker import ScalarFormatter from transformers import HfArgumentParser def snake_case__ ( lowerCamelCase__ : List[str]=None , lowerCamelCase__ : int=None ) -> Any: return field(default_factory=lambda: default , metadata=lowerCamelCase__ ) @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = field( metadata={'help': 'The csv file to plot.'}, ) _lowerCAmelCase = field( default=a__, metadata={'help': 'Whether to plot along batch size or sequence length. Defaults to sequence length.'}, ) _lowerCAmelCase = field( default=a__, metadata={'help': 'Whether the csv file has time results or memory results. Defaults to memory results.'}, ) _lowerCAmelCase = field( default=a__, metadata={'help': 'Disable logarithmic scale when plotting'}, ) _lowerCAmelCase = field( default=a__, metadata={ 'help': 'Whether the csv file has training results or inference results. Defaults to inference results.' }, ) _lowerCAmelCase = field( default=a__, metadata={'help': 'Filename under which the plot will be saved. If unused no plot is saved.'}, ) _lowerCAmelCase = list_field( default=a__, metadata={'help': 'List of model names that are used instead of the ones in the csv file.'} ) def snake_case__ ( lowerCamelCase__ : Tuple ) -> Optional[int]: try: int(lowerCamelCase__ ) return True except ValueError: return False def snake_case__ ( lowerCamelCase__ : List[Any] ) -> Union[str, Any]: try: float(lowerCamelCase__ ) return True except ValueError: return False class UpperCamelCase_ : """simple docstring""" def __init__( self : int , _lowerCamelCase : Tuple ): """simple docstring""" A_ : Optional[int] = args A_ : Optional[int] = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}} ) with open(self.args.csv_file , newline='''''' ) as csv_file: A_ : Dict = csv.DictReader(_lowerCamelCase ) for row in reader: A_ : Union[str, Any] = row['''model'''] self.result_dict[model_name]["bsz"].append(int(row['''batch_size'''] ) ) self.result_dict[model_name]["seq_len"].append(int(row['''sequence_length'''] ) ) if can_convert_to_int(row['''result'''] ): # value is not None A_ : Dict = int(row['''result'''] ) elif can_convert_to_float(row['''result'''] ): # value is not None A_ : Optional[Any] = float(row['''result'''] ) def _a ( self : Any ): """simple docstring""" A_ : int = plt.subplots() A_ : Optional[int] = '''Time usage''' if self.args.is_time else '''Memory usage''' A_ : Optional[int] = title_str + ''' for training''' if self.args.is_train else title_str + ''' for inference''' if not self.args.no_log_scale: # set logarithm scales ax.set_xscale('''log''' ) ax.set_yscale('''log''' ) for axis in [ax.xaxis, ax.yaxis]: axis.set_major_formatter(ScalarFormatter() ) for model_name_idx, model_name in enumerate(self.result_dict.keys() ): A_ : int = sorted(set(self.result_dict[model_name]['''bsz'''] ) ) A_ : List[str] = sorted(set(self.result_dict[model_name]['''seq_len'''] ) ) A_ : Union[str, Any] = self.result_dict[model_name]['''result'''] (A_) : List[Any] = ( (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes) ) A_ : Union[str, Any] = ( model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx] ) for inner_loop_value in inner_loop_array: if self.args.plot_along_batch: A_ : Optional[Any] = np.asarray( [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results] , dtype=_lowerCamelCase , ) else: A_ : List[str] = np.asarray( [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results] , dtype=np.floataa , ) (A_) : Optional[int] = ( ('''batch_size''', '''len''') if self.args.plot_along_batch else ('''in #tokens''', '''bsz''') ) A_ : Union[str, Any] = np.asarray(_lowerCamelCase , _lowerCamelCase )[: len(_lowerCamelCase )] plt.scatter( _lowerCamelCase , _lowerCamelCase , label=f'{label_model_name} - {inner_loop_label}: {inner_loop_value}' ) plt.plot(_lowerCamelCase , _lowerCamelCase , '''--''' ) title_str += f' {label_model_name} vs.' A_ : List[Any] = title_str[:-4] A_ : Optional[Any] = '''Time in s''' if self.args.is_time else '''Memory in MB''' # plot plt.title(_lowerCamelCase ) plt.xlabel(_lowerCamelCase ) plt.ylabel(_lowerCamelCase ) plt.legend() if self.args.figure_png_file is not None: plt.savefig(self.args.figure_png_file ) else: plt.show() def snake_case__ ( ) -> Optional[int]: A_ : str = HfArgumentParser(lowerCamelCase__ ) A_ : Tuple = parser.parse_args_into_dataclasses()[0] A_ : Any = Plot(args=lowerCamelCase__ ) plot.plot() if __name__ == "__main__": main()
362
'''simple docstring''' from collections.abc import Sequence def snake_case__ ( lowerCamelCase__ : Sequence[float] , lowerCamelCase__ : bool = False ) -> float: if not arr: return 0 A_ : Union[str, Any] = 0 if allow_empty_subarrays else float('''-inf''' ) A_ : str = 0.0 for num in arr: A_ : Any = max(0 if allow_empty_subarrays else num , curr_sum + num ) A_ : Tuple = max(lowerCamelCase__ , lowerCamelCase__ ) return max_sum if __name__ == "__main__": from doctest import testmod testmod() snake_case__ = [-2, 1, -3, 4, -1, 2, 1, -5, 4] print(F'{max_subarray_sum(nums) = }')
4
0
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging snake_case__ = logging.get_logger(__name__) snake_case__ = { """microsoft/biogpt""": """https://huggingface.co/microsoft/biogpt/resolve/main/config.json""", # See all BioGPT models at https://huggingface.co/models?filter=biogpt } class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = 'biogpt' def __init__( self : Tuple , _lowerCamelCase : List[Any]=42384 , _lowerCamelCase : Dict=1024 , _lowerCamelCase : List[str]=24 , _lowerCamelCase : Union[str, Any]=16 , _lowerCamelCase : Tuple=4096 , _lowerCamelCase : Dict="gelu" , _lowerCamelCase : Optional[int]=0.1 , _lowerCamelCase : Any=0.1 , _lowerCamelCase : int=1024 , _lowerCamelCase : List[str]=0.02 , _lowerCamelCase : int=1E-12 , _lowerCamelCase : Dict=True , _lowerCamelCase : Tuple=True , _lowerCamelCase : str=0.0 , _lowerCamelCase : Dict=0.0 , _lowerCamelCase : List[str]=1 , _lowerCamelCase : Union[str, Any]=0 , _lowerCamelCase : int=2 , **_lowerCamelCase : Tuple , ): """simple docstring""" A_ : List[str] = vocab_size A_ : Optional[Any] = max_position_embeddings A_ : int = hidden_size A_ : Union[str, Any] = num_hidden_layers A_ : Optional[int] = num_attention_heads A_ : int = intermediate_size A_ : Optional[Any] = hidden_act A_ : List[str] = hidden_dropout_prob A_ : List[str] = attention_probs_dropout_prob A_ : Optional[Any] = initializer_range A_ : Union[str, Any] = layer_norm_eps A_ : Any = scale_embedding A_ : str = use_cache A_ : Optional[Any] = layerdrop A_ : List[str] = activation_dropout super().__init__(pad_token_id=_lowerCamelCase , bos_token_id=_lowerCamelCase , eos_token_id=_lowerCamelCase , **_lowerCamelCase )
363
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging snake_case__ = logging.get_logger(__name__) snake_case__ = { """facebook/s2t-wav2vec2-large-en-de""": ( """https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json""" ), # See all Speech2Text models at https://huggingface.co/models?filter=speech2text2 } class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = 'speech_to_text_2' _lowerCAmelCase = ['past_key_values'] _lowerCAmelCase = {'num_attention_heads': 'decoder_attention_heads', 'hidden_size': 'd_model'} def __init__( self : Optional[Any] , _lowerCamelCase : Optional[Any]=10000 , _lowerCamelCase : List[Any]=6 , _lowerCamelCase : int=2048 , _lowerCamelCase : Dict=4 , _lowerCamelCase : str=0.0 , _lowerCamelCase : int=True , _lowerCamelCase : int="relu" , _lowerCamelCase : Any=256 , _lowerCamelCase : List[Any]=0.1 , _lowerCamelCase : Tuple=0.0 , _lowerCamelCase : Union[str, Any]=0.0 , _lowerCamelCase : Optional[Any]=0.02 , _lowerCamelCase : int=2 , _lowerCamelCase : List[str]=True , _lowerCamelCase : str=1 , _lowerCamelCase : List[Any]=0 , _lowerCamelCase : Optional[int]=2 , _lowerCamelCase : Tuple=1024 , **_lowerCamelCase : int , ): """simple docstring""" A_ : Optional[int] = vocab_size A_ : Tuple = d_model A_ : List[str] = decoder_ffn_dim A_ : str = decoder_layers A_ : Any = decoder_attention_heads A_ : int = dropout A_ : str = attention_dropout A_ : Optional[int] = activation_dropout A_ : str = activation_function A_ : List[Any] = init_std A_ : Union[str, Any] = decoder_layerdrop A_ : Any = use_cache A_ : Optional[Any] = decoder_layers A_ : Optional[int] = scale_embedding # scale factor will be sqrt(d_model) if True A_ : Optional[Any] = max_target_positions super().__init__( pad_token_id=_lowerCamelCase , bos_token_id=_lowerCamelCase , eos_token_id=_lowerCamelCase , decoder_start_token_id=_lowerCamelCase , **_lowerCamelCase , )
4
0
import copy from typing import Dict, Optional from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING from ..detr import DetrConfig from ..swin import SwinConfig snake_case__ = { """facebook/maskformer-swin-base-ade""": ( """https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json""" ) # See all MaskFormer models at https://huggingface.co/models?filter=maskformer } snake_case__ = logging.get_logger(__name__) class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = 'maskformer' _lowerCAmelCase = {'hidden_size': 'mask_feature_size'} _lowerCAmelCase = ['resnet', 'swin'] _lowerCAmelCase = ['detr'] def __init__( self : Optional[int] , _lowerCamelCase : int = 256 , _lowerCamelCase : int = 256 , _lowerCamelCase : float = 0.1 , _lowerCamelCase : bool = False , _lowerCamelCase : Optional[Dict] = None , _lowerCamelCase : Optional[Dict] = None , _lowerCamelCase : float = 0.02 , _lowerCamelCase : float = 1.0 , _lowerCamelCase : float = 1.0 , _lowerCamelCase : float = 1.0 , _lowerCamelCase : float = 20.0 , _lowerCamelCase : Optional[bool] = None , **_lowerCamelCase : Optional[int] , ): """simple docstring""" if backbone_config is None: # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k A_ : List[Any] = SwinConfig( image_size=384 , in_channels=3 , patch_size=4 , embed_dim=128 , depths=[2, 2, 18, 2] , num_heads=[4, 8, 16, 32] , window_size=12 , drop_path_rate=0.3 , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] , ) if isinstance(_lowerCamelCase , _lowerCamelCase ): A_ : Union[str, Any] = backbone_config.pop('''model_type''' ) A_ : Dict = CONFIG_MAPPING[backbone_model_type] A_ : Any = config_class.from_dict(_lowerCamelCase ) # verify that the backbone is supported if backbone_config.model_type not in self.backbones_supported: logger.warning_once( f'Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. ' f'Supported model types: {",".join(self.backbones_supported )}' ) if decoder_config is None: # fall back to https://huggingface.co/facebook/detr-resnet-50 A_ : List[Any] = DetrConfig() else: # verify that the decoder is supported A_ : Optional[int] = ( decoder_config.pop('''model_type''' ) if isinstance(_lowerCamelCase , _lowerCamelCase ) else decoder_config.model_type ) if decoder_type not in self.decoders_supported: raise ValueError( f'Transformer Decoder {decoder_type} not supported, please use one of' f' {",".join(self.decoders_supported )}' ) if isinstance(_lowerCamelCase , _lowerCamelCase ): A_ : Any = CONFIG_MAPPING[decoder_type] A_ : str = config_class.from_dict(_lowerCamelCase ) A_ : Any = backbone_config A_ : List[str] = decoder_config # main feature dimension for the model A_ : Union[str, Any] = fpn_feature_size A_ : Dict = mask_feature_size # initializer A_ : str = init_std A_ : List[str] = init_xavier_std # Hungarian matcher && loss A_ : Union[str, Any] = cross_entropy_weight A_ : Dict = dice_weight A_ : Union[str, Any] = mask_weight A_ : Optional[Any] = use_auxiliary_loss A_ : Optional[int] = no_object_weight A_ : Any = output_auxiliary_logits A_ : Optional[int] = self.decoder_config.encoder_attention_heads A_ : Any = self.decoder_config.num_hidden_layers super().__init__(**_lowerCamelCase ) @classmethod def _a ( cls : Any , _lowerCamelCase : PretrainedConfig , _lowerCamelCase : PretrainedConfig , **_lowerCamelCase : Optional[Any] ): """simple docstring""" return cls( backbone_config=_lowerCamelCase , decoder_config=_lowerCamelCase , **_lowerCamelCase , ) def _a ( self : int ): """simple docstring""" A_ : Union[str, Any] = copy.deepcopy(self.__dict__ ) A_ : int = self.backbone_config.to_dict() A_ : Optional[Any] = self.decoder_config.to_dict() A_ : List[str] = self.__class__.model_type return output
364
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING snake_case__ = logging.get_logger(__name__) snake_case__ = { """microsoft/table-transformer-detection""": ( """https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json""" ), } class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = 'table-transformer' _lowerCAmelCase = ['past_key_values'] _lowerCAmelCase = { 'hidden_size': 'd_model', 'num_attention_heads': 'encoder_attention_heads', } def __init__( self : Any , _lowerCamelCase : Union[str, Any]=True , _lowerCamelCase : Dict=None , _lowerCamelCase : int=3 , _lowerCamelCase : Any=100 , _lowerCamelCase : List[Any]=6 , _lowerCamelCase : Tuple=2048 , _lowerCamelCase : Any=8 , _lowerCamelCase : Dict=6 , _lowerCamelCase : Tuple=2048 , _lowerCamelCase : int=8 , _lowerCamelCase : Optional[int]=0.0 , _lowerCamelCase : List[Any]=0.0 , _lowerCamelCase : List[Any]=True , _lowerCamelCase : Optional[int]="relu" , _lowerCamelCase : Union[str, Any]=256 , _lowerCamelCase : Any=0.1 , _lowerCamelCase : Tuple=0.0 , _lowerCamelCase : Optional[int]=0.0 , _lowerCamelCase : str=0.02 , _lowerCamelCase : Tuple=1.0 , _lowerCamelCase : Dict=False , _lowerCamelCase : str="sine" , _lowerCamelCase : str="resnet50" , _lowerCamelCase : Any=True , _lowerCamelCase : List[str]=False , _lowerCamelCase : Any=1 , _lowerCamelCase : int=5 , _lowerCamelCase : Tuple=2 , _lowerCamelCase : Optional[int]=1 , _lowerCamelCase : Any=1 , _lowerCamelCase : Dict=5 , _lowerCamelCase : str=2 , _lowerCamelCase : Union[str, Any]=0.1 , **_lowerCamelCase : int , ): """simple docstring""" if backbone_config is not None and use_timm_backbone: raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' ) if not use_timm_backbone: if backbone_config is None: logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' ) A_ : int = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] ) elif isinstance(_lowerCamelCase , _lowerCamelCase ): A_ : str = backbone_config.get('''model_type''' ) A_ : Optional[int] = CONFIG_MAPPING[backbone_model_type] A_ : List[str] = config_class.from_dict(_lowerCamelCase ) # set timm attributes to None A_ ,A_ ,A_ : Union[str, Any] = None, None, None A_ : Optional[Any] = use_timm_backbone A_ : Optional[int] = backbone_config A_ : Optional[Any] = num_channels A_ : Dict = num_queries A_ : str = d_model A_ : List[str] = encoder_ffn_dim A_ : int = encoder_layers A_ : Optional[Any] = encoder_attention_heads A_ : List[str] = decoder_ffn_dim A_ : Any = decoder_layers A_ : List[str] = decoder_attention_heads A_ : Tuple = dropout A_ : Optional[Any] = attention_dropout A_ : Any = activation_dropout A_ : List[Any] = activation_function A_ : Dict = init_std A_ : Any = init_xavier_std A_ : List[Any] = encoder_layerdrop A_ : int = decoder_layerdrop A_ : Any = encoder_layers A_ : List[str] = auxiliary_loss A_ : List[Any] = position_embedding_type A_ : Optional[Any] = backbone A_ : Tuple = use_pretrained_backbone A_ : List[Any] = dilation # Hungarian matcher A_ : List[str] = class_cost A_ : str = bbox_cost A_ : Union[str, Any] = giou_cost # Loss coefficients A_ : Any = mask_loss_coefficient A_ : Optional[int] = dice_loss_coefficient A_ : Dict = bbox_loss_coefficient A_ : int = giou_loss_coefficient A_ : int = eos_coefficient super().__init__(is_encoder_decoder=_lowerCamelCase , **_lowerCamelCase ) @property def _a ( self : List[Any] ): """simple docstring""" return self.encoder_attention_heads @property def _a ( self : Any ): """simple docstring""" return self.d_model class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = version.parse('1.11' ) @property def _a ( self : Tuple ): """simple docstring""" return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ('''pixel_mask''', {0: '''batch'''}), ] ) @property def _a ( self : Optional[int] ): """simple docstring""" return 1E-5 @property def _a ( self : str ): """simple docstring""" return 12
4
0
'''simple docstring''' import os import sys from contextlib import contextmanager # Windows only if os.name == "nt": import ctypes import msvcrt # noqa class UpperCamelCase_ (ctypes.Structure ): """simple docstring""" _lowerCAmelCase = [('size', ctypes.c_int), ('visible', ctypes.c_byte)] def snake_case__ ( ) -> Tuple: if os.name == "nt": A_ : str = CursorInfo() A_ : int = ctypes.windll.kernelaa.GetStdHandle(-1_1 ) ctypes.windll.kernelaa.GetConsoleCursorInfo(lowerCamelCase__ , ctypes.byref(lowerCamelCase__ ) ) A_ : Any = False ctypes.windll.kernelaa.SetConsoleCursorInfo(lowerCamelCase__ , ctypes.byref(lowerCamelCase__ ) ) elif os.name == "posix": sys.stdout.write('''\033[?25l''' ) sys.stdout.flush() def snake_case__ ( ) -> int: if os.name == "nt": A_ : Tuple = CursorInfo() A_ : List[str] = ctypes.windll.kernelaa.GetStdHandle(-1_1 ) ctypes.windll.kernelaa.GetConsoleCursorInfo(lowerCamelCase__ , ctypes.byref(lowerCamelCase__ ) ) A_ : int = True ctypes.windll.kernelaa.SetConsoleCursorInfo(lowerCamelCase__ , ctypes.byref(lowerCamelCase__ ) ) elif os.name == "posix": sys.stdout.write('''\033[?25h''' ) sys.stdout.flush() @contextmanager def snake_case__ ( ) -> Dict: try: hide_cursor() yield finally: show_cursor()
365
'''simple docstring''' import inspect import unittest from transformers import BitConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class UpperCamelCase_ : """simple docstring""" def __init__( self : Optional[Any] , _lowerCamelCase : int , _lowerCamelCase : List[str]=3 , _lowerCamelCase : Any=32 , _lowerCamelCase : Union[str, Any]=3 , _lowerCamelCase : int=10 , _lowerCamelCase : Union[str, Any]=[8, 16, 32, 64] , _lowerCamelCase : Dict=[1, 1, 2, 1] , _lowerCamelCase : Union[str, Any]=True , _lowerCamelCase : Optional[int]=True , _lowerCamelCase : Any="relu" , _lowerCamelCase : Optional[Any]=3 , _lowerCamelCase : Optional[Any]=None , _lowerCamelCase : Dict=["stage2", "stage3", "stage4"] , _lowerCamelCase : Union[str, Any]=[2, 3, 4] , _lowerCamelCase : Tuple=1 , ): """simple docstring""" A_ : List[str] = parent A_ : List[str] = batch_size A_ : Union[str, Any] = image_size A_ : Tuple = num_channels A_ : Any = embeddings_size A_ : int = hidden_sizes A_ : Optional[Any] = depths A_ : List[Any] = is_training A_ : Optional[int] = use_labels A_ : int = hidden_act A_ : Tuple = num_labels A_ : Union[str, Any] = scope A_ : List[Any] = len(_lowerCamelCase ) A_ : Union[str, Any] = out_features A_ : List[Any] = out_indices A_ : Dict = num_groups def _a ( self : Optional[int] ): """simple docstring""" A_ : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A_ : Union[str, Any] = None if self.use_labels: A_ : Any = ids_tensor([self.batch_size] , self.num_labels ) A_ : Any = self.get_config() return config, pixel_values, labels def _a ( self : Union[str, Any] ): """simple docstring""" return BitConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , ) def _a ( self : List[Any] , _lowerCamelCase : List[str] , _lowerCamelCase : List[str] , _lowerCamelCase : Optional[Any] ): """simple docstring""" A_ : Any = BitModel(config=_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() A_ : int = model(_lowerCamelCase ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def _a ( self : Optional[int] , _lowerCamelCase : List[Any] , _lowerCamelCase : str , _lowerCamelCase : Optional[int] ): """simple docstring""" A_ : Dict = self.num_labels A_ : Optional[Any] = BitForImageClassification(_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() A_ : List[Any] = model(_lowerCamelCase , labels=_lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _a ( self : Any , _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : List[Any] ): """simple docstring""" A_ : List[Any] = BitBackbone(config=_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() A_ : int = model(_lowerCamelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None A_ : Optional[Any] = None A_ : int = BitBackbone(config=_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() A_ : Optional[int] = model(_lowerCamelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def _a ( self : List[Any] ): """simple docstring""" A_ : Union[str, Any] = self.prepare_config_and_inputs() A_ ,A_ ,A_ : Union[str, Any] = config_and_inputs A_ : str = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class UpperCamelCase_ (a__, a__, unittest.TestCase ): """simple docstring""" _lowerCAmelCase = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else () _lowerCAmelCase = ( {'feature-extraction': BitModel, 'image-classification': BitForImageClassification} if is_torch_available() else {} ) _lowerCAmelCase = False _lowerCAmelCase = False _lowerCAmelCase = False _lowerCAmelCase = False _lowerCAmelCase = False def _a ( self : Optional[Any] ): """simple docstring""" A_ : List[str] = BitModelTester(self ) A_ : Optional[Any] = ConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase ) def _a ( self : Optional[Any] ): """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _a ( self : List[Any] ): """simple docstring""" return @unittest.skip(reason='''Bit does not output attentions''' ) def _a ( self : str ): """simple docstring""" pass @unittest.skip(reason='''Bit does not use inputs_embeds''' ) def _a ( self : Union[str, Any] ): """simple docstring""" pass @unittest.skip(reason='''Bit does not support input and output embeddings''' ) def _a ( self : Any ): """simple docstring""" pass def _a ( self : List[Any] ): """simple docstring""" A_ ,A_ : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A_ : Dict = model_class(_lowerCamelCase ) A_ : Dict = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A_ : int = [*signature.parameters.keys()] A_ : Union[str, Any] = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , _lowerCamelCase ) def _a ( self : Optional[Any] ): """simple docstring""" A_ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCamelCase ) def _a ( self : Optional[Any] ): """simple docstring""" A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*_lowerCamelCase ) def _a ( self : Tuple ): """simple docstring""" A_ ,A_ : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A_ : str = model_class(config=_lowerCamelCase ) for name, module in model.named_modules(): if isinstance(_lowerCamelCase , (nn.BatchNormad, nn.GroupNorm) ): self.assertTrue( torch.all(module.weight == 1 ) , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , ) self.assertTrue( torch.all(module.bias == 0 ) , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , ) def _a ( self : int ): """simple docstring""" def check_hidden_states_output(_lowerCamelCase : Union[str, Any] , _lowerCamelCase : Dict , _lowerCamelCase : int ): A_ : Union[str, Any] = model_class(_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() with torch.no_grad(): A_ : Union[str, Any] = model(**self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) ) A_ : int = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states A_ : List[Any] = self.model_tester.num_stages self.assertEqual(len(_lowerCamelCase ) , expected_num_stages + 1 ) # Bit's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) A_ ,A_ : str = self.model_tester.prepare_config_and_inputs_for_common() A_ : Tuple = ['''preactivation''', '''bottleneck'''] for model_class in self.all_model_classes: for layer_type in layers_type: A_ : Tuple = layer_type A_ : Optional[Any] = True check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] A_ : List[str] = True check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) @unittest.skip(reason='''Bit does not use feedforward chunking''' ) def _a ( self : Tuple ): """simple docstring""" pass def _a ( self : str ): """simple docstring""" A_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_lowerCamelCase ) @slow def _a ( self : Union[str, Any] ): """simple docstring""" for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A_ : List[Any] = BitModel.from_pretrained(_lowerCamelCase ) self.assertIsNotNone(_lowerCamelCase ) def snake_case__ ( ) -> Optional[int]: A_ : Optional[int] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class UpperCamelCase_ (unittest.TestCase ): """simple docstring""" @cached_property def _a ( self : List[Any] ): """simple docstring""" return ( BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def _a ( self : Dict ): """simple docstring""" A_ : Optional[int] = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(_lowerCamelCase ) A_ : Union[str, Any] = self.default_image_processor A_ : Optional[int] = prepare_img() A_ : int = image_processor(images=_lowerCamelCase , return_tensors='''pt''' ).to(_lowerCamelCase ) # forward pass with torch.no_grad(): A_ : Union[str, Any] = model(**_lowerCamelCase ) # verify the logits A_ : Dict = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , _lowerCamelCase ) A_ : Tuple = torch.tensor([[-0.65_26, -0.52_63, -1.43_98]] ).to(_lowerCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCamelCase , atol=1E-4 ) ) @require_torch class UpperCamelCase_ (a__, unittest.TestCase ): """simple docstring""" _lowerCAmelCase = (BitBackbone,) if is_torch_available() else () _lowerCAmelCase = BitConfig _lowerCAmelCase = False def _a ( self : List[str] ): """simple docstring""" A_ : Union[str, Any] = BitModelTester(self )
4
0
'''simple docstring''' from ... import PretrainedConfig snake_case__ = { """sijunhe/nezha-cn-base""": """https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json""", } class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP _lowerCAmelCase = 'nezha' def __init__( self : Tuple , _lowerCamelCase : int=21128 , _lowerCamelCase : Optional[Any]=768 , _lowerCamelCase : Dict=12 , _lowerCamelCase : List[str]=12 , _lowerCamelCase : int=3072 , _lowerCamelCase : List[str]="gelu" , _lowerCamelCase : Optional[int]=0.1 , _lowerCamelCase : Any=0.1 , _lowerCamelCase : List[Any]=512 , _lowerCamelCase : Optional[Any]=64 , _lowerCamelCase : Any=2 , _lowerCamelCase : int=0.02 , _lowerCamelCase : List[Any]=1E-12 , _lowerCamelCase : Optional[Any]=0.1 , _lowerCamelCase : Tuple=0 , _lowerCamelCase : Dict=2 , _lowerCamelCase : List[str]=3 , _lowerCamelCase : Union[str, Any]=True , **_lowerCamelCase : Optional[Any] , ): """simple docstring""" super().__init__(pad_token_id=_lowerCamelCase , bos_token_id=_lowerCamelCase , eos_token_id=_lowerCamelCase , **_lowerCamelCase ) A_ : Dict = vocab_size A_ : int = hidden_size A_ : str = num_hidden_layers A_ : Optional[int] = num_attention_heads A_ : List[Any] = hidden_act A_ : Optional[Any] = intermediate_size A_ : int = hidden_dropout_prob A_ : Union[str, Any] = attention_probs_dropout_prob A_ : Optional[int] = max_position_embeddings A_ : List[str] = max_relative_position A_ : Union[str, Any] = type_vocab_size A_ : Dict = initializer_range A_ : Tuple = layer_norm_eps A_ : Optional[Any] = classifier_dropout A_ : int = use_cache
366
'''simple docstring''' import pprint import requests snake_case__ = """https://zenquotes.io/api""" def snake_case__ ( ) -> list: return requests.get(API_ENDPOINT_URL + '''/today''' ).json() def snake_case__ ( ) -> list: return requests.get(API_ENDPOINT_URL + '''/random''' ).json() if __name__ == "__main__": snake_case__ = random_quotes() pprint.pprint(response)
4
0
'''simple docstring''' import contextlib import csv import json import os import sqlitea import tarfile import textwrap import zipfile import pyarrow as pa import pyarrow.parquet as pq import pytest import datasets import datasets.config @pytest.fixture(scope='''session''' ) def snake_case__ ( ) -> int: A_ : Any = 1_0 A_ : Dict = datasets.Features( { '''tokens''': datasets.Sequence(datasets.Value('''string''' ) ), '''labels''': datasets.Sequence(datasets.ClassLabel(names=['''negative''', '''positive'''] ) ), '''answers''': datasets.Sequence( { '''text''': datasets.Value('''string''' ), '''answer_start''': datasets.Value('''int32''' ), } ), '''id''': datasets.Value('''int64''' ), } ) A_ : Dict = datasets.Dataset.from_dict( { '''tokens''': [['''foo'''] * 5] * n, '''labels''': [[1] * 5] * n, '''answers''': [{'''answer_start''': [9_7], '''text''': ['''1976''']}] * 1_0, '''id''': list(range(lowerCamelCase__ ) ), } , features=lowerCamelCase__ , ) return dataset @pytest.fixture(scope='''session''' ) def snake_case__ ( lowerCamelCase__ : str , lowerCamelCase__ : List[str] ) -> Union[str, Any]: A_ : Dict = str(tmp_path_factory.mktemp('''data''' ) / '''file.arrow''' ) dataset.map(cache_file_name=lowerCamelCase__ ) return filename # FILE_CONTENT + files snake_case__ = """\ Text data. Second line of data.""" @pytest.fixture(scope='''session''' ) def snake_case__ ( lowerCamelCase__ : Dict ) -> List[str]: A_ : Union[str, Any] = tmp_path_factory.mktemp('''data''' ) / '''file.txt''' A_ : int = FILE_CONTENT with open(lowerCamelCase__ , '''w''' ) as f: f.write(lowerCamelCase__ ) return filename @pytest.fixture(scope='''session''' ) def snake_case__ ( lowerCamelCase__ : List[Any] ) -> Dict: import bza A_ : int = tmp_path_factory.mktemp('''data''' ) / '''file.txt.bz2''' A_ : Union[str, Any] = bytes(lowerCamelCase__ , '''utf-8''' ) with bza.open(lowerCamelCase__ , '''wb''' ) as f: f.write(lowerCamelCase__ ) return path @pytest.fixture(scope='''session''' ) def snake_case__ ( lowerCamelCase__ : List[str] ) -> List[Any]: import gzip A_ : List[Any] = str(tmp_path_factory.mktemp('''data''' ) / '''file.txt.gz''' ) A_ : int = bytes(lowerCamelCase__ , '''utf-8''' ) with gzip.open(lowerCamelCase__ , '''wb''' ) as f: f.write(lowerCamelCase__ ) return path @pytest.fixture(scope='''session''' ) def snake_case__ ( lowerCamelCase__ : List[str] ) -> Optional[Any]: if datasets.config.LZ4_AVAILABLE: import lza.frame A_ : List[str] = tmp_path_factory.mktemp('''data''' ) / '''file.txt.lz4''' A_ : Optional[int] = bytes(lowerCamelCase__ , '''utf-8''' ) with lza.frame.open(lowerCamelCase__ , '''wb''' ) as f: f.write(lowerCamelCase__ ) return path @pytest.fixture(scope='''session''' ) def snake_case__ ( lowerCamelCase__ : str , lowerCamelCase__ : Any ) -> List[str]: if datasets.config.PY7ZR_AVAILABLE: import pyazr A_ : int = tmp_path_factory.mktemp('''data''' ) / '''file.txt.7z''' with pyazr.SevenZipFile(lowerCamelCase__ , '''w''' ) as archive: archive.write(lowerCamelCase__ , arcname=os.path.basename(lowerCamelCase__ ) ) return path @pytest.fixture(scope='''session''' ) def snake_case__ ( lowerCamelCase__ : int , lowerCamelCase__ : str ) -> List[Any]: import tarfile A_ : Optional[Any] = tmp_path_factory.mktemp('''data''' ) / '''file.txt.tar''' with tarfile.TarFile(lowerCamelCase__ , '''w''' ) as f: f.add(lowerCamelCase__ , arcname=os.path.basename(lowerCamelCase__ ) ) return path @pytest.fixture(scope='''session''' ) def snake_case__ ( lowerCamelCase__ : str ) -> str: import lzma A_ : List[str] = tmp_path_factory.mktemp('''data''' ) / '''file.txt.xz''' A_ : Any = bytes(lowerCamelCase__ , '''utf-8''' ) with lzma.open(lowerCamelCase__ , '''wb''' ) as f: f.write(lowerCamelCase__ ) return path @pytest.fixture(scope='''session''' ) def snake_case__ ( lowerCamelCase__ : List[str] , lowerCamelCase__ : Dict ) -> Union[str, Any]: import zipfile A_ : Union[str, Any] = tmp_path_factory.mktemp('''data''' ) / '''file.txt.zip''' with zipfile.ZipFile(lowerCamelCase__ , '''w''' ) as f: f.write(lowerCamelCase__ , arcname=os.path.basename(lowerCamelCase__ ) ) return path @pytest.fixture(scope='''session''' ) def snake_case__ ( lowerCamelCase__ : List[str] ) -> int: if datasets.config.ZSTANDARD_AVAILABLE: import zstandard as zstd A_ : Any = tmp_path_factory.mktemp('''data''' ) / '''file.txt.zst''' A_ : List[str] = bytes(lowerCamelCase__ , '''utf-8''' ) with zstd.open(lowerCamelCase__ , '''wb''' ) as f: f.write(lowerCamelCase__ ) return path @pytest.fixture(scope='''session''' ) def snake_case__ ( lowerCamelCase__ : Tuple ) -> Tuple: A_ : List[Any] = tmp_path_factory.mktemp('''data''' ) / '''file.xml''' A_ : Optional[int] = textwrap.dedent( '''\ <?xml version="1.0" encoding="UTF-8" ?> <tmx version="1.4"> <header segtype="sentence" srclang="ca" /> <body> <tu> <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv> <tuv xml:lang="en"><seg>Content 1</seg></tuv> </tu> <tu> <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv> <tuv xml:lang="en"><seg>Content 2</seg></tuv> </tu> <tu> <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv> <tuv xml:lang="en"><seg>Content 3</seg></tuv> </tu> <tu> <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv> <tuv xml:lang="en"><seg>Content 4</seg></tuv> </tu> <tu> <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv> <tuv xml:lang="en"><seg>Content 5</seg></tuv> </tu> </body> </tmx>''' ) with open(lowerCamelCase__ , '''w''' ) as f: f.write(lowerCamelCase__ ) return filename snake_case__ = [ {"""col_1""": """0""", """col_2""": 0, """col_3""": 0.0}, {"""col_1""": """1""", """col_2""": 1, """col_3""": 1.0}, {"""col_1""": """2""", """col_2""": 2, """col_3""": 2.0}, {"""col_1""": """3""", """col_2""": 3, """col_3""": 3.0}, ] snake_case__ = [ {"""col_1""": """4""", """col_2""": 4, """col_3""": 4.0}, {"""col_1""": """5""", """col_2""": 5, """col_3""": 5.0}, ] snake_case__ = { """col_1""": ["""0""", """1""", """2""", """3"""], """col_2""": [0, 1, 2, 3], """col_3""": [0.0, 1.0, 2.0, 3.0], } snake_case__ = [ {"""col_3""": 0.0, """col_1""": """0""", """col_2""": 0}, {"""col_3""": 1.0, """col_1""": """1""", """col_2""": 1}, ] snake_case__ = [ {"""col_1""": """s0""", """col_2""": 0, """col_3""": 0.0}, {"""col_1""": """s1""", """col_2""": 1, """col_3""": 1.0}, {"""col_1""": """s2""", """col_2""": 2, """col_3""": 2.0}, {"""col_1""": """s3""", """col_2""": 3, """col_3""": 3.0}, ] @pytest.fixture(scope='''session''' ) def snake_case__ ( ) -> List[str]: return DATA_DICT_OF_LISTS @pytest.fixture(scope='''session''' ) def snake_case__ ( lowerCamelCase__ : List[Any] ) -> Any: A_ : Optional[Any] = datasets.Dataset.from_dict(lowerCamelCase__ ) A_ : Union[str, Any] = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.arrow''' ) dataset.map(cache_file_name=lowerCamelCase__ ) return path @pytest.fixture(scope='''session''' ) def snake_case__ ( lowerCamelCase__ : List[str] ) -> List[Any]: A_ : Tuple = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.sqlite''' ) with contextlib.closing(sqlitea.connect(lowerCamelCase__ ) ) as con: A_ : str = con.cursor() cur.execute('''CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)''' ) for item in DATA: cur.execute('''INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)''' , tuple(item.values() ) ) con.commit() return path @pytest.fixture(scope='''session''' ) def snake_case__ ( lowerCamelCase__ : int ) -> Optional[int]: A_ : str = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.csv''' ) with open(lowerCamelCase__ , '''w''' , newline='''''' ) as f: A_ : Any = csv.DictWriter(lowerCamelCase__ , fieldnames=['''col_1''', '''col_2''', '''col_3'''] ) writer.writeheader() for item in DATA: writer.writerow(lowerCamelCase__ ) return path @pytest.fixture(scope='''session''' ) def snake_case__ ( lowerCamelCase__ : List[str] ) -> Optional[int]: A_ : str = str(tmp_path_factory.mktemp('''data''' ) / '''dataset2.csv''' ) with open(lowerCamelCase__ , '''w''' , newline='''''' ) as f: A_ : str = csv.DictWriter(lowerCamelCase__ , fieldnames=['''col_1''', '''col_2''', '''col_3'''] ) writer.writeheader() for item in DATA: writer.writerow(lowerCamelCase__ ) return path @pytest.fixture(scope='''session''' ) def snake_case__ ( lowerCamelCase__ : Optional[int] , lowerCamelCase__ : List[Any] ) -> int: import bza A_ : List[str] = tmp_path_factory.mktemp('''data''' ) / '''dataset.csv.bz2''' with open(lowerCamelCase__ , '''rb''' ) as f: A_ : Union[str, Any] = f.read() # data = bytes(FILE_CONTENT, "utf-8") with bza.open(lowerCamelCase__ , '''wb''' ) as f: f.write(lowerCamelCase__ ) return path @pytest.fixture(scope='''session''' ) def snake_case__ ( lowerCamelCase__ : int , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : List[Any] ) -> List[Any]: A_ : Tuple = tmp_path_factory.mktemp('''data''' ) / '''dataset.csv.zip''' with zipfile.ZipFile(lowerCamelCase__ , '''w''' ) as f: f.write(lowerCamelCase__ , arcname=os.path.basename(lowerCamelCase__ ) ) f.write(lowerCamelCase__ , arcname=os.path.basename(lowerCamelCase__ ) ) return path @pytest.fixture(scope='''session''' ) def snake_case__ ( lowerCamelCase__ : Dict , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Optional[Any] ) -> Optional[int]: A_ : Dict = tmp_path_factory.mktemp('''data''' ) / '''dataset.csv.zip''' with zipfile.ZipFile(lowerCamelCase__ , '''w''' ) as f: f.write(lowerCamelCase__ , arcname=os.path.basename(csv_path.replace('''.csv''' , '''.CSV''' ) ) ) f.write(lowerCamelCase__ , arcname=os.path.basename(csva_path.replace('''.csv''' , '''.CSV''' ) ) ) return path @pytest.fixture(scope='''session''' ) def snake_case__ ( lowerCamelCase__ : int , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Any ) -> List[Any]: A_ : Optional[int] = tmp_path_factory.mktemp('''data''' ) / '''dataset_with_dir.csv.zip''' with zipfile.ZipFile(lowerCamelCase__ , '''w''' ) as f: f.write(lowerCamelCase__ , arcname=os.path.join('''main_dir''' , os.path.basename(lowerCamelCase__ ) ) ) f.write(lowerCamelCase__ , arcname=os.path.join('''main_dir''' , os.path.basename(lowerCamelCase__ ) ) ) return path @pytest.fixture(scope='''session''' ) def snake_case__ ( lowerCamelCase__ : Optional[Any] ) -> List[str]: A_ : Optional[Any] = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.parquet''' ) A_ : Optional[int] = pa.schema( { '''col_1''': pa.string(), '''col_2''': pa.intaa(), '''col_3''': pa.floataa(), } ) with open(lowerCamelCase__ , '''wb''' ) as f: A_ : List[str] = pq.ParquetWriter(lowerCamelCase__ , schema=lowerCamelCase__ ) A_ : Union[str, Any] = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(lowerCamelCase__ ) )] for k in DATA[0]} , schema=lowerCamelCase__ ) writer.write_table(lowerCamelCase__ ) writer.close() return path @pytest.fixture(scope='''session''' ) def snake_case__ ( lowerCamelCase__ : int ) -> List[Any]: A_ : Tuple = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.json''' ) A_ : List[Any] = {'''data''': DATA} with open(lowerCamelCase__ , '''w''' ) as f: json.dump(lowerCamelCase__ , lowerCamelCase__ ) return path @pytest.fixture(scope='''session''' ) def snake_case__ ( lowerCamelCase__ : str ) -> Dict: A_ : Tuple = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.json''' ) A_ : Optional[Any] = {'''data''': DATA_DICT_OF_LISTS} with open(lowerCamelCase__ , '''w''' ) as f: json.dump(lowerCamelCase__ , lowerCamelCase__ ) return path @pytest.fixture(scope='''session''' ) def snake_case__ ( lowerCamelCase__ : List[Any] ) -> Optional[int]: A_ : Union[str, Any] = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.jsonl''' ) with open(lowerCamelCase__ , '''w''' ) as f: for item in DATA: f.write(json.dumps(lowerCamelCase__ ) + '''\n''' ) return path @pytest.fixture(scope='''session''' ) def snake_case__ ( lowerCamelCase__ : Union[str, Any] ) -> List[str]: A_ : Dict = str(tmp_path_factory.mktemp('''data''' ) / '''dataset2.jsonl''' ) with open(lowerCamelCase__ , '''w''' ) as f: for item in DATA: f.write(json.dumps(lowerCamelCase__ ) + '''\n''' ) return path @pytest.fixture(scope='''session''' ) def snake_case__ ( lowerCamelCase__ : int ) -> List[Any]: A_ : Any = str(tmp_path_factory.mktemp('''data''' ) / '''dataset_312.jsonl''' ) with open(lowerCamelCase__ , '''w''' ) as f: for item in DATA_312: f.write(json.dumps(lowerCamelCase__ ) + '''\n''' ) return path @pytest.fixture(scope='''session''' ) def snake_case__ ( lowerCamelCase__ : Optional[Any] ) -> Optional[Any]: A_ : List[str] = str(tmp_path_factory.mktemp('''data''' ) / '''dataset-str.jsonl''' ) with open(lowerCamelCase__ , '''w''' ) as f: for item in DATA_STR: f.write(json.dumps(lowerCamelCase__ ) + '''\n''' ) return path @pytest.fixture(scope='''session''' ) def snake_case__ ( lowerCamelCase__ : Tuple , lowerCamelCase__ : str ) -> List[Any]: import gzip A_ : Optional[int] = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.txt.gz''' ) with open(lowerCamelCase__ , '''rb''' ) as orig_file: with gzip.open(lowerCamelCase__ , '''wb''' ) as zipped_file: zipped_file.writelines(lowerCamelCase__ ) return path @pytest.fixture(scope='''session''' ) def snake_case__ ( lowerCamelCase__ : Tuple , lowerCamelCase__ : Union[str, Any] ) -> List[str]: import gzip A_ : str = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.jsonl.gz''' ) with open(lowerCamelCase__ , '''rb''' ) as orig_file: with gzip.open(lowerCamelCase__ , '''wb''' ) as zipped_file: zipped_file.writelines(lowerCamelCase__ ) return path @pytest.fixture(scope='''session''' ) def snake_case__ ( lowerCamelCase__ : Any , lowerCamelCase__ : List[str] , lowerCamelCase__ : List[Any] ) -> str: A_ : Any = tmp_path_factory.mktemp('''data''' ) / '''dataset.jsonl.zip''' with zipfile.ZipFile(lowerCamelCase__ , '''w''' ) as f: f.write(lowerCamelCase__ , arcname=os.path.basename(lowerCamelCase__ ) ) f.write(lowerCamelCase__ , arcname=os.path.basename(lowerCamelCase__ ) ) return path @pytest.fixture(scope='''session''' ) def snake_case__ ( lowerCamelCase__ : Any , lowerCamelCase__ : int , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Optional[int] ) -> Union[str, Any]: A_ : str = tmp_path_factory.mktemp('''data''' ) / '''dataset_nested.jsonl.zip''' with zipfile.ZipFile(lowerCamelCase__ , '''w''' ) as f: f.write(lowerCamelCase__ , arcname=os.path.join('''nested''' , os.path.basename(lowerCamelCase__ ) ) ) return path @pytest.fixture(scope='''session''' ) def snake_case__ ( lowerCamelCase__ : List[Any] , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : List[Any] ) -> List[str]: A_ : Optional[int] = tmp_path_factory.mktemp('''data''' ) / '''dataset_with_dir.jsonl.zip''' with zipfile.ZipFile(lowerCamelCase__ , '''w''' ) as f: f.write(lowerCamelCase__ , arcname=os.path.join('''main_dir''' , os.path.basename(lowerCamelCase__ ) ) ) f.write(lowerCamelCase__ , arcname=os.path.join('''main_dir''' , os.path.basename(lowerCamelCase__ ) ) ) return path @pytest.fixture(scope='''session''' ) def snake_case__ ( lowerCamelCase__ : Dict , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : int ) -> Tuple: A_ : List[Any] = tmp_path_factory.mktemp('''data''' ) / '''dataset.jsonl.tar''' with tarfile.TarFile(lowerCamelCase__ , '''w''' ) as f: f.add(lowerCamelCase__ , arcname=os.path.basename(lowerCamelCase__ ) ) f.add(lowerCamelCase__ , arcname=os.path.basename(lowerCamelCase__ ) ) return path @pytest.fixture(scope='''session''' ) def snake_case__ ( lowerCamelCase__ : Any , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Tuple , lowerCamelCase__ : List[Any] ) -> Any: A_ : int = tmp_path_factory.mktemp('''data''' ) / '''dataset_nested.jsonl.tar''' with tarfile.TarFile(lowerCamelCase__ , '''w''' ) as f: f.add(lowerCamelCase__ , arcname=os.path.join('''nested''' , os.path.basename(lowerCamelCase__ ) ) ) return path @pytest.fixture(scope='''session''' ) def snake_case__ ( lowerCamelCase__ : Tuple ) -> Tuple: A_ : Any = ['''0''', '''1''', '''2''', '''3'''] A_ : Optional[int] = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.txt''' ) with open(lowerCamelCase__ , '''w''' ) as f: for item in data: f.write(item + '''\n''' ) return path @pytest.fixture(scope='''session''' ) def snake_case__ ( lowerCamelCase__ : Dict ) -> Any: A_ : Union[str, Any] = ['''0''', '''1''', '''2''', '''3'''] A_ : int = str(tmp_path_factory.mktemp('''data''' ) / '''dataset2.txt''' ) with open(lowerCamelCase__ , '''w''' ) as f: for item in data: f.write(item + '''\n''' ) return path @pytest.fixture(scope='''session''' ) def snake_case__ ( lowerCamelCase__ : int ) -> Union[str, Any]: A_ : Union[str, Any] = ['''0''', '''1''', '''2''', '''3'''] A_ : str = tmp_path_factory.mktemp('''data''' ) / '''dataset.abc''' with open(lowerCamelCase__ , '''w''' ) as f: for item in data: f.write(item + '''\n''' ) return path @pytest.fixture(scope='''session''' ) def snake_case__ ( lowerCamelCase__ : List[str] , lowerCamelCase__ : Any , lowerCamelCase__ : Union[str, Any] ) -> Optional[int]: A_ : Dict = tmp_path_factory.mktemp('''data''' ) / '''dataset.text.zip''' with zipfile.ZipFile(lowerCamelCase__ , '''w''' ) as f: f.write(lowerCamelCase__ , arcname=os.path.basename(lowerCamelCase__ ) ) f.write(lowerCamelCase__ , arcname=os.path.basename(lowerCamelCase__ ) ) return path @pytest.fixture(scope='''session''' ) def snake_case__ ( lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Any ) -> List[Any]: A_ : int = tmp_path_factory.mktemp('''data''' ) / '''dataset_with_dir.text.zip''' with zipfile.ZipFile(lowerCamelCase__ , '''w''' ) as f: f.write(lowerCamelCase__ , arcname=os.path.join('''main_dir''' , os.path.basename(lowerCamelCase__ ) ) ) f.write(lowerCamelCase__ , arcname=os.path.join('''main_dir''' , os.path.basename(lowerCamelCase__ ) ) ) return path @pytest.fixture(scope='''session''' ) def snake_case__ ( lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : List[Any] ) -> str: A_ : Union[str, Any] = tmp_path_factory.mktemp('''data''' ) / '''dataset.ext.zip''' with zipfile.ZipFile(lowerCamelCase__ , '''w''' ) as f: f.write(lowerCamelCase__ , arcname=os.path.basename('''unsupported.ext''' ) ) f.write(lowerCamelCase__ , arcname=os.path.basename('''unsupported_2.ext''' ) ) return path @pytest.fixture(scope='''session''' ) def snake_case__ ( lowerCamelCase__ : int ) -> List[Any]: A_ : Union[str, Any] = '''\n'''.join(['''First''', '''Second\u2029with Unicode new line''', '''Third'''] ) A_ : Any = str(tmp_path_factory.mktemp('''data''' ) / '''dataset_with_unicode_new_lines.txt''' ) with open(lowerCamelCase__ , '''w''' , encoding='''utf-8''' ) as f: f.write(lowerCamelCase__ ) return path @pytest.fixture(scope='''session''' ) def snake_case__ ( ) -> List[Any]: return os.path.join('''tests''' , '''features''' , '''data''' , '''test_image_rgb.jpg''' ) @pytest.fixture(scope='''session''' ) def snake_case__ ( ) -> Optional[int]: return os.path.join('''tests''' , '''features''' , '''data''' , '''test_audio_44100.wav''' ) @pytest.fixture(scope='''session''' ) def snake_case__ ( lowerCamelCase__ : int , lowerCamelCase__ : int ) -> List[str]: A_ : Tuple = tmp_path_factory.mktemp('''data''' ) / '''dataset.img.zip''' with zipfile.ZipFile(lowerCamelCase__ , '''w''' ) as f: f.write(lowerCamelCase__ , arcname=os.path.basename(lowerCamelCase__ ) ) f.write(lowerCamelCase__ , arcname=os.path.basename(lowerCamelCase__ ).replace('''.jpg''' , '''2.jpg''' ) ) return path @pytest.fixture(scope='''session''' ) def snake_case__ ( lowerCamelCase__ : Any ) -> Tuple: A_ : Union[str, Any] = tmp_path_factory.mktemp('''data_dir''' ) (data_dir / "subdir").mkdir() with open(data_dir / '''subdir''' / '''train.txt''' , '''w''' ) as f: f.write('''foo\n''' * 1_0 ) with open(data_dir / '''subdir''' / '''test.txt''' , '''w''' ) as f: f.write('''bar\n''' * 1_0 ) # hidden file with open(data_dir / '''subdir''' / '''.test.txt''' , '''w''' ) as f: f.write('''bar\n''' * 1_0 ) # hidden directory (data_dir / ".subdir").mkdir() with open(data_dir / '''.subdir''' / '''train.txt''' , '''w''' ) as f: f.write('''foo\n''' * 1_0 ) with open(data_dir / '''.subdir''' / '''test.txt''' , '''w''' ) as f: f.write('''bar\n''' * 1_0 ) return data_dir
367
'''simple docstring''' from __future__ import annotations class UpperCamelCase_ : """simple docstring""" def __init__( self : Optional[int] , _lowerCamelCase : int ): """simple docstring""" A_ : Union[str, Any] = order # a_{0} ... a_{k} A_ : Union[str, Any] = [1.0] + [0.0] * order # b_{0} ... b_{k} A_ : int = [1.0] + [0.0] * order # x[n-1] ... x[n-k] A_ : str = [0.0] * self.order # y[n-1] ... y[n-k] A_ : Optional[Any] = [0.0] * self.order def _a ( self : Dict , _lowerCamelCase : list[float] , _lowerCamelCase : list[float] ): """simple docstring""" if len(_lowerCamelCase ) < self.order: A_ : Any = [1.0, *a_coeffs] if len(_lowerCamelCase ) != self.order + 1: A_ : List[Any] = ( f'Expected a_coeffs to have {self.order + 1} elements ' f'for {self.order}-order filter, got {len(_lowerCamelCase )}' ) raise ValueError(_lowerCamelCase ) if len(_lowerCamelCase ) != self.order + 1: A_ : Union[str, Any] = ( f'Expected b_coeffs to have {self.order + 1} elements ' f'for {self.order}-order filter, got {len(_lowerCamelCase )}' ) raise ValueError(_lowerCamelCase ) A_ : Tuple = a_coeffs A_ : str = b_coeffs def _a ( self : Tuple , _lowerCamelCase : float ): """simple docstring""" A_ : Any = 0.0 # Start at index 1 and do index 0 at the end. for i in range(1 , self.order + 1 ): result += ( self.b_coeffs[i] * self.input_history[i - 1] - self.a_coeffs[i] * self.output_history[i - 1] ) A_ : str = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0] A_ : Optional[Any] = self.input_history[:-1] A_ : List[str] = self.output_history[:-1] A_ : Tuple = sample A_ : Tuple = result return result
4
0
'''simple docstring''' def snake_case__ ( ) -> int: return 1 def snake_case__ ( lowerCamelCase__ : int ) -> int: return 0 if x < 0 else two_pence(x - 2 ) + one_pence() def snake_case__ ( lowerCamelCase__ : int ) -> int: return 0 if x < 0 else five_pence(x - 5 ) + two_pence(lowerCamelCase__ ) def snake_case__ ( lowerCamelCase__ : int ) -> int: return 0 if x < 0 else ten_pence(x - 1_0 ) + five_pence(lowerCamelCase__ ) def snake_case__ ( lowerCamelCase__ : int ) -> int: return 0 if x < 0 else twenty_pence(x - 2_0 ) + ten_pence(lowerCamelCase__ ) def snake_case__ ( lowerCamelCase__ : int ) -> int: return 0 if x < 0 else fifty_pence(x - 5_0 ) + twenty_pence(lowerCamelCase__ ) def snake_case__ ( lowerCamelCase__ : int ) -> int: return 0 if x < 0 else one_pound(x - 1_0_0 ) + fifty_pence(lowerCamelCase__ ) def snake_case__ ( lowerCamelCase__ : int ) -> int: return 0 if x < 0 else two_pound(x - 2_0_0 ) + one_pound(lowerCamelCase__ ) def snake_case__ ( lowerCamelCase__ : int = 2_0_0 ) -> int: return two_pound(lowerCamelCase__ ) if __name__ == "__main__": print(solution(int(input().strip())))
368
'''simple docstring''' class UpperCamelCase_ : """simple docstring""" def __init__( self : Optional[Any] , _lowerCamelCase : Union[str, Any] ): """simple docstring""" A_ : Union[str, Any] = val A_ : Tuple = None A_ : Any = None def _a ( self : Tuple , _lowerCamelCase : List[Any] ): """simple docstring""" if self.val: if val < self.val: if self.left is None: A_ : int = Node(_lowerCamelCase ) else: self.left.insert(_lowerCamelCase ) elif val > self.val: if self.right is None: A_ : List[str] = Node(_lowerCamelCase ) else: self.right.insert(_lowerCamelCase ) else: A_ : Any = val def snake_case__ ( lowerCamelCase__ : Any , lowerCamelCase__ : Optional[int] ) -> str: # Recursive traversal if root: inorder(root.left , lowerCamelCase__ ) res.append(root.val ) inorder(root.right , lowerCamelCase__ ) def snake_case__ ( lowerCamelCase__ : Optional[int] ) -> Tuple: # Build BST if len(lowerCamelCase__ ) == 0: return arr A_ : Dict = Node(arr[0] ) for i in range(1 , len(lowerCamelCase__ ) ): root.insert(arr[i] ) # Traverse BST in order. A_ : Tuple = [] inorder(lowerCamelCase__ , lowerCamelCase__ ) return res if __name__ == "__main__": print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
4
0
'''simple docstring''' import unittest from transformers import MraConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_torch_available(): import torch from transformers import ( MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, MraModel, ) from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST class UpperCamelCase_ : """simple docstring""" def __init__( self : Optional[Any] , _lowerCamelCase : str , _lowerCamelCase : int=2 , _lowerCamelCase : Any=8 , _lowerCamelCase : Tuple=True , _lowerCamelCase : List[Any]=True , _lowerCamelCase : int=True , _lowerCamelCase : Tuple=True , _lowerCamelCase : Tuple=99 , _lowerCamelCase : List[str]=16 , _lowerCamelCase : Dict=5 , _lowerCamelCase : str=2 , _lowerCamelCase : Dict=36 , _lowerCamelCase : Union[str, Any]="gelu" , _lowerCamelCase : Optional[int]=0.0 , _lowerCamelCase : Union[str, Any]=0.0 , _lowerCamelCase : Dict=512 , _lowerCamelCase : str=16 , _lowerCamelCase : Optional[Any]=2 , _lowerCamelCase : Union[str, Any]=0.02 , _lowerCamelCase : Tuple=3 , _lowerCamelCase : Optional[int]=4 , _lowerCamelCase : List[Any]=None , ): """simple docstring""" A_ : Optional[Any] = parent A_ : Dict = batch_size A_ : str = seq_length A_ : Any = is_training A_ : List[Any] = use_input_mask A_ : Union[str, Any] = use_token_type_ids A_ : Optional[int] = use_labels A_ : Any = vocab_size A_ : List[str] = hidden_size A_ : Union[str, Any] = num_hidden_layers A_ : Any = num_attention_heads A_ : List[str] = intermediate_size A_ : List[str] = hidden_act A_ : Optional[Any] = hidden_dropout_prob A_ : Union[str, Any] = attention_probs_dropout_prob A_ : Optional[Any] = max_position_embeddings A_ : int = type_vocab_size A_ : List[str] = type_sequence_label_size A_ : Tuple = initializer_range A_ : int = num_labels A_ : Optional[Any] = num_choices A_ : Union[str, Any] = scope def _a ( self : str ): """simple docstring""" A_ : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A_ : Any = None if self.use_input_mask: A_ : List[Any] = random_attention_mask([self.batch_size, self.seq_length] ) A_ : List[Any] = None if self.use_token_type_ids: A_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) A_ : List[Any] = None A_ : Union[str, Any] = None A_ : Tuple = None if self.use_labels: A_ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) A_ : Dict = ids_tensor([self.batch_size] , self.num_choices ) A_ : int = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def _a ( self : str ): """simple docstring""" return MraConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowerCamelCase , initializer_range=self.initializer_range , ) def _a ( self : Any ): """simple docstring""" A_ : List[Any] = self.get_config() A_ : str = 300 return config def _a ( self : Dict ): """simple docstring""" ( A_ ) : List[Any] = self.prepare_config_and_inputs() A_ : List[str] = True A_ : Dict = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) A_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def _a ( self : str , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Optional[int] , _lowerCamelCase : int , _lowerCamelCase : Dict , _lowerCamelCase : Any , _lowerCamelCase : Any , _lowerCamelCase : Dict ): """simple docstring""" A_ : List[str] = MraModel(config=_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() A_ : List[str] = model(_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase ) A_ : Any = model(_lowerCamelCase , token_type_ids=_lowerCamelCase ) A_ : Optional[Any] = model(_lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _a ( self : Dict , _lowerCamelCase : Tuple , _lowerCamelCase : Any , _lowerCamelCase : Dict , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Optional[int] , _lowerCamelCase : List[Any] , _lowerCamelCase : List[str] , _lowerCamelCase : List[Any] , _lowerCamelCase : List[str] , ): """simple docstring""" A_ : Dict = True A_ : str = MraModel(_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() A_ : Union[str, Any] = model( _lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , encoder_hidden_states=_lowerCamelCase , encoder_attention_mask=_lowerCamelCase , ) A_ : str = model( _lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , encoder_hidden_states=_lowerCamelCase , ) A_ : List[str] = model(_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _a ( self : List[str] , _lowerCamelCase : List[str] , _lowerCamelCase : int , _lowerCamelCase : List[str] , _lowerCamelCase : Optional[int] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : int ): """simple docstring""" A_ : List[Any] = MraForMaskedLM(config=_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() A_ : Optional[Any] = model(_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , labels=_lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _a ( self : Dict , _lowerCamelCase : int , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Dict , _lowerCamelCase : str , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Optional[Any] ): """simple docstring""" A_ : Optional[int] = MraForQuestionAnswering(config=_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() A_ : List[str] = model( _lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , start_positions=_lowerCamelCase , end_positions=_lowerCamelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _a ( self : Any , _lowerCamelCase : List[str] , _lowerCamelCase : Dict , _lowerCamelCase : List[str] , _lowerCamelCase : Tuple , _lowerCamelCase : Tuple , _lowerCamelCase : List[str] , _lowerCamelCase : int ): """simple docstring""" A_ : Optional[Any] = self.num_labels A_ : Any = MraForSequenceClassification(_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() A_ : List[str] = model(_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , labels=_lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _a ( self : Tuple , _lowerCamelCase : str , _lowerCamelCase : Any , _lowerCamelCase : Dict , _lowerCamelCase : Any , _lowerCamelCase : Any , _lowerCamelCase : Any , _lowerCamelCase : Optional[Any] ): """simple docstring""" A_ : int = self.num_labels A_ : Optional[int] = MraForTokenClassification(config=_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() A_ : Any = model(_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , labels=_lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _a ( self : int , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : List[str] , _lowerCamelCase : int , _lowerCamelCase : List[str] , _lowerCamelCase : List[str] , _lowerCamelCase : List[str] , _lowerCamelCase : Optional[int] ): """simple docstring""" A_ : Union[str, Any] = self.num_choices A_ : str = MraForMultipleChoice(config=_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() A_ : List[Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() A_ : Optional[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() A_ : List[str] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() A_ : Any = model( _lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , labels=_lowerCamelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def _a ( self : Optional[Any] ): """simple docstring""" A_ : Tuple = self.prepare_config_and_inputs() ( A_ ) : Optional[Any] = config_and_inputs A_ : Dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class UpperCamelCase_ (a__, unittest.TestCase ): """simple docstring""" _lowerCAmelCase = ( ( MraModel, MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, ) if is_torch_available() else () ) _lowerCAmelCase = False _lowerCAmelCase = False _lowerCAmelCase = False _lowerCAmelCase = False _lowerCAmelCase = () def _a ( self : str ): """simple docstring""" A_ : List[str] = MraModelTester(self ) A_ : Optional[int] = ConfigTester(self , config_class=_lowerCamelCase , hidden_size=37 ) def _a ( self : int ): """simple docstring""" self.config_tester.run_common_tests() def _a ( self : str ): """simple docstring""" A_ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCamelCase ) def _a ( self : Any ): """simple docstring""" A_ : List[str] = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: A_ : List[Any] = type self.model_tester.create_and_check_model(*_lowerCamelCase ) def _a ( self : Optional[Any] ): """simple docstring""" A_ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*_lowerCamelCase ) def _a ( self : Tuple ): """simple docstring""" A_ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*_lowerCamelCase ) def _a ( self : List[Any] ): """simple docstring""" A_ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*_lowerCamelCase ) def _a ( self : Dict ): """simple docstring""" A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*_lowerCamelCase ) def _a ( self : Union[str, Any] ): """simple docstring""" A_ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*_lowerCamelCase ) @slow def _a ( self : int ): """simple docstring""" for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A_ : Optional[Any] = MraModel.from_pretrained(_lowerCamelCase ) self.assertIsNotNone(_lowerCamelCase ) @unittest.skip(reason='''MRA does not output attentions''' ) def _a ( self : Union[str, Any] ): """simple docstring""" return @require_torch class UpperCamelCase_ (unittest.TestCase ): """simple docstring""" @slow def _a ( self : Dict ): """simple docstring""" A_ : str = MraModel.from_pretrained('''uw-madison/mra-base-512-4''' ) A_ : int = torch.arange(256 ).unsqueeze(0 ) with torch.no_grad(): A_ : List[str] = model(_lowerCamelCase )[0] A_ : List[str] = torch.Size((1, 256, 768) ) self.assertEqual(output.shape , _lowerCamelCase ) A_ : Any = torch.tensor( [[[-0.01_40, 0.08_30, -0.03_81], [0.15_46, 0.14_02, 0.02_20], [0.11_62, 0.08_51, 0.01_65]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , _lowerCamelCase , atol=1E-4 ) ) @slow def _a ( self : Tuple ): """simple docstring""" A_ : Dict = MraForMaskedLM.from_pretrained('''uw-madison/mra-base-512-4''' ) A_ : int = torch.arange(256 ).unsqueeze(0 ) with torch.no_grad(): A_ : Tuple = model(_lowerCamelCase )[0] A_ : List[str] = 50265 A_ : Union[str, Any] = torch.Size((1, 256, vocab_size) ) self.assertEqual(output.shape , _lowerCamelCase ) A_ : Optional[int] = torch.tensor( [[[9.25_95, -3.60_38, 11.88_19], [9.38_69, -3.26_93, 11.09_56], [11.85_24, -3.49_38, 13.12_10]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , _lowerCamelCase , atol=1E-4 ) ) @slow def _a ( self : Union[str, Any] ): """simple docstring""" A_ : int = MraForMaskedLM.from_pretrained('''uw-madison/mra-base-4096-8-d3''' ) A_ : Optional[Any] = torch.arange(4096 ).unsqueeze(0 ) with torch.no_grad(): A_ : str = model(_lowerCamelCase )[0] A_ : Dict = 50265 A_ : Any = torch.Size((1, 4096, vocab_size) ) self.assertEqual(output.shape , _lowerCamelCase ) A_ : List[Any] = torch.tensor( [[[5.47_89, -2.35_64, 7.50_64], [7.90_67, -1.33_69, 9.96_68], [9.07_12, -1.81_06, 7.03_80]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , _lowerCamelCase , atol=1E-4 ) )
369
'''simple docstring''' def snake_case__ ( lowerCamelCase__ : list ) -> list: if len(lowerCamelCase__ ) <= 1: return [tuple(lowerCamelCase__ )] A_ : List[str] = [] def generate(lowerCamelCase__ : int , lowerCamelCase__ : list ): if k == 1: res.append(tuple(arr[:] ) ) return generate(k - 1 , lowerCamelCase__ ) for i in range(k - 1 ): if k % 2 == 0: # k is even A_ ,A_ : Optional[int] = arr[k - 1], arr[i] else: # k is odd A_ ,A_ : Union[str, Any] = arr[k - 1], arr[0] generate(k - 1 , lowerCamelCase__ ) generate(len(lowerCamelCase__ ) , lowerCamelCase__ ) return res if __name__ == "__main__": snake_case__ = input("""Enter numbers separated by a comma:\n""").strip() snake_case__ = [int(item) for item in user_input.split(""",""")] print(heaps(arr))
4
0
'''simple docstring''' from graphs.minimum_spanning_tree_kruskal import kruskal def snake_case__ ( ) -> Optional[Any]: A_ : Optional[int] = 9 A_ : str = [ [0, 1, 4], [0, 7, 8], [1, 2, 8], [7, 8, 7], [7, 6, 1], [2, 8, 2], [8, 6, 6], [2, 3, 7], [2, 5, 4], [6, 5, 2], [3, 5, 1_4], [3, 4, 9], [5, 4, 1_0], [1, 7, 1_1], ] A_ : List[Any] = kruskal(lowerCamelCase__ , lowerCamelCase__ ) A_ : Optional[int] = [ [7, 6, 1], [2, 8, 2], [6, 5, 2], [0, 1, 4], [2, 5, 4], [2, 3, 7], [0, 7, 8], [3, 4, 9], ] assert sorted(lowerCamelCase__ ) == sorted(lowerCamelCase__ )
370
'''simple docstring''' import unittest from queue import Empty from threading import Thread from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available from transformers.testing_utils import CaptureStdout, require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers import AutoModelForCausalLM @require_torch class UpperCamelCase_ (unittest.TestCase ): """simple docstring""" def _a ( self : Dict ): """simple docstring""" A_ : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) A_ : Tuple = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowerCamelCase ) A_ : Dict = -1 A_ : List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCamelCase ) A_ : Any = model.generate(_lowerCamelCase , max_new_tokens=10 , do_sample=_lowerCamelCase ) A_ : List[str] = tokenizer.decode(greedy_ids[0] ) with CaptureStdout() as cs: A_ : List[str] = TextStreamer(_lowerCamelCase ) model.generate(_lowerCamelCase , max_new_tokens=10 , do_sample=_lowerCamelCase , streamer=_lowerCamelCase ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer A_ : Dict = cs.out[:-1] self.assertEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : Tuple ): """simple docstring""" A_ : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) A_ : List[str] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowerCamelCase ) A_ : Dict = -1 A_ : List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCamelCase ) A_ : Optional[int] = model.generate(_lowerCamelCase , max_new_tokens=10 , do_sample=_lowerCamelCase ) A_ : str = tokenizer.decode(greedy_ids[0] ) A_ : int = TextIteratorStreamer(_lowerCamelCase ) A_ : List[Any] = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer} A_ : List[Any] = Thread(target=model.generate , kwargs=_lowerCamelCase ) thread.start() A_ : List[Any] = '''''' for new_text in streamer: streamer_text += new_text self.assertEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : int ): """simple docstring""" A_ : List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) A_ : List[Any] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowerCamelCase ) A_ : List[str] = -1 A_ : Any = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCamelCase ) A_ : Tuple = model.generate(_lowerCamelCase , max_new_tokens=10 , do_sample=_lowerCamelCase ) A_ : Tuple = greedy_ids[:, input_ids.shape[1] :] A_ : Tuple = tokenizer.decode(new_greedy_ids[0] ) with CaptureStdout() as cs: A_ : Any = TextStreamer(_lowerCamelCase , skip_prompt=_lowerCamelCase ) model.generate(_lowerCamelCase , max_new_tokens=10 , do_sample=_lowerCamelCase , streamer=_lowerCamelCase ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer A_ : Any = cs.out[:-1] self.assertEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : List[Any] ): """simple docstring""" A_ : List[Any] = AutoTokenizer.from_pretrained('''distilgpt2''' ) A_ : Tuple = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(_lowerCamelCase ) A_ : List[Any] = -1 A_ : Union[str, Any] = torch.ones((1, 5) , device=_lowerCamelCase ).long() * model.config.bos_token_id with CaptureStdout() as cs: A_ : List[Any] = TextStreamer(_lowerCamelCase , skip_special_tokens=_lowerCamelCase ) model.generate(_lowerCamelCase , max_new_tokens=1 , do_sample=_lowerCamelCase , streamer=_lowerCamelCase ) # The prompt contains a special token, so the streamer should not print it. As such, the output text, when # re-tokenized, must only contain one token A_ : List[str] = cs.out[:-1] # Remove the final "\n" A_ : List[Any] = tokenizer(_lowerCamelCase , return_tensors='''pt''' ) self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) ) def _a ( self : Union[str, Any] ): """simple docstring""" A_ : str = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) A_ : str = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowerCamelCase ) A_ : Union[str, Any] = -1 A_ : Union[str, Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCamelCase ) A_ : List[str] = TextIteratorStreamer(_lowerCamelCase , timeout=0.0_01 ) A_ : str = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer} A_ : List[str] = Thread(target=model.generate , kwargs=_lowerCamelCase ) thread.start() # The streamer will timeout after 0.001 seconds, so an exception will be raised with self.assertRaises(_lowerCamelCase ): A_ : str = '''''' for new_text in streamer: streamer_text += new_text
4
0
'''simple docstring''' import warnings from ...utils import logging from .image_processing_flava import FlavaImageProcessor snake_case__ = logging.get_logger(__name__) class UpperCamelCase_ (a__ ): """simple docstring""" def __init__( self : Tuple , *_lowerCamelCase : List[str] , **_lowerCamelCase : Tuple ): """simple docstring""" warnings.warn( '''The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please''' ''' use FlavaImageProcessor instead.''' , _lowerCamelCase , ) super().__init__(*_lowerCamelCase , **_lowerCamelCase )
371
'''simple docstring''' import heapq def snake_case__ ( lowerCamelCase__ : dict ) -> set[int]: A_ : list[list] = [] # for each node and his adjacency list add them and the rank of the node to queue # using heapq module the queue will be filled like a Priority Queue # heapq works with a min priority queue, so I used -1*len(v) to build it for key, value in graph.items(): # O(log(n)) heapq.heappush(lowerCamelCase__ , [-1 * len(lowerCamelCase__ ), (key, value)] ) # chosen_vertices = set of chosen vertices A_ : str = set() # while queue isn't empty and there are still edges # (queue[0][0] is the rank of the node with max rank) while queue and queue[0][0] != 0: # extract vertex with max rank from queue and add it to chosen_vertices A_ : Tuple = heapq.heappop(lowerCamelCase__ )[1][0] chosen_vertices.add(lowerCamelCase__ ) # Remove all arcs adjacent to argmax for elem in queue: # if v haven't adjacent node, skip if elem[0] == 0: continue # if argmax is reachable from elem # remove argmax from elem's adjacent list and update his rank if argmax in elem[1][1]: A_ : List[str] = elem[1][1].index(lowerCamelCase__ ) del elem[1][1][index] elem[0] += 1 # re-order the queue heapq.heapify(lowerCamelCase__ ) return chosen_vertices if __name__ == "__main__": import doctest doctest.testmod() snake_case__ = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]} print(F'Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}')
4
0
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging snake_case__ = logging.get_logger(__name__) snake_case__ = { """facebook/levit-128S""": """https://huggingface.co/facebook/levit-128S/resolve/main/config.json""", # See all LeViT models at https://huggingface.co/models?filter=levit } class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = 'levit' def __init__( self : int , _lowerCamelCase : List[Any]=224 , _lowerCamelCase : Union[str, Any]=3 , _lowerCamelCase : int=3 , _lowerCamelCase : Tuple=2 , _lowerCamelCase : Optional[int]=1 , _lowerCamelCase : List[str]=16 , _lowerCamelCase : Tuple=[128, 256, 384] , _lowerCamelCase : List[str]=[4, 8, 12] , _lowerCamelCase : Optional[int]=[4, 4, 4] , _lowerCamelCase : Union[str, Any]=[16, 16, 16] , _lowerCamelCase : int=0 , _lowerCamelCase : Union[str, Any]=[2, 2, 2] , _lowerCamelCase : Optional[Any]=[2, 2, 2] , _lowerCamelCase : Optional[Any]=0.02 , **_lowerCamelCase : List[Any] , ): """simple docstring""" super().__init__(**_lowerCamelCase ) A_ : List[Any] = image_size A_ : List[str] = num_channels A_ : Tuple = kernel_size A_ : Optional[int] = stride A_ : Dict = padding A_ : Tuple = hidden_sizes A_ : Tuple = num_attention_heads A_ : int = depths A_ : Any = key_dim A_ : Any = drop_path_rate A_ : Tuple = patch_size A_ : Union[str, Any] = attention_ratio A_ : str = mlp_ratio A_ : Optional[Any] = initializer_range A_ : Union[str, Any] = [ ['''Subsample''', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2], ['''Subsample''', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2], ] class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = version.parse('1.11' ) @property def _a ( self : Optional[int] ): """simple docstring""" return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def _a ( self : int ): """simple docstring""" return 1E-4
350
'''simple docstring''' import argparse from collections import OrderedDict from pathlib import Path import torch from huggingface_hub import hf_hub_download from PIL import Image from torchvision.transforms import functional as F from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection from transformers.utils import logging logging.set_verbosity_info() snake_case__ = logging.get_logger(__name__) # here we list all keys to be renamed (original name on the left, our name on the right) snake_case__ = [] for i in range(6): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (F'transformer.encoder.layers.{i}.self_attn.out_proj.weight', F'encoder.layers.{i}.self_attn.out_proj.weight') ) rename_keys.append( (F'transformer.encoder.layers.{i}.self_attn.out_proj.bias', F'encoder.layers.{i}.self_attn.out_proj.bias') ) rename_keys.append((F'transformer.encoder.layers.{i}.linear1.weight', F'encoder.layers.{i}.fc1.weight')) rename_keys.append((F'transformer.encoder.layers.{i}.linear1.bias', F'encoder.layers.{i}.fc1.bias')) rename_keys.append((F'transformer.encoder.layers.{i}.linear2.weight', F'encoder.layers.{i}.fc2.weight')) rename_keys.append((F'transformer.encoder.layers.{i}.linear2.bias', F'encoder.layers.{i}.fc2.bias')) rename_keys.append( (F'transformer.encoder.layers.{i}.norm1.weight', F'encoder.layers.{i}.self_attn_layer_norm.weight') ) rename_keys.append((F'transformer.encoder.layers.{i}.norm1.bias', F'encoder.layers.{i}.self_attn_layer_norm.bias')) rename_keys.append((F'transformer.encoder.layers.{i}.norm2.weight', F'encoder.layers.{i}.final_layer_norm.weight')) rename_keys.append((F'transformer.encoder.layers.{i}.norm2.bias', F'encoder.layers.{i}.final_layer_norm.bias')) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( (F'transformer.decoder.layers.{i}.self_attn.out_proj.weight', F'decoder.layers.{i}.self_attn.out_proj.weight') ) rename_keys.append( (F'transformer.decoder.layers.{i}.self_attn.out_proj.bias', F'decoder.layers.{i}.self_attn.out_proj.bias') ) rename_keys.append( ( F'transformer.decoder.layers.{i}.multihead_attn.out_proj.weight', F'decoder.layers.{i}.encoder_attn.out_proj.weight', ) ) rename_keys.append( ( F'transformer.decoder.layers.{i}.multihead_attn.out_proj.bias', F'decoder.layers.{i}.encoder_attn.out_proj.bias', ) ) rename_keys.append((F'transformer.decoder.layers.{i}.linear1.weight', F'decoder.layers.{i}.fc1.weight')) rename_keys.append((F'transformer.decoder.layers.{i}.linear1.bias', F'decoder.layers.{i}.fc1.bias')) rename_keys.append((F'transformer.decoder.layers.{i}.linear2.weight', F'decoder.layers.{i}.fc2.weight')) rename_keys.append((F'transformer.decoder.layers.{i}.linear2.bias', F'decoder.layers.{i}.fc2.bias')) rename_keys.append( (F'transformer.decoder.layers.{i}.norm1.weight', F'decoder.layers.{i}.self_attn_layer_norm.weight') ) rename_keys.append((F'transformer.decoder.layers.{i}.norm1.bias', F'decoder.layers.{i}.self_attn_layer_norm.bias')) rename_keys.append( (F'transformer.decoder.layers.{i}.norm2.weight', F'decoder.layers.{i}.encoder_attn_layer_norm.weight') ) rename_keys.append( (F'transformer.decoder.layers.{i}.norm2.bias', F'decoder.layers.{i}.encoder_attn_layer_norm.bias') ) rename_keys.append((F'transformer.decoder.layers.{i}.norm3.weight', F'decoder.layers.{i}.final_layer_norm.weight')) rename_keys.append((F'transformer.decoder.layers.{i}.norm3.bias', F'decoder.layers.{i}.final_layer_norm.bias')) # convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads rename_keys.extend( [ ("""input_proj.weight""", """input_projection.weight"""), ("""input_proj.bias""", """input_projection.bias"""), ("""query_embed.weight""", """query_position_embeddings.weight"""), ("""transformer.encoder.norm.weight""", """encoder.layernorm.weight"""), ("""transformer.encoder.norm.bias""", """encoder.layernorm.bias"""), ("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""), ("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""), ("""class_embed.weight""", """class_labels_classifier.weight"""), ("""class_embed.bias""", """class_labels_classifier.bias"""), ("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""), ("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""), ("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""), ("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""), ("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""), ("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""), ] ) def snake_case__ ( lowerCamelCase__ : List[Any] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : List[Any] ) -> Optional[Any]: A_ : Tuple = state_dict.pop(lowerCamelCase__ ) A_ : Optional[Any] = val def snake_case__ ( lowerCamelCase__ : Dict ) -> Any: A_ : int = OrderedDict() for key, value in state_dict.items(): if "backbone.0.body" in key: A_ : int = key.replace('''backbone.0.body''' , '''backbone.conv_encoder.model''' ) A_ : List[str] = value else: A_ : Optional[int] = value return new_state_dict def snake_case__ ( lowerCamelCase__ : Union[str, Any] ) -> Optional[Any]: A_ : Any = '''''' # first: transformer encoder for i in range(6 ): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) A_ : Tuple = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight' ) A_ : Dict = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias' ) # next, add query, keys and values (in that order) to the state dict A_ : str = in_proj_weight[:2_5_6, :] A_ : Optional[Any] = in_proj_bias[:2_5_6] A_ : Dict = in_proj_weight[2_5_6:5_1_2, :] A_ : Tuple = in_proj_bias[2_5_6:5_1_2] A_ : Tuple = in_proj_weight[-2_5_6:, :] A_ : Optional[int] = in_proj_bias[-2_5_6:] # next: transformer decoder (which is a bit more complex because it also includes cross-attention) for i in range(6 ): # read in weights + bias of input projection layer of self-attention A_ : Union[str, Any] = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight' ) A_ : Dict = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias' ) # next, add query, keys and values (in that order) to the state dict A_ : List[str] = in_proj_weight[:2_5_6, :] A_ : int = in_proj_bias[:2_5_6] A_ : Any = in_proj_weight[2_5_6:5_1_2, :] A_ : List[str] = in_proj_bias[2_5_6:5_1_2] A_ : Union[str, Any] = in_proj_weight[-2_5_6:, :] A_ : Optional[Any] = in_proj_bias[-2_5_6:] # read in weights + bias of input projection layer of cross-attention A_ : Tuple = state_dict.pop( f'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight' ) A_ : Optional[Any] = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias' ) # next, add query, keys and values (in that order) of cross-attention to the state dict A_ : Dict = in_proj_weight_cross_attn[:2_5_6, :] A_ : Tuple = in_proj_bias_cross_attn[:2_5_6] A_ : int = in_proj_weight_cross_attn[2_5_6:5_1_2, :] A_ : List[str] = in_proj_bias_cross_attn[2_5_6:5_1_2] A_ : Any = in_proj_weight_cross_attn[-2_5_6:, :] A_ : Any = in_proj_bias_cross_attn[-2_5_6:] def snake_case__ ( lowerCamelCase__ : List[str] , lowerCamelCase__ : Tuple ) -> Dict: A_ ,A_ : int = image.size A_ : Tuple = max(lowerCamelCase__ , lowerCamelCase__ ) A_ : Optional[Any] = 8_0_0 if '''detection''' in checkpoint_url else 1_0_0_0 A_ : Union[str, Any] = target_max_size / current_max_size A_ : Any = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) ) return resized_image def snake_case__ ( lowerCamelCase__ : Tuple ) -> str: A_ : Any = F.to_tensor(lowerCamelCase__ ) A_ : Optional[Any] = F.normalize(lowerCamelCase__ , mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ) return image @torch.no_grad() def snake_case__ ( lowerCamelCase__ : List[Any] , lowerCamelCase__ : int , lowerCamelCase__ : int ) -> str: logger.info('''Converting model...''' ) # load original state dict A_ : Tuple = torch.hub.load_state_dict_from_url(lowerCamelCase__ , map_location='''cpu''' ) # rename keys for src, dest in rename_keys: rename_key(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) A_ : str = rename_backbone_keys(lowerCamelCase__ ) # query, key and value matrices need special treatment read_in_q_k_v(lowerCamelCase__ ) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them A_ : List[Any] = '''model.''' for key in state_dict.copy().keys(): if not key.startswith('''class_labels_classifier''' ) and not key.startswith('''bbox_predictor''' ): A_ : List[Any] = state_dict.pop(lowerCamelCase__ ) A_ : str = val # create HuggingFace model and load state dict A_ : Union[str, Any] = TableTransformerConfig( backbone='''resnet18''' , mask_loss_coefficient=1 , dice_loss_coefficient=1 , ce_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.4 , class_cost=1 , bbox_cost=5 , giou_cost=2 , ) if "detection" in checkpoint_url: A_ : Dict = 1_5 A_ : Dict = 2 A_ : int = {0: '''table''', 1: '''table rotated'''} A_ : List[str] = idalabel A_ : Optional[int] = {v: k for k, v in idalabel.items()} else: A_ : Union[str, Any] = 1_2_5 A_ : Optional[Any] = 6 A_ : Optional[Any] = { 0: '''table''', 1: '''table column''', 2: '''table row''', 3: '''table column header''', 4: '''table projected row header''', 5: '''table spanning cell''', } A_ : int = idalabel A_ : Tuple = {v: k for k, v in idalabel.items()} A_ : Optional[Any] = DetrImageProcessor( format='''coco_detection''' , max_size=8_0_0 if '''detection''' in checkpoint_url else 1_0_0_0 ) A_ : int = TableTransformerForObjectDetection(lowerCamelCase__ ) model.load_state_dict(lowerCamelCase__ ) model.eval() # verify our conversion A_ : Optional[int] = '''example_pdf.png''' if '''detection''' in checkpoint_url else '''example_table.png''' A_ : Union[str, Any] = hf_hub_download(repo_id='''nielsr/example-pdf''' , repo_type='''dataset''' , filename=lowerCamelCase__ ) A_ : Tuple = Image.open(lowerCamelCase__ ).convert('''RGB''' ) A_ : int = normalize(resize(lowerCamelCase__ , lowerCamelCase__ ) ).unsqueeze(0 ) A_ : str = model(lowerCamelCase__ ) if "detection" in checkpoint_url: A_ : str = (1, 1_5, 3) A_ : int = torch.tensor( [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]] ) A_ : Tuple = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]] ) else: A_ : Optional[int] = (1, 1_2_5, 7) A_ : Dict = torch.tensor( [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]] ) A_ : Any = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]] ) assert outputs.logits.shape == expected_shape assert torch.allclose(outputs.logits[0, :3, :3] , lowerCamelCase__ , atol=1e-4 ) assert torch.allclose(outputs.pred_boxes[0, :3, :3] , lowerCamelCase__ , atol=1e-4 ) print('''Looks ok!''' ) if pytorch_dump_folder_path is not None: # Save model and image processor logger.info(f'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' ) Path(lowerCamelCase__ ).mkdir(exist_ok=lowerCamelCase__ ) model.save_pretrained(lowerCamelCase__ ) image_processor.save_pretrained(lowerCamelCase__ ) if push_to_hub: # Push model to HF hub logger.info('''Pushing model to the hub...''' ) A_ : List[Any] = ( '''microsoft/table-transformer-detection''' if '''detection''' in checkpoint_url else '''microsoft/table-transformer-structure-recognition''' ) model.push_to_hub(lowerCamelCase__ ) image_processor.push_to_hub(lowerCamelCase__ ) if __name__ == "__main__": snake_case__ = argparse.ArgumentParser() parser.add_argument( """--checkpoint_url""", default="""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""", type=str, choices=[ """https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""", """https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth""", ], help="""URL of the Table Transformer checkpoint you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) snake_case__ = parser.parse_args() convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
4
0
'''simple docstring''' from __future__ import annotations from statistics import mean def snake_case__ ( lowerCamelCase__ : list[int] , lowerCamelCase__ : list[int] , lowerCamelCase__ : int ) -> list[int]: A_ : Any = [0] * no_of_processes A_ : Optional[int] = [0] * no_of_processes # Initialize remaining_time to waiting_time. for i in range(lowerCamelCase__ ): A_ : Optional[Any] = burst_time[i] A_ : list[int] = [] A_ : Any = 0 A_ : Optional[Any] = 0 # When processes are not completed, # A process whose arrival time has passed \ # and has remaining execution time is put into the ready_process. # The shortest process in the ready_process, target_process is executed. while completed != no_of_processes: A_ : List[Any] = [] A_ : Optional[Any] = -1 for i in range(lowerCamelCase__ ): if (arrival_time[i] <= total_time) and (remaining_time[i] > 0): ready_process.append(lowerCamelCase__ ) if len(lowerCamelCase__ ) > 0: A_ : str = ready_process[0] for i in ready_process: if remaining_time[i] < remaining_time[target_process]: A_ : Tuple = i total_time += burst_time[target_process] completed += 1 A_ : List[Any] = 0 A_ : Any = ( total_time - arrival_time[target_process] - burst_time[target_process] ) else: total_time += 1 return waiting_time def snake_case__ ( lowerCamelCase__ : list[int] , lowerCamelCase__ : int , lowerCamelCase__ : list[int] ) -> list[int]: A_ : Any = [0] * no_of_processes for i in range(lowerCamelCase__ ): A_ : Optional[Any] = burst_time[i] + waiting_time[i] return turn_around_time if __name__ == "__main__": print("""[TEST CASE 01]""") snake_case__ = 4 snake_case__ = [2, 5, 3, 7] snake_case__ = [0, 0, 0, 0] snake_case__ = calculate_waitingtime(arrival_time, burst_time, no_of_processes) snake_case__ = calculate_turnaroundtime( burst_time, no_of_processes, waiting_time ) # Printing the Result print("""PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time""") for i, process_id in enumerate(list(range(1, 5))): print( F'{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t' F'{waiting_time[i]}\t\t\t\t{turn_around_time[i]}' ) print(F'\nAverage waiting time = {mean(waiting_time):.5f}') print(F'Average turnaround time = {mean(turn_around_time):.5f}')
351
'''simple docstring''' import logging import os from dataclasses import dataclass from typing import List, Optional, Union import tqdm from filelock import FileLock from transformers import ( BartTokenizer, BartTokenizerFast, DataProcessor, PreTrainedTokenizer, RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, is_tf_available, is_torch_available, ) snake_case__ = logging.getLogger(__name__) @dataclass(frozen=a__ ) class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = 42 _lowerCAmelCase = 42 _lowerCAmelCase = None _lowerCAmelCase = None _lowerCAmelCase = None @dataclass(frozen=a__ ) class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = 42 _lowerCAmelCase = None _lowerCAmelCase = None _lowerCAmelCase = None _lowerCAmelCase = None if is_torch_available(): import torch from torch.utils.data import Dataset class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = 42 def __init__( self : Optional[int] , _lowerCamelCase : str , _lowerCamelCase : PreTrainedTokenizer , _lowerCamelCase : str , _lowerCamelCase : Optional[int] = None , _lowerCamelCase : List[Any]=False , _lowerCamelCase : bool = False , ): """simple docstring""" A_ : Optional[int] = hans_processors[task]() A_ : int = os.path.join( _lowerCamelCase , '''cached_{}_{}_{}_{}'''.format( '''dev''' if evaluate else '''train''' , tokenizer.__class__.__name__ , str(_lowerCamelCase ) , _lowerCamelCase , ) , ) A_ : Dict = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) A_ ,A_ : List[str] = label_list[2], label_list[1] A_ : Optional[int] = label_list # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. A_ : str = cached_features_file + '''.lock''' with FileLock(_lowerCamelCase ): if os.path.exists(_lowerCamelCase ) and not overwrite_cache: logger.info(f'Loading features from cached file {cached_features_file}' ) A_ : List[str] = torch.load(_lowerCamelCase ) else: logger.info(f'Creating features from dataset file at {data_dir}' ) A_ : Optional[int] = ( processor.get_dev_examples(_lowerCamelCase ) if evaluate else processor.get_train_examples(_lowerCamelCase ) ) logger.info('''Training examples: %s''' , len(_lowerCamelCase ) ) A_ : Optional[int] = hans_convert_examples_to_features(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) logger.info('''Saving features into cached file %s''' , _lowerCamelCase ) torch.save(self.features , _lowerCamelCase ) def __len__( self : List[str] ): """simple docstring""" return len(self.features ) def __getitem__( self : List[str] , _lowerCamelCase : Optional[int] ): """simple docstring""" return self.features[i] def _a ( self : str ): """simple docstring""" return self.label_list if is_tf_available(): import tensorflow as tf class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = 42 def __init__( self : Optional[int] , _lowerCamelCase : str , _lowerCamelCase : PreTrainedTokenizer , _lowerCamelCase : str , _lowerCamelCase : Optional[int] = 128 , _lowerCamelCase : Dict=False , _lowerCamelCase : bool = False , ): """simple docstring""" A_ : Optional[int] = hans_processors[task]() A_ : Optional[int] = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) A_ ,A_ : Union[str, Any] = label_list[2], label_list[1] A_ : Tuple = label_list A_ : Optional[int] = processor.get_dev_examples(_lowerCamelCase ) if evaluate else processor.get_train_examples(_lowerCamelCase ) A_ : Tuple = hans_convert_examples_to_features(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) def gen(): for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='''convert examples to features''' ): if ex_index % 10000 == 0: logger.info('''Writing example %d of %d''' % (ex_index, len(_lowerCamelCase )) ) yield ( { "example_id": 0, "input_ids": ex.input_ids, "attention_mask": ex.attention_mask, "token_type_ids": ex.token_type_ids, }, ex.label, ) A_ : List[Any] = tf.data.Dataset.from_generator( _lowerCamelCase , ( { '''example_id''': tf.intaa, '''input_ids''': tf.intaa, '''attention_mask''': tf.intaa, '''token_type_ids''': tf.intaa, }, tf.intaa, ) , ( { '''example_id''': tf.TensorShape([] ), '''input_ids''': tf.TensorShape([None, None] ), '''attention_mask''': tf.TensorShape([None, None] ), '''token_type_ids''': tf.TensorShape([None, None] ), }, tf.TensorShape([] ), ) , ) def _a ( self : Any ): """simple docstring""" return self.dataset def __len__( self : Dict ): """simple docstring""" return len(self.features ) def __getitem__( self : Optional[int] , _lowerCamelCase : List[str] ): """simple docstring""" return self.features[i] def _a ( self : Tuple ): """simple docstring""" return self.label_list class UpperCamelCase_ (a__ ): """simple docstring""" def _a ( self : List[str] , _lowerCamelCase : Union[str, Any] ): """simple docstring""" return self._create_examples(self._read_tsv(os.path.join(_lowerCamelCase , '''heuristics_train_set.txt''' ) ) , '''train''' ) def _a ( self : List[str] , _lowerCamelCase : Tuple ): """simple docstring""" return self._create_examples(self._read_tsv(os.path.join(_lowerCamelCase , '''heuristics_evaluation_set.txt''' ) ) , '''dev''' ) def _a ( self : Any ): """simple docstring""" return ["contradiction", "entailment", "neutral"] def _a ( self : Optional[Any] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Any ): """simple docstring""" A_ : Tuple = [] for i, line in enumerate(_lowerCamelCase ): if i == 0: continue A_ : str = '''%s-%s''' % (set_type, line[0]) A_ : Optional[Any] = line[5] A_ : Union[str, Any] = line[6] A_ : List[str] = line[7][2:] if line[7].startswith('''ex''' ) else line[7] A_ : str = line[0] examples.append(InputExample(guid=_lowerCamelCase , text_a=_lowerCamelCase , text_b=_lowerCamelCase , label=_lowerCamelCase , pairID=_lowerCamelCase ) ) return examples def snake_case__ ( lowerCamelCase__ : List[InputExample] , lowerCamelCase__ : List[str] , lowerCamelCase__ : int , lowerCamelCase__ : PreTrainedTokenizer , ) -> int: A_ : Union[str, Any] = {label: i for i, label in enumerate(lowerCamelCase__ )} A_ : Optional[Any] = [] for ex_index, example in tqdm.tqdm(enumerate(lowerCamelCase__ ) , desc='''convert examples to features''' ): if ex_index % 1_0_0_0_0 == 0: logger.info('''Writing example %d''' % (ex_index) ) A_ : Optional[int] = tokenizer( example.text_a , example.text_b , add_special_tokens=lowerCamelCase__ , max_length=lowerCamelCase__ , padding='''max_length''' , truncation=lowerCamelCase__ , return_overflowing_tokens=lowerCamelCase__ , ) A_ : List[str] = label_map[example.label] if example.label in label_map else 0 A_ : Tuple = int(example.pairID ) features.append(InputFeatures(**lowerCamelCase__ , label=lowerCamelCase__ , pairID=lowerCamelCase__ ) ) for i, example in enumerate(examples[:5] ): logger.info('''*** Example ***''' ) logger.info(f'guid: {example}' ) logger.info(f'features: {features[i]}' ) return features snake_case__ = { """hans""": 3, } snake_case__ = { """hans""": HansProcessor, }
4
0
'''simple docstring''' import pickle import unittest import torch from accelerate import Accelerator from accelerate.state import AcceleratorState from accelerate.test_utils import require_cpu @require_cpu class UpperCamelCase_ (unittest.TestCase ): """simple docstring""" def _a ( self : int ): """simple docstring""" A_ : List[str] = torch.nn.Linear(10 , 10 ) A_ : List[Any] = torch.optim.SGD(model.parameters() , 0.1 ) A_ : List[Any] = Accelerator() A_ : int = accelerator.prepare(_lowerCamelCase ) try: pickle.loads(pickle.dumps(_lowerCamelCase ) ) except Exception as e: self.fail(f'Accelerated optimizer pickling failed with {e}' ) AcceleratorState._reset_state()
352
'''simple docstring''' import io import itertools import json from dataclasses import dataclass from typing import Optional import pyarrow as pa import pyarrow.json as paj import datasets from datasets.table import table_cast from datasets.utils.file_utils import readline snake_case__ = datasets.utils.logging.get_logger(__name__) @dataclass class UpperCamelCase_ (datasets.BuilderConfig ): """simple docstring""" _lowerCAmelCase = None _lowerCAmelCase = "utf-8" _lowerCAmelCase = None _lowerCAmelCase = None _lowerCAmelCase = True # deprecated _lowerCAmelCase = None # deprecated _lowerCAmelCase = 1_0 << 2_0 # 10MB _lowerCAmelCase = None class UpperCamelCase_ (datasets.ArrowBasedBuilder ): """simple docstring""" _lowerCAmelCase = JsonConfig def _a ( self : int ): """simple docstring""" if self.config.block_size is not None: logger.warning('''The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead''' ) A_ : List[Any] = self.config.block_size if self.config.use_threads is not True: logger.warning( '''The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.''' ) if self.config.newlines_in_values is not None: raise ValueError('''The JSON loader parameter `newlines_in_values` is no longer supported''' ) return datasets.DatasetInfo(features=self.config.features ) def _a ( self : Any , _lowerCamelCase : List[str] ): """simple docstring""" if not self.config.data_files: raise ValueError(f'At least one data file must be specified, but got data_files={self.config.data_files}' ) A_ : int = dl_manager.download_and_extract(self.config.data_files ) if isinstance(_lowerCamelCase , (str, list, tuple) ): A_ : Union[str, Any] = data_files if isinstance(_lowerCamelCase , _lowerCamelCase ): A_ : List[str] = [files] A_ : List[Any] = [dl_manager.iter_files(_lowerCamelCase ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )] A_ : Tuple = [] for split_name, files in data_files.items(): if isinstance(_lowerCamelCase , _lowerCamelCase ): A_ : int = [files] A_ : Union[str, Any] = [dl_manager.iter_files(_lowerCamelCase ) for file in files] splits.append(datasets.SplitGenerator(name=_lowerCamelCase , gen_kwargs={'''files''': files} ) ) return splits def _a ( self : int , _lowerCamelCase : pa.Table ): """simple docstring""" if self.config.features is not None: # adding missing columns for column_name in set(self.config.features ) - set(pa_table.column_names ): A_ : Optional[int] = self.config.features.arrow_schema.field(_lowerCamelCase ).type A_ : Optional[int] = pa_table.append_column(_lowerCamelCase , pa.array([None] * len(_lowerCamelCase ) , type=_lowerCamelCase ) ) # more expensive cast to support nested structures with keys in a different order # allows str <-> int/float or str to Audio for example A_ : str = table_cast(_lowerCamelCase , self.config.features.arrow_schema ) return pa_table def _a ( self : List[str] , _lowerCamelCase : int ): """simple docstring""" for file_idx, file in enumerate(itertools.chain.from_iterable(_lowerCamelCase ) ): # If the file is one json object and if we need to look at the list of items in one specific field if self.config.field is not None: with open(_lowerCamelCase , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f: A_ : int = json.load(_lowerCamelCase ) # We keep only the field we are interested in A_ : List[str] = dataset[self.config.field] # We accept two format: a list of dicts or a dict of lists if isinstance(_lowerCamelCase , (list, tuple) ): A_ : int = set().union(*[row.keys() for row in dataset] ) A_ : List[str] = {col: [row.get(_lowerCamelCase ) for row in dataset] for col in keys} else: A_ : Tuple = dataset A_ : Dict = pa.Table.from_pydict(_lowerCamelCase ) yield file_idx, self._cast_table(_lowerCamelCase ) # If the file has one json object per line else: with open(_lowerCamelCase , '''rb''' ) as f: A_ : int = 0 # Use block_size equal to the chunk size divided by 32 to leverage multithreading # Set a default minimum value of 16kB if the chunk size is really small A_ : int = max(self.config.chunksize // 32 , 16 << 10 ) A_ : int = ( self.config.encoding_errors if self.config.encoding_errors is not None else '''strict''' ) while True: A_ : Any = f.read(self.config.chunksize ) if not batch: break # Finish current line try: batch += f.readline() except (AttributeError, io.UnsupportedOperation): batch += readline(_lowerCamelCase ) # PyArrow only accepts utf-8 encoded bytes if self.config.encoding != "utf-8": A_ : Optional[Any] = batch.decode(self.config.encoding , errors=_lowerCamelCase ).encode('''utf-8''' ) try: while True: try: A_ : List[Any] = paj.read_json( io.BytesIO(_lowerCamelCase ) , read_options=paj.ReadOptions(block_size=_lowerCamelCase ) ) break except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e: if ( isinstance(_lowerCamelCase , pa.ArrowInvalid ) and "straddling" not in str(_lowerCamelCase ) or block_size > len(_lowerCamelCase ) ): raise else: # Increase the block size in case it was too small. # The block size will be reset for the next file. logger.debug( f'Batch of {len(_lowerCamelCase )} bytes couldn\'t be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.' ) block_size *= 2 except pa.ArrowInvalid as e: try: with open( _lowerCamelCase , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f: A_ : Optional[Any] = json.load(_lowerCamelCase ) except json.JSONDecodeError: logger.error(f'Failed to read file \'{file}\' with error {type(_lowerCamelCase )}: {e}' ) raise e # If possible, parse the file as a list of json objects and exit the loop if isinstance(_lowerCamelCase , _lowerCamelCase ): # list is the only sequence type supported in JSON try: A_ : Optional[int] = set().union(*[row.keys() for row in dataset] ) A_ : Tuple = {col: [row.get(_lowerCamelCase ) for row in dataset] for col in keys} A_ : int = pa.Table.from_pydict(_lowerCamelCase ) except (pa.ArrowInvalid, AttributeError) as e: logger.error(f'Failed to read file \'{file}\' with error {type(_lowerCamelCase )}: {e}' ) raise ValueError(f'Not able to read records in the JSON file at {file}.' ) from None yield file_idx, self._cast_table(_lowerCamelCase ) break else: logger.error(f'Failed to read file \'{file}\' with error {type(_lowerCamelCase )}: {e}' ) raise ValueError( f'Not able to read records in the JSON file at {file}. ' f'You should probably indicate the field of the JSON file containing your records. ' f'This JSON file contain the following fields: {str(list(dataset.keys() ) )}. ' f'Select the correct one and provide it as `field=\'XXX\'` to the dataset loading method. ' ) from None # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(_lowerCamelCase ) batch_idx += 1
4
0
from ...configuration_utils import PretrainedConfig from ...utils import logging snake_case__ = logging.get_logger(__name__) snake_case__ = { """RWKV/rwkv-4-169m-pile""": """https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json""", """RWKV/rwkv-4-430m-pile""": """https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json""", """RWKV/rwkv-4-1b5-pile""": """https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json""", """RWKV/rwkv-4-3b-pile""": """https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json""", """RWKV/rwkv-4-7b-pile""": """https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json""", """RWKV/rwkv-4-14b-pile""": """https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json""", """RWKV/rwkv-raven-1b5""": """https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json""", """RWKV/rwkv-raven-3b""": """https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json""", """RWKV/rwkv-raven-7b""": """https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json""", """RWKV/rwkv-raven-14b""": """https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json""", } class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = 'rwkv' _lowerCAmelCase = {'max_position_embeddings': 'context_length'} def __init__( self : Tuple , _lowerCamelCase : str=50277 , _lowerCamelCase : Tuple=1024 , _lowerCamelCase : Optional[int]=4096 , _lowerCamelCase : Tuple=32 , _lowerCamelCase : List[str]=None , _lowerCamelCase : str=None , _lowerCamelCase : Tuple=1E-5 , _lowerCamelCase : Optional[Any]=0 , _lowerCamelCase : Dict=0 , _lowerCamelCase : Dict=6 , _lowerCamelCase : List[Any]=False , _lowerCamelCase : List[Any]=True , **_lowerCamelCase : List[str] , ): """simple docstring""" A_ : Optional[int] = vocab_size A_ : List[Any] = context_length A_ : Optional[int] = hidden_size A_ : Union[str, Any] = num_hidden_layers A_ : Optional[int] = attention_hidden_size if attention_hidden_size is not None else hidden_size A_ : Union[str, Any] = intermediate_size if intermediate_size is not None else 4 * hidden_size A_ : str = layer_norm_epsilon A_ : Dict = rescale_every A_ : Union[str, Any] = use_cache A_ : Dict = bos_token_id A_ : Dict = eos_token_id super().__init__( tie_word_embeddings=_lowerCamelCase , bos_token_id=_lowerCamelCase , eos_token_id=_lowerCamelCase , **_lowerCamelCase )
353
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices snake_case__ = logging.get_logger(__name__) snake_case__ = { """microsoft/swin-tiny-patch4-window7-224""": ( """https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json""" ), # See all Swin models at https://huggingface.co/models?filter=swin } class UpperCamelCase_ (a__, a__ ): """simple docstring""" _lowerCAmelCase = 'swin' _lowerCAmelCase = { 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers', } def __init__( self : Any , _lowerCamelCase : Optional[Any]=224 , _lowerCamelCase : List[str]=4 , _lowerCamelCase : Optional[Any]=3 , _lowerCamelCase : Tuple=96 , _lowerCamelCase : List[Any]=[2, 2, 6, 2] , _lowerCamelCase : List[str]=[3, 6, 12, 24] , _lowerCamelCase : List[Any]=7 , _lowerCamelCase : Optional[int]=4.0 , _lowerCamelCase : List[str]=True , _lowerCamelCase : List[str]=0.0 , _lowerCamelCase : Any=0.0 , _lowerCamelCase : Dict=0.1 , _lowerCamelCase : List[str]="gelu" , _lowerCamelCase : Tuple=False , _lowerCamelCase : Dict=0.02 , _lowerCamelCase : Optional[Any]=1E-5 , _lowerCamelCase : Any=32 , _lowerCamelCase : Tuple=None , _lowerCamelCase : Any=None , **_lowerCamelCase : str , ): """simple docstring""" super().__init__(**_lowerCamelCase ) A_ : Optional[int] = image_size A_ : Optional[int] = patch_size A_ : Optional[int] = num_channels A_ : Any = embed_dim A_ : List[Any] = depths A_ : Any = len(_lowerCamelCase ) A_ : List[Any] = num_heads A_ : Tuple = window_size A_ : Tuple = mlp_ratio A_ : Dict = qkv_bias A_ : List[str] = hidden_dropout_prob A_ : List[str] = attention_probs_dropout_prob A_ : Any = drop_path_rate A_ : List[Any] = hidden_act A_ : Tuple = use_absolute_embeddings A_ : int = layer_norm_eps A_ : Optional[Any] = initializer_range A_ : Union[str, Any] = encoder_stride # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model A_ : str = int(embed_dim * 2 ** (len(_lowerCamelCase ) - 1) ) A_ : str = ['''stem'''] + [f'stage{idx}' for idx in range(1 , len(_lowerCamelCase ) + 1 )] A_ ,A_ : Optional[Any] = get_aligned_output_features_output_indices( out_features=_lowerCamelCase , out_indices=_lowerCamelCase , stage_names=self.stage_names ) class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = version.parse('1.11' ) @property def _a ( self : str ): """simple docstring""" return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def _a ( self : Union[str, Any] ): """simple docstring""" return 1E-4
4
0
'''simple docstring''' import logging import os import quant_trainer import torch from torch.utils.data import DataLoader from transformers import Trainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput snake_case__ = logging.getLogger(__name__) if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class UpperCamelCase_ (a__ ): """simple docstring""" def __init__( self : List[Any] , *_lowerCamelCase : int , _lowerCamelCase : Tuple=None , _lowerCamelCase : List[str]=None , _lowerCamelCase : Dict=None , **_lowerCamelCase : int ): """simple docstring""" super().__init__(*_lowerCamelCase , **_lowerCamelCase ) A_ : Optional[int] = eval_examples A_ : Any = post_process_function A_ : int = quant_trainer_args A_ : str = 128 # default number of calibration samples def _a ( self : int , _lowerCamelCase : str=None ): """simple docstring""" if calib_dataset is None and self.calib_dataset is None: raise ValueError('''Trainer: calibration requires an calib_dataset.''' ) A_ : List[str] = calib_dataset if calib_dataset is not None else self.calib_dataset A_ : Optional[int] = self._remove_unused_columns(_lowerCamelCase , description='''Calibration''' ) return DataLoader( _lowerCamelCase , batch_size=self.args.eval_batch_size , collate_fn=self.data_collator , drop_last=self.args.dataloader_drop_last , num_workers=self.args.dataloader_num_workers , pin_memory=self.args.dataloader_pin_memory , shuffle=_lowerCamelCase , ) def _a ( self : List[Any] , _lowerCamelCase : Union[str, Any]=None ): """simple docstring""" A_ : Dict = self.train_dataset if calib_dataset is None else calib_dataset A_ : Optional[Any] = self.get_calib_dataloader(_lowerCamelCase ) A_ : str = self.model quant_trainer.configure_model(_lowerCamelCase , self.quant_trainer_args , calib=_lowerCamelCase ) model.eval() quant_trainer.enable_calibration(_lowerCamelCase ) logger.info('''***** Running calibration *****''' ) logger.info(f' Num examples = {self.calib_num}' ) logger.info(f' Batch size = {calib_dataloader.batch_size}' ) for step, inputs in enumerate(_lowerCamelCase ): # Prediction step A_ : Union[str, Any] = self.prediction_step(_lowerCamelCase , _lowerCamelCase , prediction_loss_only=_lowerCamelCase ) if (step + 1) * calib_dataloader.batch_size >= self.calib_num: break quant_trainer.finish_calibration(_lowerCamelCase , self.quant_trainer_args ) A_ : int = model def _a ( self : Any , _lowerCamelCase : List[Any]=None , _lowerCamelCase : Tuple=None , _lowerCamelCase : Union[str, Any]=None , _lowerCamelCase : str = "eval" ): """simple docstring""" A_ : List[Any] = self.eval_dataset if eval_dataset is None else eval_dataset A_ : Optional[int] = self.get_eval_dataloader(_lowerCamelCase ) A_ : List[Any] = self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. A_ : Tuple = self.compute_metrics A_ : Optional[Any] = None A_ : str = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: A_ : List[Any] = eval_loop( _lowerCamelCase , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_lowerCamelCase , ) finally: A_ : List[str] = compute_metrics if self.post_process_function is not None and self.compute_metrics is not None: A_ : Dict = self.post_process_function(_lowerCamelCase , _lowerCamelCase , output.predictions ) A_ : Tuple = self.compute_metrics(_lowerCamelCase ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(f'{metric_key_prefix}_' ): A_ : str = metrics.pop(_lowerCamelCase ) self.log(_lowerCamelCase ) else: A_ : Tuple = {} if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) xm.master_print(met.metrics_report() ) A_ : Optional[Any] = self.callback_handler.on_evaluate(self.args , self.state , self.control , _lowerCamelCase ) return metrics def _a ( self : List[Any] , _lowerCamelCase : int , _lowerCamelCase : Dict , _lowerCamelCase : Optional[int]=None , _lowerCamelCase : str = "test" ): """simple docstring""" A_ : int = self.get_test_dataloader(_lowerCamelCase ) # Temporarily disable metric computation, we will do it in the loop here. A_ : Optional[int] = self.compute_metrics A_ : int = None A_ : Union[str, Any] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: A_ : Any = eval_loop( _lowerCamelCase , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_lowerCamelCase , ) finally: A_ : Optional[Any] = compute_metrics if self.post_process_function is None or self.compute_metrics is None: return output A_ : List[Any] = self.post_process_function(_lowerCamelCase , _lowerCamelCase , output.predictions , '''predict''' ) A_ : List[str] = self.compute_metrics(_lowerCamelCase ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(f'{metric_key_prefix}_' ): A_ : Any = metrics.pop(_lowerCamelCase ) return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=_lowerCamelCase ) def _a ( self : List[Any] , _lowerCamelCase : Optional[int]="./" ): """simple docstring""" A_ : Optional[int] = self.eval_dataset A_ : Any = self.get_eval_dataloader(_lowerCamelCase ) A_ : Any = next(iter(_lowerCamelCase ) ) # saving device - to make it consistent A_ : Optional[int] = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' ) # convert to tuple A_ : List[Any] = tuple(v.to(_lowerCamelCase ) for k, v in batch.items() ) logger.info('''Converting model to be onnx compatible''' ) from pytorch_quantization.nn import TensorQuantizer A_ : int = True A_ : List[Any] = self.model.to(_lowerCamelCase ) model.eval() model.float() A_ : Dict = model.module if hasattr(_lowerCamelCase , '''module''' ) else model quant_trainer.configure_model(_lowerCamelCase , self.quant_trainer_args ) A_ : Any = os.path.join(_lowerCamelCase , '''model.onnx''' ) logger.info(f'exporting model to {output_model_file}' ) A_ : str = {0: '''batch_size''', 1: '''seq_len'''} torch.onnx.export( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , export_params=_lowerCamelCase , opset_version=13 , do_constant_folding=_lowerCamelCase , input_names=['''input_ids''', '''attention_mask''', '''token_type_ids'''] , output_names=['''output_start_logits''', '''output_end_logits'''] , dynamic_axes={ '''input_ids''': axes, '''attention_mask''': axes, '''token_type_ids''': axes, '''output_start_logits''': axes, '''output_end_logits''': axes, } , verbose=_lowerCamelCase , ) logger.info('''onnx export finished''' )
354
'''simple docstring''' from __future__ import annotations def snake_case__ ( lowerCamelCase__ : list[int] , lowerCamelCase__ : int ) -> list[int]: A_ : int = 0 A_ : str = len(lowerCamelCase__ ) - 1 while i < j: if nums[i] + nums[j] == target: return [i, j] elif nums[i] + nums[j] < target: A_ : Tuple = i + 1 else: A_ : List[str] = j - 1 return [] if __name__ == "__main__": import doctest doctest.testmod() print(F'{two_pointer([2, 7, 11, 15], 9) = }')
4
0
'''simple docstring''' from __future__ import annotations import unittest from transformers import LEDConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFLEDForConditionalGeneration, TFLEDModel @require_tf class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = LEDConfig _lowerCAmelCase = {} _lowerCAmelCase = 'gelu' def __init__( self : List[Any] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : List[Any]=13 , _lowerCamelCase : Dict=7 , _lowerCamelCase : Dict=True , _lowerCamelCase : int=False , _lowerCamelCase : Optional[Any]=99 , _lowerCamelCase : List[Any]=32 , _lowerCamelCase : List[Any]=2 , _lowerCamelCase : Optional[int]=4 , _lowerCamelCase : Optional[int]=37 , _lowerCamelCase : str=0.1 , _lowerCamelCase : Union[str, Any]=0.1 , _lowerCamelCase : List[str]=20 , _lowerCamelCase : int=2 , _lowerCamelCase : Optional[Any]=1 , _lowerCamelCase : Tuple=0 , _lowerCamelCase : Any=4 , ): """simple docstring""" A_ : List[Any] = parent A_ : Tuple = batch_size A_ : List[Any] = seq_length A_ : List[Any] = is_training A_ : str = use_labels A_ : List[str] = vocab_size A_ : Union[str, Any] = hidden_size A_ : List[str] = num_hidden_layers A_ : List[Any] = num_attention_heads A_ : Optional[Any] = intermediate_size A_ : str = hidden_dropout_prob A_ : Any = attention_probs_dropout_prob A_ : Optional[Any] = max_position_embeddings A_ : List[Any] = eos_token_id A_ : str = pad_token_id A_ : Tuple = bos_token_id A_ : List[Any] = attention_window # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1] # because its local attention only attends to `self.attention_window` and one before and one after A_ : Optional[Any] = self.attention_window + 2 # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for # the `test_attention_outputs` and `test_hidden_states_output` tests A_ : Optional[int] = ( self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window ) def _a ( self : Optional[Any] ): """simple docstring""" A_ : List[str] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) A_ : Any = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) A_ : Union[str, Any] = tf.concat([input_ids, eos_tensor] , axis=1 ) A_ : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A_ : List[Any] = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , ) A_ : Dict = prepare_led_inputs_dict(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) A_ : Optional[Any] = tf.concat( [tf.zeros_like(_lowerCamelCase )[:, :-1], tf.ones_like(_lowerCamelCase )[:, -1:]] , axis=-1 , ) A_ : Optional[int] = global_attention_mask return config, inputs_dict def _a ( self : Optional[Any] , _lowerCamelCase : str , _lowerCamelCase : Optional[int] ): """simple docstring""" A_ : Optional[int] = TFLEDModel(config=_lowerCamelCase ).get_decoder() A_ : str = inputs_dict['''input_ids'''] A_ : Optional[Any] = input_ids[:1, :] A_ : Optional[Any] = inputs_dict['''attention_mask'''][:1, :] A_ : List[Any] = 1 # first forward pass A_ : Optional[int] = model(_lowerCamelCase , attention_mask=_lowerCamelCase , use_cache=_lowerCamelCase ) A_ : List[str] = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids A_ : List[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size ) A_ : Any = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and A_ : List[Any] = tf.concat([input_ids, next_tokens] , axis=-1 ) A_ : Tuple = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) A_ : Optional[int] = model(_lowerCamelCase , attention_mask=_lowerCamelCase )[0] A_ : Dict = model(_lowerCamelCase , attention_mask=_lowerCamelCase , past_key_values=_lowerCamelCase )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice A_ : List[str] = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) A_ : List[str] = output_from_no_past[:, -3:, random_slice_idx] A_ : List[str] = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(_lowerCamelCase , _lowerCamelCase , rtol=1E-3 ) def snake_case__ ( lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Dict , lowerCamelCase__ : Tuple , lowerCamelCase__ : int=None , lowerCamelCase__ : int=None , lowerCamelCase__ : List[Any]=None , lowerCamelCase__ : Dict=None , ) -> Dict: if attention_mask is None: A_ : List[str] = tf.cast(tf.math.not_equal(lowerCamelCase__ , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: A_ : List[str] = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: A_ : Union[str, Any] = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: A_ : List[str] = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "attention_mask": attention_mask, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, } @require_tf class UpperCamelCase_ (a__, a__, unittest.TestCase ): """simple docstring""" _lowerCAmelCase = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else () _lowerCAmelCase = (TFLEDForConditionalGeneration,) if is_tf_available() else () _lowerCAmelCase = ( { 'conversational': TFLEDForConditionalGeneration, 'feature-extraction': TFLEDModel, 'summarization': TFLEDForConditionalGeneration, 'text2text-generation': TFLEDForConditionalGeneration, 'translation': TFLEDForConditionalGeneration, } if is_tf_available() else {} ) _lowerCAmelCase = True _lowerCAmelCase = False _lowerCAmelCase = False _lowerCAmelCase = False def _a ( self : List[Any] ): """simple docstring""" A_ : Tuple = TFLEDModelTester(self ) A_ : Tuple = ConfigTester(self , config_class=_lowerCamelCase ) def _a ( self : Union[str, Any] ): """simple docstring""" self.config_tester.run_common_tests() def _a ( self : Any ): """simple docstring""" A_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*_lowerCamelCase ) def _a ( self : int ): """simple docstring""" A_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() A_ : str = tf.zeros_like(inputs_dict['''attention_mask'''] ) A_ : str = 2 A_ : Dict = tf.where( tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict['''global_attention_mask'''] , ) A_ : str = True A_ : Dict = self.model_tester.seq_length A_ : Dict = self.model_tester.encoder_seq_length def check_decoder_attentions_output(_lowerCamelCase : Union[str, Any] ): A_ : str = outputs.decoder_attentions self.assertEqual(len(_lowerCamelCase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , ) def check_encoder_attentions_output(_lowerCamelCase : Union[str, Any] ): A_ : Optional[int] = [t.numpy() for t in outputs.encoder_attentions] A_ : Any = [t.numpy() for t in outputs.encoder_global_attentions] self.assertEqual(len(_lowerCamelCase ) , self.model_tester.num_hidden_layers ) self.assertEqual(len(_lowerCamelCase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , ) self.assertListEqual( list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , ) for model_class in self.all_model_classes: A_ : Optional[int] = True A_ : str = False A_ : Optional[Any] = False A_ : List[Any] = model_class(_lowerCamelCase ) A_ : str = model(self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) ) A_ : int = len(_lowerCamelCase ) self.assertEqual(config.output_hidden_states , _lowerCamelCase ) check_encoder_attentions_output(_lowerCamelCase ) if self.is_encoder_decoder: A_ : str = model_class(_lowerCamelCase ) A_ : Dict = model(self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) ) self.assertEqual(config.output_hidden_states , _lowerCamelCase ) check_decoder_attentions_output(_lowerCamelCase ) # Check that output attentions can also be changed via the config del inputs_dict["output_attentions"] A_ : List[Any] = True A_ : Optional[int] = model_class(_lowerCamelCase ) A_ : Dict = model(self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) ) self.assertEqual(config.output_hidden_states , _lowerCamelCase ) check_encoder_attentions_output(_lowerCamelCase ) # Check attention is always last and order is fine A_ : Tuple = True A_ : Dict = True A_ : Union[str, Any] = model_class(_lowerCamelCase ) A_ : List[str] = model(self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) ) self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_lowerCamelCase ) ) self.assertEqual(model.config.output_hidden_states , _lowerCamelCase ) check_encoder_attentions_output(_lowerCamelCase ) @unittest.skip('''LED keeps using potentially symbolic tensors in conditionals and breaks tracing.''' ) def _a ( self : int ): """simple docstring""" pass def _a ( self : Optional[int] ): """simple docstring""" pass def snake_case__ ( lowerCamelCase__ : List[str] ) -> Dict: return tf.constant(lowerCamelCase__ , dtype=tf.intaa ) snake_case__ = 1e-4 @slow @require_tf class UpperCamelCase_ (unittest.TestCase ): """simple docstring""" def _a ( self : Optional[int] ): """simple docstring""" A_ : List[Any] = TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' ).led # change to intended input here A_ : Any = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] ) A_ : List[Any] = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] ) A_ : List[Any] = prepare_led_inputs_dict(model.config , _lowerCamelCase , _lowerCamelCase ) A_ : Tuple = model(**_lowerCamelCase )[0] A_ : Dict = (1, 1024, 768) self.assertEqual(output.shape , _lowerCamelCase ) # change to expected output here A_ : Optional[Any] = tf.convert_to_tensor( [[2.30_50, 2.82_79, 0.65_31], [-1.84_57, -0.14_55, -3.56_61], [-1.01_86, 0.45_86, -2.20_43]] , ) tf.debugging.assert_near(output[:, :3, :3] , _lowerCamelCase , atol=1E-3 ) def _a ( self : Tuple ): """simple docstring""" A_ : Tuple = TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' ) # change to intended input here A_ : Optional[Any] = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] ) A_ : List[Any] = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] ) A_ : Union[str, Any] = prepare_led_inputs_dict(model.config , _lowerCamelCase , _lowerCamelCase ) A_ : Any = model(**_lowerCamelCase )[0] A_ : Optional[int] = (1, 1024, model.config.vocab_size) self.assertEqual(output.shape , _lowerCamelCase ) # change to expected output here A_ : int = tf.convert_to_tensor( [[33.65_07, 6.45_72, 16.80_89], [5.87_39, -2.42_38, 11.29_02], [-3.21_39, -4.31_49, 4.27_83]] , ) tf.debugging.assert_near(output[:, :3, :3] , _lowerCamelCase , atol=1E-3 , rtol=1E-3 )
355
'''simple docstring''' def snake_case__ ( lowerCamelCase__ : list[int] , lowerCamelCase__ : list[int] , lowerCamelCase__ : int ) -> bool: return not any( neighbour == 1 and colored_vertices[i] == color for i, neighbour in enumerate(lowerCamelCase__ ) ) def snake_case__ ( lowerCamelCase__ : list[list[int]] , lowerCamelCase__ : int , lowerCamelCase__ : list[int] , lowerCamelCase__ : int ) -> bool: # Base Case if index == len(lowerCamelCase__ ): return True # Recursive Step for i in range(lowerCamelCase__ ): if valid_coloring(graph[index] , lowerCamelCase__ , lowerCamelCase__ ): # Color current vertex A_ : int = i # Validate coloring if util_color(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , index + 1 ): return True # Backtrack A_ : str = -1 return False def snake_case__ ( lowerCamelCase__ : list[list[int]] , lowerCamelCase__ : int ) -> list[int]: A_ : List[str] = [-1] * len(lowerCamelCase__ ) if util_color(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , 0 ): return colored_vertices return []
4
0
'''simple docstring''' def snake_case__ ( lowerCamelCase__ : int = 1_0_0_0 ) -> int: A_ : int = 1, 1 A_ : Dict = 2 while True: A_ : List[Any] = 0 A_ : str = fa + fa A_ : str = fa, f index += 1 for _ in str(lowerCamelCase__ ): i += 1 if i == n: break return index if __name__ == "__main__": print(solution(int(str(input()).strip())))
356
'''simple docstring''' from __future__ import annotations from PIL import Image # Define glider example snake_case__ = [ [0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], ] # Define blinker example snake_case__ = [[0, 1, 0], [0, 1, 0], [0, 1, 0]] def snake_case__ ( lowerCamelCase__ : list[list[int]] ) -> list[list[int]]: A_ : str = [] for i in range(len(lowerCamelCase__ ) ): A_ : Optional[Any] = [] for j in range(len(cells[i] ) ): # Get the number of live neighbours A_ : Optional[int] = 0 if i > 0 and j > 0: neighbour_count += cells[i - 1][j - 1] if i > 0: neighbour_count += cells[i - 1][j] if i > 0 and j < len(cells[i] ) - 1: neighbour_count += cells[i - 1][j + 1] if j > 0: neighbour_count += cells[i][j - 1] if j < len(cells[i] ) - 1: neighbour_count += cells[i][j + 1] if i < len(lowerCamelCase__ ) - 1 and j > 0: neighbour_count += cells[i + 1][j - 1] if i < len(lowerCamelCase__ ) - 1: neighbour_count += cells[i + 1][j] if i < len(lowerCamelCase__ ) - 1 and j < len(cells[i] ) - 1: neighbour_count += cells[i + 1][j + 1] # Rules of the game of life (excerpt from Wikipedia): # 1. Any live cell with two or three live neighbours survives. # 2. Any dead cell with three live neighbours becomes a live cell. # 3. All other live cells die in the next generation. # Similarly, all other dead cells stay dead. A_ : List[str] = cells[i][j] == 1 if ( (alive and 2 <= neighbour_count <= 3) or not alive and neighbour_count == 3 ): next_generation_row.append(1 ) else: next_generation_row.append(0 ) next_generation.append(lowerCamelCase__ ) return next_generation def snake_case__ ( lowerCamelCase__ : list[list[int]] , lowerCamelCase__ : int ) -> list[Image.Image]: A_ : List[Any] = [] for _ in range(lowerCamelCase__ ): # Create output image A_ : Optional[int] = Image.new('''RGB''' , (len(cells[0] ), len(lowerCamelCase__ )) ) A_ : int = img.load() # Save cells to image for x in range(len(lowerCamelCase__ ) ): for y in range(len(cells[0] ) ): A_ : Optional[Any] = 2_5_5 - cells[y][x] * 2_5_5 A_ : str = (colour, colour, colour) # Save image images.append(lowerCamelCase__ ) A_ : Optional[int] = new_generation(lowerCamelCase__ ) return images if __name__ == "__main__": snake_case__ = generate_images(GLIDER, 16) images[0].save("""out.gif""", save_all=True, append_images=images[1:])
4
0
'''simple docstring''' from typing import List import numpy as np def snake_case__ ( lowerCamelCase__ : dict ) -> int: A_ : Tuple = {key: len(lowerCamelCase__ ) for key, value in gen_kwargs.items() if isinstance(lowerCamelCase__ , lowerCamelCase__ )} if len(set(lists_lengths.values() ) ) > 1: raise RuntimeError( ( '''Sharding is ambiguous for this dataset: ''' + '''we found several data sources lists of different lengths, and we don\'t know over which list we should parallelize:\n''' + '''\n'''.join(f'\t- key {key} has length {length}' for key, length in lists_lengths.items() ) + '''\nTo fix this, check the \'gen_kwargs\' and make sure to use lists only for data sources, ''' + '''and use tuples otherwise. In the end there should only be one single list, or several lists with the same length.''' ) ) A_ : Optional[Any] = max(lists_lengths.values() , default=0 ) return max(1 , lowerCamelCase__ ) def snake_case__ ( lowerCamelCase__ : int , lowerCamelCase__ : int ) -> List[range]: A_ : str = [] for group_idx in range(lowerCamelCase__ ): A_ : List[Any] = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs)) if num_shards_to_add == 0: break A_ : Union[str, Any] = shards_indices_per_group[-1].stop if shards_indices_per_group else 0 A_ : int = range(lowerCamelCase__ , start + num_shards_to_add ) shards_indices_per_group.append(lowerCamelCase__ ) return shards_indices_per_group def snake_case__ ( lowerCamelCase__ : dict , lowerCamelCase__ : int ) -> List[dict]: A_ : Optional[int] = _number_of_shards_in_gen_kwargs(lowerCamelCase__ ) if num_shards == 1: return [dict(lowerCamelCase__ )] else: A_ : List[Any] = _distribute_shards(num_shards=lowerCamelCase__ , max_num_jobs=lowerCamelCase__ ) return [ { key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]] if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else value for key, value in gen_kwargs.items() } for group_idx in range(len(lowerCamelCase__ ) ) ] def snake_case__ ( lowerCamelCase__ : List[dict] ) -> dict: return { key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]] if isinstance(gen_kwargs_list[0][key] , lowerCamelCase__ ) else gen_kwargs_list[0][key] for key in gen_kwargs_list[0] } def snake_case__ ( lowerCamelCase__ : np.random.Generator , lowerCamelCase__ : dict ) -> dict: A_ : Union[str, Any] = {len(lowerCamelCase__ ) for value in gen_kwargs.values() if isinstance(lowerCamelCase__ , lowerCamelCase__ )} A_ : Tuple = {} for size in list_sizes: A_ : str = list(range(lowerCamelCase__ ) ) rng.shuffle(indices_per_size[size] ) # Now let's copy the gen_kwargs and shuffle the lists based on their sizes A_ : Optional[Any] = dict(lowerCamelCase__ ) for key, value in shuffled_kwargs.items(): if isinstance(lowerCamelCase__ , lowerCamelCase__ ): A_ : int = [value[i] for i in indices_per_size[len(lowerCamelCase__ )]] return shuffled_kwargs
357
'''simple docstring''' import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import BertTokenizer, BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import AlignProcessor, EfficientNetImageProcessor @require_vision class UpperCamelCase_ (unittest.TestCase ): """simple docstring""" def _a ( self : Union[str, Any] ): """simple docstring""" A_ : Any = tempfile.mkdtemp() A_ : List[Any] = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] A_ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) A_ : Tuple = { '''do_resize''': True, '''size''': 20, '''do_center_crop''': True, '''crop_size''': 18, '''do_normalize''': True, '''image_mean''': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73], '''image_std''': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11], } A_ : List[Any] = os.path.join(self.tmpdirname , _lowerCamelCase ) with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp: json.dump(_lowerCamelCase , _lowerCamelCase ) def _a ( self : Dict , **_lowerCamelCase : Tuple ): """simple docstring""" return BertTokenizer.from_pretrained(self.tmpdirname , **_lowerCamelCase ) def _a ( self : Optional[int] , **_lowerCamelCase : Optional[int] ): """simple docstring""" return BertTokenizerFast.from_pretrained(self.tmpdirname , **_lowerCamelCase ) def _a ( self : Optional[Any] , **_lowerCamelCase : Tuple ): """simple docstring""" return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **_lowerCamelCase ) def _a ( self : Tuple ): """simple docstring""" shutil.rmtree(self.tmpdirname ) def _a ( self : int ): """simple docstring""" A_ : Union[str, Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] A_ : Any = [Image.fromarray(np.moveaxis(_lowerCamelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs def _a ( self : int ): """simple docstring""" A_ : Tuple = self.get_tokenizer() A_ : Tuple = self.get_rust_tokenizer() A_ : Dict = self.get_image_processor() A_ : List[Any] = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase ) processor_slow.save_pretrained(self.tmpdirname ) A_ : str = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=_lowerCamelCase ) A_ : Any = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase ) processor_fast.save_pretrained(self.tmpdirname ) A_ : List[Any] = AlignProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , _lowerCamelCase ) self.assertIsInstance(processor_fast.tokenizer , _lowerCamelCase ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , _lowerCamelCase ) self.assertIsInstance(processor_fast.image_processor , _lowerCamelCase ) def _a ( self : List[Any] ): """simple docstring""" A_ : List[str] = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) A_ : Optional[int] = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) A_ : Tuple = self.get_image_processor(do_normalize=_lowerCamelCase , padding_value=1.0 ) A_ : List[str] = AlignProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_lowerCamelCase , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , _lowerCamelCase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , _lowerCamelCase ) def _a ( self : Union[str, Any] ): """simple docstring""" A_ : Dict = self.get_image_processor() A_ : Any = self.get_tokenizer() A_ : List[str] = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase ) A_ : Any = self.prepare_image_inputs() A_ : List[Any] = image_processor(_lowerCamelCase , return_tensors='''np''' ) A_ : str = processor(images=_lowerCamelCase , return_tensors='''np''' ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 ) def _a ( self : Dict ): """simple docstring""" A_ : str = self.get_image_processor() A_ : List[str] = self.get_tokenizer() A_ : Optional[int] = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase ) A_ : int = '''lower newer''' A_ : str = processor(text=_lowerCamelCase ) A_ : Dict = tokenizer(_lowerCamelCase , padding='''max_length''' , max_length=64 ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def _a ( self : str ): """simple docstring""" A_ : Optional[int] = self.get_image_processor() A_ : Optional[Any] = self.get_tokenizer() A_ : List[str] = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase ) A_ : List[Any] = '''lower newer''' A_ : Optional[int] = self.prepare_image_inputs() A_ : List[Any] = processor(text=_lowerCamelCase , images=_lowerCamelCase ) self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] ) # test if it raises when no input is passed with pytest.raises(_lowerCamelCase ): processor() def _a ( self : List[str] ): """simple docstring""" A_ : Optional[Any] = self.get_image_processor() A_ : Optional[int] = self.get_tokenizer() A_ : List[Any] = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase ) A_ : str = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] A_ : str = processor.batch_decode(_lowerCamelCase ) A_ : Union[str, Any] = tokenizer.batch_decode(_lowerCamelCase ) self.assertListEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : Tuple ): """simple docstring""" A_ : str = self.get_image_processor() A_ : Tuple = self.get_tokenizer() A_ : Any = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase ) A_ : str = '''lower newer''' A_ : List[str] = self.prepare_image_inputs() A_ : Tuple = processor(text=_lowerCamelCase , images=_lowerCamelCase ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
4
0
'''simple docstring''' from __future__ import annotations class UpperCamelCase_ : """simple docstring""" def __init__( self : Optional[int] , _lowerCamelCase : int ): """simple docstring""" A_ : Union[str, Any] = order # a_{0} ... a_{k} A_ : Union[str, Any] = [1.0] + [0.0] * order # b_{0} ... b_{k} A_ : int = [1.0] + [0.0] * order # x[n-1] ... x[n-k] A_ : str = [0.0] * self.order # y[n-1] ... y[n-k] A_ : Optional[Any] = [0.0] * self.order def _a ( self : Dict , _lowerCamelCase : list[float] , _lowerCamelCase : list[float] ): """simple docstring""" if len(_lowerCamelCase ) < self.order: A_ : Any = [1.0, *a_coeffs] if len(_lowerCamelCase ) != self.order + 1: A_ : List[Any] = ( f'Expected a_coeffs to have {self.order + 1} elements ' f'for {self.order}-order filter, got {len(_lowerCamelCase )}' ) raise ValueError(_lowerCamelCase ) if len(_lowerCamelCase ) != self.order + 1: A_ : Union[str, Any] = ( f'Expected b_coeffs to have {self.order + 1} elements ' f'for {self.order}-order filter, got {len(_lowerCamelCase )}' ) raise ValueError(_lowerCamelCase ) A_ : Tuple = a_coeffs A_ : str = b_coeffs def _a ( self : Tuple , _lowerCamelCase : float ): """simple docstring""" A_ : Any = 0.0 # Start at index 1 and do index 0 at the end. for i in range(1 , self.order + 1 ): result += ( self.b_coeffs[i] * self.input_history[i - 1] - self.a_coeffs[i] * self.output_history[i - 1] ) A_ : str = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0] A_ : Optional[Any] = self.input_history[:-1] A_ : List[str] = self.output_history[:-1] A_ : Tuple = sample A_ : Tuple = result return result
358
'''simple docstring''' import json import os from pathlib import Path from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple, Union import sentencepiece from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging snake_case__ = logging.get_logger(__name__) snake_case__ = """▁""" snake_case__ = { """vocab_file""": """vocab.json""", """spm_file""": """sentencepiece.bpe.model""", } snake_case__ = { """vocab_file""": { """facebook/s2t-small-librispeech-asr""": ( """https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json""" ), }, """spm_file""": { """facebook/s2t-small-librispeech-asr""": ( """https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model""" ) }, } snake_case__ = { """facebook/s2t-small-librispeech-asr""": 10_24, } snake_case__ = ["""pt""", """fr""", """ru""", """nl""", """ro""", """it""", """es""", """de"""] snake_case__ = {"""mustc""": MUSTC_LANGS} class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = VOCAB_FILES_NAMES _lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP _lowerCAmelCase = MAX_MODEL_INPUT_SIZES _lowerCAmelCase = ['input_ids', 'attention_mask'] _lowerCAmelCase = [] def __init__( self : Optional[int] , _lowerCamelCase : List[str] , _lowerCamelCase : List[str] , _lowerCamelCase : str="<s>" , _lowerCamelCase : Union[str, Any]="</s>" , _lowerCamelCase : Dict="<pad>" , _lowerCamelCase : str="<unk>" , _lowerCamelCase : Union[str, Any]=False , _lowerCamelCase : int=False , _lowerCamelCase : Any=None , _lowerCamelCase : Any=None , _lowerCamelCase : Optional[Dict[str, Any]] = None , **_lowerCamelCase : Optional[int] , ): """simple docstring""" A_ : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=_lowerCamelCase , eos_token=_lowerCamelCase , unk_token=_lowerCamelCase , pad_token=_lowerCamelCase , do_upper_case=_lowerCamelCase , do_lower_case=_lowerCamelCase , tgt_lang=_lowerCamelCase , lang_codes=_lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCamelCase , ) A_ : Optional[int] = do_upper_case A_ : Tuple = do_lower_case A_ : Tuple = load_json(_lowerCamelCase ) A_ : Tuple = {v: k for k, v in self.encoder.items()} A_ : List[Any] = spm_file A_ : List[str] = load_spm(_lowerCamelCase , self.sp_model_kwargs ) if lang_codes is not None: A_ : Any = lang_codes A_ : Optional[Any] = LANGUAGES[lang_codes] A_ : Optional[Any] = [f'<lang:{lang}>' for lang in self.langs] A_ : Union[str, Any] = {lang: self.sp_model.PieceToId(f'<lang:{lang}>' ) for lang in self.langs} A_ : Optional[int] = self.lang_tokens A_ : int = tgt_lang if tgt_lang is not None else self.langs[0] self.set_tgt_lang_special_tokens(self._tgt_lang ) else: A_ : Dict = {} @property def _a ( self : Tuple ): """simple docstring""" return len(self.encoder ) @property def _a ( self : int ): """simple docstring""" return self._tgt_lang @tgt_lang.setter def _a ( self : List[str] , _lowerCamelCase : Any ): """simple docstring""" A_ : int = new_tgt_lang self.set_tgt_lang_special_tokens(_lowerCamelCase ) def _a ( self : Tuple , _lowerCamelCase : str ): """simple docstring""" A_ : List[str] = self.lang_code_to_id[tgt_lang] A_ : Optional[Any] = [lang_code_id] def _a ( self : Optional[Any] , _lowerCamelCase : str ): """simple docstring""" return self.sp_model.encode(_lowerCamelCase , out_type=_lowerCamelCase ) def _a ( self : List[Any] , _lowerCamelCase : int ): """simple docstring""" return self.encoder.get(_lowerCamelCase , self.encoder[self.unk_token] ) def _a ( self : int , _lowerCamelCase : int ): """simple docstring""" return self.decoder.get(_lowerCamelCase , self.unk_token ) def _a ( self : int , _lowerCamelCase : List[str] ): """simple docstring""" A_ : List[Any] = [] A_ : Any = '''''' for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: A_ : Union[str, Any] = self.sp_model.decode(_lowerCamelCase ) out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " " A_ : Optional[Any] = [] else: current_sub_tokens.append(_lowerCamelCase ) A_ : Tuple = self.sp_model.decode(_lowerCamelCase ) out_string += decoded.upper() if self.do_upper_case else decoded return out_string.strip() def _a ( self : int , _lowerCamelCase : Dict , _lowerCamelCase : Any=None ): """simple docstring""" if token_ids_a is None: return self.prefix_tokens + token_ids_a + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id] def _a ( self : List[Any] , _lowerCamelCase : List[int] , _lowerCamelCase : Optional[List[int]] = None , _lowerCamelCase : bool = False ): """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_lowerCamelCase , token_ids_a=_lowerCamelCase , already_has_special_tokens=_lowerCamelCase ) A_ : Tuple = [1] * len(self.prefix_tokens ) A_ : Tuple = [1] if token_ids_a is None: return prefix_ones + ([0] * len(_lowerCamelCase )) + suffix_ones return prefix_ones + ([0] * len(_lowerCamelCase )) + ([0] * len(_lowerCamelCase )) + suffix_ones def _a ( self : Dict ): """simple docstring""" A_ : Union[str, Any] = self.encoder.copy() vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : Union[str, Any] ): """simple docstring""" A_ : Dict = self.__dict__.copy() A_ : List[Any] = None return state def __setstate__( self : List[str] , _lowerCamelCase : Dict ): """simple docstring""" A_ : Dict = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): A_ : Optional[int] = {} A_ : int = load_spm(self.spm_file , self.sp_model_kwargs ) def _a ( self : Optional[Any] , _lowerCamelCase : str , _lowerCamelCase : Optional[str] = None ): """simple docstring""" A_ : Dict = Path(_lowerCamelCase ) assert save_dir.is_dir(), f'{save_directory} should be a directory' A_ : Optional[int] = save_dir / ( (filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''vocab_file'''] ) A_ : Optional[int] = save_dir / ( (filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''spm_file'''] ) save_json(self.encoder , _lowerCamelCase ) if os.path.abspath(self.spm_file ) != os.path.abspath(_lowerCamelCase ) and os.path.isfile(self.spm_file ): copyfile(self.spm_file , _lowerCamelCase ) elif not os.path.isfile(self.spm_file ): with open(_lowerCamelCase , '''wb''' ) as fi: A_ : List[str] = self.sp_model.serialized_model_proto() fi.write(_lowerCamelCase ) return (str(_lowerCamelCase ), str(_lowerCamelCase )) def snake_case__ ( lowerCamelCase__ : str , lowerCamelCase__ : Dict[str, Any] ) -> sentencepiece.SentencePieceProcessor: A_ : Tuple = sentencepiece.SentencePieceProcessor(**lowerCamelCase__ ) spm.Load(str(lowerCamelCase__ ) ) return spm def snake_case__ ( lowerCamelCase__ : str ) -> Union[Dict, List]: with open(lowerCamelCase__ , '''r''' ) as f: return json.load(lowerCamelCase__ ) def snake_case__ ( lowerCamelCase__ : Any , lowerCamelCase__ : str ) -> None: with open(lowerCamelCase__ , '''w''' ) as f: json.dump(lowerCamelCase__ , lowerCamelCase__ , indent=2 )
4
0
'''simple docstring''' def snake_case__ ( lowerCamelCase__ : list ) -> list: A_ : str = len(lowerCamelCase__ ) for i in range(1 , lowerCamelCase__ ): A_ : List[Any] = collection[i] A_ : int = 0 A_ : str = i - 1 while low <= high: A_ : Optional[int] = (low + high) // 2 if val < collection[mid]: A_ : Any = mid - 1 else: A_ : List[Any] = mid + 1 for j in range(lowerCamelCase__ , lowerCamelCase__ , -1 ): A_ : Optional[int] = collection[j - 1] A_ : Any = val return collection if __name__ == "__main__": snake_case__ = input("""Enter numbers separated by a comma:\n""").strip() snake_case__ = [int(item) for item in user_input.split(""",""")] print(binary_insertion_sort(unsorted))
359
'''simple docstring''' import argparse import json import os import sys import tempfile import unittest from argparse import Namespace from dataclasses import dataclass, field from enum import Enum from pathlib import Path from typing import List, Literal, Optional import yaml from transformers import HfArgumentParser, TrainingArguments from transformers.hf_argparser import make_choice_type_function, string_to_bool # Since Python 3.10, we can use the builtin `|` operator for Union types # See PEP 604: https://peps.python.org/pep-0604 snake_case__ = sys.version_info >= (3, 10) def snake_case__ ( lowerCamelCase__ : Union[str, Any]=None , lowerCamelCase__ : str=None ) -> List[Any]: return field(default_factory=lambda: default , metadata=lowerCamelCase__ ) @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = 42 _lowerCAmelCase = 42 _lowerCAmelCase = 42 _lowerCAmelCase = 42 @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = 4_2 _lowerCAmelCase = field(default='toto', metadata={'help': 'help message'} ) @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = False _lowerCAmelCase = True _lowerCAmelCase = None class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = 'titi' _lowerCAmelCase = 'toto' class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = 'titi' _lowerCAmelCase = 'toto' _lowerCAmelCase = 4_2 @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = "toto" def _a ( self : Optional[Any] ): """simple docstring""" A_ : Optional[int] = BasicEnum(self.foo ) @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = "toto" def _a ( self : Tuple ): """simple docstring""" A_ : Optional[Any] = MixedTypeEnum(self.foo ) @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = None _lowerCAmelCase = field(default=a__, metadata={'help': 'help message'} ) _lowerCAmelCase = None _lowerCAmelCase = list_field(default=[] ) _lowerCAmelCase = list_field(default=[] ) @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = list_field(default=[] ) _lowerCAmelCase = list_field(default=[1, 2, 3] ) _lowerCAmelCase = list_field(default=['Hallo', 'Bonjour', 'Hello'] ) _lowerCAmelCase = list_field(default=[0.1, 0.2, 0.3] ) @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = field() _lowerCAmelCase = field() _lowerCAmelCase = field() def _a ( self : Tuple ): """simple docstring""" A_ : Tuple = BasicEnum(self.required_enum ) @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = 42 _lowerCAmelCase = field() _lowerCAmelCase = None _lowerCAmelCase = field(default='toto', metadata={'help': 'help message'} ) _lowerCAmelCase = list_field(default=['Hallo', 'Bonjour', 'Hello'] ) if is_python_no_less_than_3_10: @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = False _lowerCAmelCase = True _lowerCAmelCase = None @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = None _lowerCAmelCase = field(default=a__, metadata={'help': 'help message'} ) _lowerCAmelCase = None _lowerCAmelCase = list_field(default=[] ) _lowerCAmelCase = list_field(default=[] ) class UpperCamelCase_ (unittest.TestCase ): """simple docstring""" def _a ( self : List[str] , _lowerCamelCase : argparse.ArgumentParser , _lowerCamelCase : argparse.ArgumentParser ): """simple docstring""" self.assertEqual(len(a._actions ) , len(b._actions ) ) for x, y in zip(a._actions , b._actions ): A_ : Union[str, Any] = {k: v for k, v in vars(_lowerCamelCase ).items() if k != '''container'''} A_ : Optional[Any] = {k: v for k, v in vars(_lowerCamelCase ).items() if k != '''container'''} # Choices with mixed type have custom function as "type" # So we need to compare results directly for equality if xx.get('''choices''' , _lowerCamelCase ) and yy.get('''choices''' , _lowerCamelCase ): for expected_choice in yy["choices"] + xx["choices"]: self.assertEqual(xx['''type'''](_lowerCamelCase ) , yy['''type'''](_lowerCamelCase ) ) del xx["type"], yy["type"] self.assertEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : Optional[int] ): """simple docstring""" A_ : Union[str, Any] = HfArgumentParser(_lowerCamelCase ) A_ : Optional[Any] = argparse.ArgumentParser() expected.add_argument('''--foo''' , type=_lowerCamelCase , required=_lowerCamelCase ) expected.add_argument('''--bar''' , type=_lowerCamelCase , required=_lowerCamelCase ) expected.add_argument('''--baz''' , type=_lowerCamelCase , required=_lowerCamelCase ) expected.add_argument('''--flag''' , type=_lowerCamelCase , default=_lowerCamelCase , const=_lowerCamelCase , nargs='''?''' ) self.argparsersEqual(_lowerCamelCase , _lowerCamelCase ) A_ : Union[str, Any] = ['''--foo''', '''1''', '''--baz''', '''quux''', '''--bar''', '''0.5'''] ((A_) ,) : List[str] = parser.parse_args_into_dataclasses(_lowerCamelCase , look_for_args_file=_lowerCamelCase ) self.assertFalse(example.flag ) def _a ( self : Dict ): """simple docstring""" A_ : int = HfArgumentParser(_lowerCamelCase ) A_ : int = argparse.ArgumentParser() expected.add_argument('''--foo''' , default=42 , type=_lowerCamelCase ) expected.add_argument('''--baz''' , default='''toto''' , type=_lowerCamelCase , help='''help message''' ) self.argparsersEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : Dict ): """simple docstring""" A_ : Any = argparse.ArgumentParser() expected.add_argument('''--foo''' , type=_lowerCamelCase , default=_lowerCamelCase , const=_lowerCamelCase , nargs='''?''' ) expected.add_argument('''--baz''' , type=_lowerCamelCase , default=_lowerCamelCase , const=_lowerCamelCase , nargs='''?''' ) # A boolean no_* argument always has to come after its "default: True" regular counter-part # and its default must be set to False expected.add_argument('''--no_baz''' , action='''store_false''' , default=_lowerCamelCase , dest='''baz''' ) expected.add_argument('''--opt''' , type=_lowerCamelCase , default=_lowerCamelCase ) A_ : Dict = [WithDefaultBoolExample] if is_python_no_less_than_3_10: dataclass_types.append(_lowerCamelCase ) for dataclass_type in dataclass_types: A_ : Any = HfArgumentParser(_lowerCamelCase ) self.argparsersEqual(_lowerCamelCase , _lowerCamelCase ) A_ : List[Any] = parser.parse_args([] ) self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , baz=_lowerCamelCase , opt=_lowerCamelCase ) ) A_ : Optional[int] = parser.parse_args(['''--foo''', '''--no_baz'''] ) self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , baz=_lowerCamelCase , opt=_lowerCamelCase ) ) A_ : Union[str, Any] = parser.parse_args(['''--foo''', '''--baz'''] ) self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , baz=_lowerCamelCase , opt=_lowerCamelCase ) ) A_ : List[str] = parser.parse_args(['''--foo''', '''True''', '''--baz''', '''True''', '''--opt''', '''True'''] ) self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , baz=_lowerCamelCase , opt=_lowerCamelCase ) ) A_ : List[Any] = parser.parse_args(['''--foo''', '''False''', '''--baz''', '''False''', '''--opt''', '''False'''] ) self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , baz=_lowerCamelCase , opt=_lowerCamelCase ) ) def _a ( self : List[Any] ): """simple docstring""" A_ : str = HfArgumentParser(_lowerCamelCase ) A_ : Optional[int] = argparse.ArgumentParser() expected.add_argument( '''--foo''' , default='''toto''' , choices=['''titi''', '''toto''', 42] , type=make_choice_type_function(['''titi''', '''toto''', 42] ) , ) self.argparsersEqual(_lowerCamelCase , _lowerCamelCase ) A_ : str = parser.parse_args([] ) self.assertEqual(args.foo , '''toto''' ) A_ : List[Any] = parser.parse_args_into_dataclasses([] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.toto ) A_ : int = parser.parse_args(['''--foo''', '''titi'''] ) self.assertEqual(args.foo , '''titi''' ) A_ : Dict = parser.parse_args_into_dataclasses(['''--foo''', '''titi'''] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.titi ) A_ : Tuple = parser.parse_args(['''--foo''', '''42'''] ) self.assertEqual(args.foo , 42 ) A_ : List[str] = parser.parse_args_into_dataclasses(['''--foo''', '''42'''] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo ) def _a ( self : Optional[int] ): """simple docstring""" @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = "toto" A_ : List[str] = HfArgumentParser(_lowerCamelCase ) A_ : Tuple = argparse.ArgumentParser() expected.add_argument( '''--foo''' , default='''toto''' , choices=('''titi''', '''toto''', 42) , type=make_choice_type_function(['''titi''', '''toto''', 42] ) , ) self.argparsersEqual(_lowerCamelCase , _lowerCamelCase ) A_ : Tuple = parser.parse_args([] ) self.assertEqual(args.foo , '''toto''' ) A_ : List[str] = parser.parse_args(['''--foo''', '''titi'''] ) self.assertEqual(args.foo , '''titi''' ) A_ : int = parser.parse_args(['''--foo''', '''42'''] ) self.assertEqual(args.foo , 42 ) def _a ( self : Dict ): """simple docstring""" A_ : int = HfArgumentParser(_lowerCamelCase ) A_ : List[Any] = argparse.ArgumentParser() expected.add_argument('''--foo_int''' , nargs='''+''' , default=[] , type=_lowerCamelCase ) expected.add_argument('''--bar_int''' , nargs='''+''' , default=[1, 2, 3] , type=_lowerCamelCase ) expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=_lowerCamelCase ) expected.add_argument('''--foo_float''' , nargs='''+''' , default=[0.1, 0.2, 0.3] , type=_lowerCamelCase ) self.argparsersEqual(_lowerCamelCase , _lowerCamelCase ) A_ : Optional[int] = parser.parse_args([] ) self.assertEqual( _lowerCamelCase , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['''Hallo''', '''Bonjour''', '''Hello'''] , foo_float=[0.1, 0.2, 0.3] ) , ) A_ : str = parser.parse_args('''--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'''.split() ) self.assertEqual(_lowerCamelCase , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['''a''', '''b''', '''c'''] , foo_float=[0.1, 0.7] ) ) def _a ( self : Dict ): """simple docstring""" A_ : Optional[Any] = argparse.ArgumentParser() expected.add_argument('''--foo''' , default=_lowerCamelCase , type=_lowerCamelCase ) expected.add_argument('''--bar''' , default=_lowerCamelCase , type=_lowerCamelCase , help='''help message''' ) expected.add_argument('''--baz''' , default=_lowerCamelCase , type=_lowerCamelCase ) expected.add_argument('''--ces''' , nargs='''+''' , default=[] , type=_lowerCamelCase ) expected.add_argument('''--des''' , nargs='''+''' , default=[] , type=_lowerCamelCase ) A_ : Tuple = [OptionalExample] if is_python_no_less_than_3_10: dataclass_types.append(_lowerCamelCase ) for dataclass_type in dataclass_types: A_ : int = HfArgumentParser(_lowerCamelCase ) self.argparsersEqual(_lowerCamelCase , _lowerCamelCase ) A_ : List[Any] = parser.parse_args([] ) self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , bar=_lowerCamelCase , baz=_lowerCamelCase , ces=[] , des=[] ) ) A_ : Optional[Any] = parser.parse_args('''--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'''.split() ) self.assertEqual(_lowerCamelCase , Namespace(foo=12 , bar=3.14 , baz='''42''' , ces=['''a''', '''b''', '''c'''] , des=[1, 2, 3] ) ) def _a ( self : List[Any] ): """simple docstring""" A_ : List[Any] = HfArgumentParser(_lowerCamelCase ) A_ : Dict = argparse.ArgumentParser() expected.add_argument('''--required_list''' , nargs='''+''' , type=_lowerCamelCase , required=_lowerCamelCase ) expected.add_argument('''--required_str''' , type=_lowerCamelCase , required=_lowerCamelCase ) expected.add_argument( '''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=_lowerCamelCase , ) self.argparsersEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : Optional[Any] ): """simple docstring""" A_ : Union[str, Any] = HfArgumentParser(_lowerCamelCase ) A_ : List[Any] = argparse.ArgumentParser() expected.add_argument('''--foo''' , type=_lowerCamelCase , required=_lowerCamelCase ) expected.add_argument( '''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=_lowerCamelCase , ) expected.add_argument('''--opt''' , type=_lowerCamelCase , default=_lowerCamelCase ) expected.add_argument('''--baz''' , default='''toto''' , type=_lowerCamelCase , help='''help message''' ) expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=_lowerCamelCase ) self.argparsersEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : Tuple ): """simple docstring""" A_ : List[Any] = HfArgumentParser(_lowerCamelCase ) A_ : Union[str, Any] = { '''foo''': 12, '''bar''': 3.14, '''baz''': '''42''', '''flag''': True, } A_ : Optional[int] = parser.parse_dict(_lowerCamelCase )[0] A_ : str = BasicExample(**_lowerCamelCase ) self.assertEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : List[str] ): """simple docstring""" A_ : Any = HfArgumentParser(_lowerCamelCase ) A_ : List[str] = { '''foo''': 12, '''bar''': 3.14, '''baz''': '''42''', '''flag''': True, '''extra''': 42, } self.assertRaises(_lowerCamelCase , parser.parse_dict , _lowerCamelCase , allow_extra_keys=_lowerCamelCase ) def _a ( self : Optional[Any] ): """simple docstring""" A_ : Union[str, Any] = HfArgumentParser(_lowerCamelCase ) A_ : List[str] = { '''foo''': 12, '''bar''': 3.14, '''baz''': '''42''', '''flag''': True, } with tempfile.TemporaryDirectory() as tmp_dir: A_ : Tuple = os.path.join(_lowerCamelCase , '''temp_json''' ) os.mkdir(_lowerCamelCase ) with open(temp_local_path + '''.json''' , '''w+''' ) as f: json.dump(_lowerCamelCase , _lowerCamelCase ) A_ : List[str] = parser.parse_yaml_file(Path(temp_local_path + '''.json''' ) )[0] A_ : Optional[Any] = BasicExample(**_lowerCamelCase ) self.assertEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : int ): """simple docstring""" A_ : int = HfArgumentParser(_lowerCamelCase ) A_ : Tuple = { '''foo''': 12, '''bar''': 3.14, '''baz''': '''42''', '''flag''': True, } with tempfile.TemporaryDirectory() as tmp_dir: A_ : int = os.path.join(_lowerCamelCase , '''temp_yaml''' ) os.mkdir(_lowerCamelCase ) with open(temp_local_path + '''.yaml''' , '''w+''' ) as f: yaml.dump(_lowerCamelCase , _lowerCamelCase ) A_ : Optional[Any] = parser.parse_yaml_file(Path(temp_local_path + '''.yaml''' ) )[0] A_ : int = BasicExample(**_lowerCamelCase ) self.assertEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : Union[str, Any] ): """simple docstring""" A_ : Dict = HfArgumentParser(_lowerCamelCase ) self.assertIsNotNone(_lowerCamelCase )
4
0
'''simple docstring''' import logging import os import sys from dataclasses import dataclass, field from itertools import chain from typing import Optional, Union import datasets import numpy as np import torch from datasets import load_dataset import transformers from transformers import ( AutoConfig, AutoModelForMultipleChoice, AutoTokenizer, HfArgumentParser, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.tokenization_utils_base import PreTrainedTokenizerBase from transformers.trainer_utils import get_last_checkpoint from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("""4.31.0""") snake_case__ = logging.getLogger(__name__) @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = field( metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} ) _lowerCAmelCase = field( default=a__, metadata={'help': 'Pretrained config name or path if not the same as model_name'} ) _lowerCAmelCase = field( default=a__, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} ) _lowerCAmelCase = field( default=a__, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'}, ) _lowerCAmelCase = field( default=a__, metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'}, ) _lowerCAmelCase = field( default='main', metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'}, ) _lowerCAmelCase = field( default=a__, metadata={ 'help': ( 'Will use the token generated when running `huggingface-cli login` (necessary to use this script ' 'with private models).' ) }, ) @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = field(default=a__, metadata={'help': 'The input training data file (a text file).'} ) _lowerCAmelCase = field( default=a__, metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'}, ) _lowerCAmelCase = field( default=a__, metadata={'help': 'Overwrite the cached training and evaluation sets'} ) _lowerCAmelCase = field( default=a__, metadata={'help': 'The number of processes to use for the preprocessing.'}, ) _lowerCAmelCase = field( default=a__, metadata={ 'help': ( 'The maximum total input sequence length after tokenization. If passed, sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) }, ) _lowerCAmelCase = field( default=a__, metadata={ 'help': ( 'Whether to pad all samples to the maximum sentence length. ' 'If False, will pad the samples dynamically when batching to the maximum length in the batch. More ' 'efficient on GPU but very bad for TPU.' ) }, ) _lowerCAmelCase = field( default=a__, metadata={ 'help': ( 'For debugging purposes or quicker training, truncate the number of training examples to this ' 'value if set.' ) }, ) _lowerCAmelCase = field( default=a__, metadata={ 'help': ( 'For debugging purposes or quicker training, truncate the number of evaluation examples to this ' 'value if set.' ) }, ) def _a ( self : str ): """simple docstring""" if self.train_file is not None: A_ : Any = self.train_file.split('''.''' )[-1] assert extension in ["csv", "json"], "`train_file` should be a csv or a json file." if self.validation_file is not None: A_ : str = self.validation_file.split('''.''' )[-1] assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file." @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = 4_2 _lowerCAmelCase = True _lowerCAmelCase = None _lowerCAmelCase = None def __call__( self : int , _lowerCamelCase : List[str] ): """simple docstring""" A_ : int = '''label''' if '''label''' in features[0].keys() else '''labels''' A_ : Any = [feature.pop(_lowerCamelCase ) for feature in features] A_ : Dict = len(_lowerCamelCase ) A_ : Dict = len(features[0]['''input_ids'''] ) A_ : List[Any] = [ [{k: v[i] for k, v in feature.items()} for i in range(_lowerCamelCase )] for feature in features ] A_ : Optional[int] = list(chain(*_lowerCamelCase ) ) A_ : Optional[int] = self.tokenizer.pad( _lowerCamelCase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' , ) # Un-flatten A_ : str = {k: v.view(_lowerCamelCase , _lowerCamelCase , -1 ) for k, v in batch.items()} # Add back labels A_ : Optional[Any] = torch.tensor(_lowerCamelCase , dtype=torch.intaa ) return batch def snake_case__ ( ) -> List[str]: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. A_ : Tuple = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. A_ : List[str] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: A_ : str = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry('''run_swag''' , lowerCamelCase__ , lowerCamelCase__ ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() A_ : Optional[Any] = training_args.get_process_log_level() logger.setLevel(lowerCamelCase__ ) datasets.utils.logging.set_verbosity(lowerCamelCase__ ) transformers.utils.logging.set_verbosity(lowerCamelCase__ ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}' + f'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' ) logger.info(f'Training/evaluation parameters {training_args}' ) # Detecting last checkpoint. A_ : List[str] = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: A_ : Any = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( f'Output directory ({training_args.output_dir}) already exists and is not empty. ' '''Use --overwrite_output_dir to overcome.''' ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ' '''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' ) # Set seed before initializing model. set_seed(training_args.seed ) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.train_file is not None or data_args.validation_file is not None: A_ : Optional[Any] = {} if data_args.train_file is not None: A_ : Any = data_args.train_file if data_args.validation_file is not None: A_ : Tuple = data_args.validation_file A_ : Optional[int] = data_args.train_file.split('''.''' )[-1] A_ : str = load_dataset( lowerCamelCase__ , data_files=lowerCamelCase__ , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) else: # Downloading and loading the swag dataset from the hub. A_ : List[Any] = load_dataset( '''swag''' , '''regular''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Load pretrained model and tokenizer # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. A_ : Any = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) A_ : Union[str, Any] = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) A_ : Optional[Any] = AutoModelForMultipleChoice.from_pretrained( model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=lowerCamelCase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # When using your own dataset or a different dataset from swag, you will probably need to change this. A_ : str = [f'ending{i}' for i in range(4 )] A_ : Union[str, Any] = '''sent1''' A_ : List[Any] = '''sent2''' if data_args.max_seq_length is None: A_ : int = tokenizer.model_max_length if max_seq_length > 1_0_2_4: logger.warning( '''The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value''' ''' of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can''' ''' override this default with `--block_size xxx`.''' ) A_ : Tuple = 1_0_2_4 else: if data_args.max_seq_length > tokenizer.model_max_length: logger.warning( f'The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the' f'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.' ) A_ : List[Any] = min(data_args.max_seq_length , tokenizer.model_max_length ) # Preprocessing the datasets. def preprocess_function(lowerCamelCase__ : str ): A_ : Tuple = [[context] * 4 for context in examples[context_name]] A_ : Union[str, Any] = examples[question_header_name] A_ : Optional[Any] = [ [f'{header} {examples[end][i]}' for end in ending_names] for i, header in enumerate(lowerCamelCase__ ) ] # Flatten out A_ : int = list(chain(*lowerCamelCase__ ) ) A_ : int = list(chain(*lowerCamelCase__ ) ) # Tokenize A_ : Tuple = tokenizer( lowerCamelCase__ , lowerCamelCase__ , truncation=lowerCamelCase__ , max_length=lowerCamelCase__ , padding='''max_length''' if data_args.pad_to_max_length else False , ) # Un-flatten return {k: [v[i : i + 4] for i in range(0 , len(lowerCamelCase__ ) , 4 )] for k, v in tokenized_examples.items()} if training_args.do_train: if "train" not in raw_datasets: raise ValueError('''--do_train requires a train dataset''' ) A_ : Tuple = raw_datasets['''train'''] if data_args.max_train_samples is not None: A_ : Union[str, Any] = min(len(lowerCamelCase__ ) , data_args.max_train_samples ) A_ : Any = train_dataset.select(range(lowerCamelCase__ ) ) with training_args.main_process_first(desc='''train dataset map pre-processing''' ): A_ : Optional[Any] = train_dataset.map( lowerCamelCase__ , batched=lowerCamelCase__ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , ) if training_args.do_eval: if "validation" not in raw_datasets: raise ValueError('''--do_eval requires a validation dataset''' ) A_ : int = raw_datasets['''validation'''] if data_args.max_eval_samples is not None: A_ : Optional[Any] = min(len(lowerCamelCase__ ) , data_args.max_eval_samples ) A_ : Any = eval_dataset.select(range(lowerCamelCase__ ) ) with training_args.main_process_first(desc='''validation dataset map pre-processing''' ): A_ : List[str] = eval_dataset.map( lowerCamelCase__ , batched=lowerCamelCase__ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , ) # Data collator A_ : Optional[Any] = ( default_data_collator if data_args.pad_to_max_length else DataCollatorForMultipleChoice(tokenizer=lowerCamelCase__ , pad_to_multiple_of=8 if training_args.fpaa else None ) ) # Metric def compute_metrics(lowerCamelCase__ : List[str] ): A_ : Any = eval_predictions A_ : str = np.argmax(lowerCamelCase__ , axis=1 ) return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()} # Initialize our Trainer A_ : Any = Trainer( model=lowerCamelCase__ , args=lowerCamelCase__ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=lowerCamelCase__ , data_collator=lowerCamelCase__ , compute_metrics=lowerCamelCase__ , ) # Training if training_args.do_train: A_ : Tuple = None if training_args.resume_from_checkpoint is not None: A_ : Any = training_args.resume_from_checkpoint elif last_checkpoint is not None: A_ : Any = last_checkpoint A_ : List[str] = trainer.train(resume_from_checkpoint=lowerCamelCase__ ) trainer.save_model() # Saves the tokenizer too for easy upload A_ : str = train_result.metrics A_ : List[str] = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(lowerCamelCase__ ) ) A_ : Dict = min(lowerCamelCase__ , len(lowerCamelCase__ ) ) trainer.log_metrics('''train''' , lowerCamelCase__ ) trainer.save_metrics('''train''' , lowerCamelCase__ ) trainer.save_state() # Evaluation if training_args.do_eval: logger.info('''*** Evaluate ***''' ) A_ : Union[str, Any] = trainer.evaluate() A_ : Union[str, Any] = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowerCamelCase__ ) A_ : Union[str, Any] = min(lowerCamelCase__ , len(lowerCamelCase__ ) ) trainer.log_metrics('''eval''' , lowerCamelCase__ ) trainer.save_metrics('''eval''' , lowerCamelCase__ ) A_ : Optional[int] = { '''finetuned_from''': model_args.model_name_or_path, '''tasks''': '''multiple-choice''', '''dataset_tags''': '''swag''', '''dataset_args''': '''regular''', '''dataset''': '''SWAG''', '''language''': '''en''', } if training_args.push_to_hub: trainer.push_to_hub(**lowerCamelCase__ ) else: trainer.create_model_card(**lowerCamelCase__ ) def snake_case__ ( lowerCamelCase__ : Dict ) -> Tuple: # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
360
'''simple docstring''' import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoImageProcessor, ViTImageProcessor from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test sys.path.append(str(Path(__file__).parent.parent / """utils""")) from test_module.custom_image_processing import CustomImageProcessor # noqa E402 snake_case__ = get_tests_dir("""fixtures""") class UpperCamelCase_ (unittest.TestCase ): """simple docstring""" def _a ( self : List[str] ): """simple docstring""" A_ : List[Any] = mock.Mock() A_ : List[str] = 500 A_ : Tuple = {} A_ : int = HTTPError A_ : Optional[Any] = {} # Download this model to make sure it's in the cache. A_ : Tuple = ViTImageProcessor.from_pretrained('''hf-internal-testing/tiny-random-vit''' ) # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch('''requests.Session.request''' , return_value=_lowerCamelCase ) as mock_head: A_ : List[Any] = ViTImageProcessor.from_pretrained('''hf-internal-testing/tiny-random-vit''' ) # This check we did call the fake head request mock_head.assert_called() def _a ( self : Tuple ): """simple docstring""" A_ : Tuple = ViTImageProcessor.from_pretrained( '''https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json''' ) def _a ( self : Dict ): """simple docstring""" with self.assertRaises(_lowerCamelCase ): # config is in subfolder, the following should not work without specifying the subfolder A_ : Any = AutoImageProcessor.from_pretrained('''hf-internal-testing/stable-diffusion-all-variants''' ) A_ : Tuple = AutoImageProcessor.from_pretrained( '''hf-internal-testing/stable-diffusion-all-variants''' , subfolder='''feature_extractor''' ) self.assertIsNotNone(_lowerCamelCase ) @is_staging_test class UpperCamelCase_ (unittest.TestCase ): """simple docstring""" @classmethod def _a ( cls : Tuple ): """simple docstring""" A_ : int = TOKEN HfFolder.save_token(_lowerCamelCase ) @classmethod def _a ( cls : str ): """simple docstring""" try: delete_repo(token=cls._token , repo_id='''test-image-processor''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''valid_org/test-image-processor-org''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''test-dynamic-image-processor''' ) except HTTPError: pass def _a ( self : List[Any] ): """simple docstring""" A_ : Dict = ViTImageProcessor.from_pretrained(_lowerCamelCase ) image_processor.push_to_hub('''test-image-processor''' , use_auth_token=self._token ) A_ : Optional[int] = ViTImageProcessor.from_pretrained(f'{USER}/test-image-processor' ) for k, v in image_processor.__dict__.items(): self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) ) # Reset repo delete_repo(token=self._token , repo_id='''test-image-processor''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained( _lowerCamelCase , repo_id='''test-image-processor''' , push_to_hub=_lowerCamelCase , use_auth_token=self._token ) A_ : List[Any] = ViTImageProcessor.from_pretrained(f'{USER}/test-image-processor' ) for k, v in image_processor.__dict__.items(): self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) ) def _a ( self : Optional[Any] ): """simple docstring""" A_ : int = ViTImageProcessor.from_pretrained(_lowerCamelCase ) image_processor.push_to_hub('''valid_org/test-image-processor''' , use_auth_token=self._token ) A_ : List[str] = ViTImageProcessor.from_pretrained('''valid_org/test-image-processor''' ) for k, v in image_processor.__dict__.items(): self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) ) # Reset repo delete_repo(token=self._token , repo_id='''valid_org/test-image-processor''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained( _lowerCamelCase , repo_id='''valid_org/test-image-processor-org''' , push_to_hub=_lowerCamelCase , use_auth_token=self._token ) A_ : Any = ViTImageProcessor.from_pretrained('''valid_org/test-image-processor-org''' ) for k, v in image_processor.__dict__.items(): self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) ) def _a ( self : Optional[Any] ): """simple docstring""" CustomImageProcessor.register_for_auto_class() A_ : Any = CustomImageProcessor.from_pretrained(_lowerCamelCase ) image_processor.push_to_hub('''test-dynamic-image-processor''' , use_auth_token=self._token ) # This has added the proper auto_map field to the config self.assertDictEqual( image_processor.auto_map , {'''AutoImageProcessor''': '''custom_image_processing.CustomImageProcessor'''} , ) A_ : str = AutoImageProcessor.from_pretrained( f'{USER}/test-dynamic-image-processor' , trust_remote_code=_lowerCamelCase ) # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module self.assertEqual(new_image_processor.__class__.__name__ , '''CustomImageProcessor''' )
4
0
'''simple docstring''' from ..utils import DummyObject, requires_backends class UpperCamelCase_ (metaclass=a__ ): """simple docstring""" _lowerCAmelCase = ['keras_nlp'] def __init__( self : Optional[Any] , *_lowerCamelCase : Dict , **_lowerCamelCase : List[str] ): """simple docstring""" requires_backends(self , ['''keras_nlp'''] )
361
'''simple docstring''' # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import torch from ..models.clipseg import CLIPSegForImageSegmentation from ..utils import is_vision_available, requires_backends from .base import PipelineTool if is_vision_available(): from PIL import Image class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = ( 'This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image.' 'It takes two arguments named `image` which should be the original image, and `label` which should be a text ' 'describing the elements what should be identified in the segmentation mask. The tool returns the mask.' ) _lowerCAmelCase = 'CIDAS/clipseg-rd64-refined' _lowerCAmelCase = 'image_segmenter' _lowerCAmelCase = CLIPSegForImageSegmentation _lowerCAmelCase = ['image', 'text'] _lowerCAmelCase = ['image'] def __init__( self : Optional[int] , *_lowerCamelCase : Optional[int] , **_lowerCamelCase : Union[str, Any] ): """simple docstring""" requires_backends(self , ['''vision'''] ) super().__init__(*_lowerCamelCase , **_lowerCamelCase ) def _a ( self : List[str] , _lowerCamelCase : "Image" , _lowerCamelCase : str ): """simple docstring""" return self.pre_processor(text=[label] , images=[image] , padding=_lowerCamelCase , return_tensors='''pt''' ) def _a ( self : Union[str, Any] , _lowerCamelCase : Optional[int] ): """simple docstring""" with torch.no_grad(): A_ : Optional[int] = self.model(**_lowerCamelCase ).logits return logits def _a ( self : List[str] , _lowerCamelCase : Optional[int] ): """simple docstring""" A_ : int = outputs.cpu().detach().numpy() A_ : Tuple = 0 A_ : List[str] = 1 return Image.fromarray((array * 255).astype(np.uinta ) )
4
0
'''simple docstring''' import argparse import logging from collections import namedtuple import torch from model_bertabs import BertAbsSummarizer from models.model_builder import AbsSummarizer # The authors' implementation from transformers import BertTokenizer logging.basicConfig(level=logging.INFO) snake_case__ = logging.getLogger(__name__) snake_case__ = """Hello world! cécé herlolip""" snake_case__ = namedtuple( """BertAbsConfig""", [ """temp_dir""", """large""", """use_bert_emb""", """finetune_bert""", """encoder""", """share_emb""", """max_pos""", """enc_layers""", """enc_hidden_size""", """enc_heads""", """enc_ff_size""", """enc_dropout""", """dec_layers""", """dec_hidden_size""", """dec_heads""", """dec_ff_size""", """dec_dropout""", ], ) def snake_case__ ( lowerCamelCase__ : Any , lowerCamelCase__ : Optional[int] ) -> Tuple: A_ : str = BertAbsConfig( temp_dir='''.''' , finetune_bert=lowerCamelCase__ , large=lowerCamelCase__ , share_emb=lowerCamelCase__ , use_bert_emb=lowerCamelCase__ , encoder='''bert''' , max_pos=5_1_2 , enc_layers=6 , enc_hidden_size=5_1_2 , enc_heads=8 , enc_ff_size=5_1_2 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=7_6_8 , dec_heads=8 , dec_ff_size=2_0_4_8 , dec_dropout=0.2 , ) A_ : List[Any] = torch.load(lowerCamelCase__ , lambda lowerCamelCase__ , lowerCamelCase__ : storage ) A_ : int = AbsSummarizer(lowerCamelCase__ , torch.device('''cpu''' ) , lowerCamelCase__ ) original.eval() A_ : Tuple = BertAbsSummarizer(lowerCamelCase__ , torch.device('''cpu''' ) ) new_model.eval() # ------------------- # Convert the weights # ------------------- logging.info('''convert the model''' ) new_model.bert.load_state_dict(original.bert.state_dict() ) new_model.decoder.load_state_dict(original.decoder.state_dict() ) new_model.generator.load_state_dict(original.generator.state_dict() ) # ---------------------------------- # Make sure the outpus are identical # ---------------------------------- logging.info('''Make sure that the models\' outputs are identical''' ) A_ : int = BertTokenizer.from_pretrained('''bert-base-uncased''' ) # prepare the model inputs A_ : Optional[int] = tokenizer.encode('''This is sample éàalj\'-.''' ) encoder_input_ids.extend([tokenizer.pad_token_id] * (5_1_2 - len(lowerCamelCase__ )) ) A_ : List[str] = torch.tensor(lowerCamelCase__ ).unsqueeze(0 ) A_ : Dict = tokenizer.encode('''This is sample 3 éàalj\'-.''' ) decoder_input_ids.extend([tokenizer.pad_token_id] * (5_1_2 - len(lowerCamelCase__ )) ) A_ : List[Any] = torch.tensor(lowerCamelCase__ ).unsqueeze(0 ) # failsafe to make sure the weights reset does not affect the # loaded weights. assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0 # forward pass A_ : str = encoder_input_ids A_ : Dict = decoder_input_ids A_ : Any = None A_ : Optional[int] = None A_ : int = None A_ : Optional[Any] = None A_ : Optional[Any] = None # The original model does not apply the geneator layer immediatly but rather in # the beam search (where it combines softmax + linear layer). Since we already # apply the softmax in our generation process we only apply the linear layer here. # We make sure that the outputs of the full stack are identical A_ : Union[str, Any] = original(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )[0] A_ : Optional[int] = original.generator(lowerCamelCase__ ) A_ : Union[str, Any] = new_model( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )[0] A_ : str = new_model.generator(lowerCamelCase__ ) A_ : Optional[int] = torch.max(torch.abs(output_converted_model - output_original_model ) ).item() print('''Maximum absolute difference beween weights: {:.2f}'''.format(lowerCamelCase__ ) ) A_ : Tuple = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item() print('''Maximum absolute difference beween weights: {:.2f}'''.format(lowerCamelCase__ ) ) A_ : Any = torch.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 ) if are_identical: logging.info('''all weights are equal up to 1e-3''' ) else: raise ValueError('''the weights are different. The new model is likely different from the original one.''' ) # The model has been saved with torch.save(model) and this is bound to the exact # directory structure. We save the state_dict instead. logging.info('''saving the model\'s state dictionary''' ) torch.save( new_model.state_dict() , '''./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin''' ) if __name__ == "__main__": snake_case__ = argparse.ArgumentParser() parser.add_argument( """--bertabs_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""", ) snake_case__ = parser.parse_args() convert_bertabs_checkpoints( args.bertabs_checkpoint_path, args.pytorch_dump_folder_path, )
362
'''simple docstring''' from collections.abc import Sequence def snake_case__ ( lowerCamelCase__ : Sequence[float] , lowerCamelCase__ : bool = False ) -> float: if not arr: return 0 A_ : Union[str, Any] = 0 if allow_empty_subarrays else float('''-inf''' ) A_ : str = 0.0 for num in arr: A_ : Any = max(0 if allow_empty_subarrays else num , curr_sum + num ) A_ : Tuple = max(lowerCamelCase__ , lowerCamelCase__ ) return max_sum if __name__ == "__main__": from doctest import testmod testmod() snake_case__ = [-2, 1, -3, 4, -1, 2, 1, -5, 4] print(F'{max_subarray_sum(nums) = }')
4
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) snake_case__ = {"""configuration_plbart""": ["""PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PLBartConfig"""]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case__ = ["""PLBartTokenizer"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case__ = [ """PLBART_PRETRAINED_MODEL_ARCHIVE_LIST""", """PLBartForCausalLM""", """PLBartForConditionalGeneration""", """PLBartForSequenceClassification""", """PLBartModel""", """PLBartPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_plbart import PLBartTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_plbart import ( PLBART_PRETRAINED_MODEL_ARCHIVE_LIST, PLBartForCausalLM, PLBartForConditionalGeneration, PLBartForSequenceClassification, PLBartModel, PLBartPreTrainedModel, ) else: import sys snake_case__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
363
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging snake_case__ = logging.get_logger(__name__) snake_case__ = { """facebook/s2t-wav2vec2-large-en-de""": ( """https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json""" ), # See all Speech2Text models at https://huggingface.co/models?filter=speech2text2 } class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = 'speech_to_text_2' _lowerCAmelCase = ['past_key_values'] _lowerCAmelCase = {'num_attention_heads': 'decoder_attention_heads', 'hidden_size': 'd_model'} def __init__( self : Optional[Any] , _lowerCamelCase : Optional[Any]=10000 , _lowerCamelCase : List[Any]=6 , _lowerCamelCase : int=2048 , _lowerCamelCase : Dict=4 , _lowerCamelCase : str=0.0 , _lowerCamelCase : int=True , _lowerCamelCase : int="relu" , _lowerCamelCase : Any=256 , _lowerCamelCase : List[Any]=0.1 , _lowerCamelCase : Tuple=0.0 , _lowerCamelCase : Union[str, Any]=0.0 , _lowerCamelCase : Optional[Any]=0.02 , _lowerCamelCase : int=2 , _lowerCamelCase : List[str]=True , _lowerCamelCase : str=1 , _lowerCamelCase : List[Any]=0 , _lowerCamelCase : Optional[int]=2 , _lowerCamelCase : Tuple=1024 , **_lowerCamelCase : int , ): """simple docstring""" A_ : Optional[int] = vocab_size A_ : Tuple = d_model A_ : List[str] = decoder_ffn_dim A_ : str = decoder_layers A_ : Any = decoder_attention_heads A_ : int = dropout A_ : str = attention_dropout A_ : Optional[int] = activation_dropout A_ : str = activation_function A_ : List[Any] = init_std A_ : Union[str, Any] = decoder_layerdrop A_ : Any = use_cache A_ : Optional[Any] = decoder_layers A_ : Optional[int] = scale_embedding # scale factor will be sqrt(d_model) if True A_ : Optional[Any] = max_target_positions super().__init__( pad_token_id=_lowerCamelCase , bos_token_id=_lowerCamelCase , eos_token_id=_lowerCamelCase , decoder_start_token_id=_lowerCamelCase , **_lowerCamelCase , )
4
0
import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_nllb import NllbTokenizer else: snake_case__ = None snake_case__ = logging.get_logger(__name__) snake_case__ = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""} snake_case__ = { """vocab_file""": { """facebook/nllb-200-distilled-600M""": ( """https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model""" ), }, """tokenizer_file""": { """facebook/nllb-200-distilled-600M""": ( """https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json""" ), }, } snake_case__ = { """facebook/nllb-large-en-ro""": 10_24, """facebook/nllb-200-distilled-600M""": 10_24, } # fmt: off snake_case__ = ["""ace_Arab""", """ace_Latn""", """acm_Arab""", """acq_Arab""", """aeb_Arab""", """afr_Latn""", """ajp_Arab""", """aka_Latn""", """amh_Ethi""", """apc_Arab""", """arb_Arab""", """ars_Arab""", """ary_Arab""", """arz_Arab""", """asm_Beng""", """ast_Latn""", """awa_Deva""", """ayr_Latn""", """azb_Arab""", """azj_Latn""", """bak_Cyrl""", """bam_Latn""", """ban_Latn""", """bel_Cyrl""", """bem_Latn""", """ben_Beng""", """bho_Deva""", """bjn_Arab""", """bjn_Latn""", """bod_Tibt""", """bos_Latn""", """bug_Latn""", """bul_Cyrl""", """cat_Latn""", """ceb_Latn""", """ces_Latn""", """cjk_Latn""", """ckb_Arab""", """crh_Latn""", """cym_Latn""", """dan_Latn""", """deu_Latn""", """dik_Latn""", """dyu_Latn""", """dzo_Tibt""", """ell_Grek""", """eng_Latn""", """epo_Latn""", """est_Latn""", """eus_Latn""", """ewe_Latn""", """fao_Latn""", """pes_Arab""", """fij_Latn""", """fin_Latn""", """fon_Latn""", """fra_Latn""", """fur_Latn""", """fuv_Latn""", """gla_Latn""", """gle_Latn""", """glg_Latn""", """grn_Latn""", """guj_Gujr""", """hat_Latn""", """hau_Latn""", """heb_Hebr""", """hin_Deva""", """hne_Deva""", """hrv_Latn""", """hun_Latn""", """hye_Armn""", """ibo_Latn""", """ilo_Latn""", """ind_Latn""", """isl_Latn""", """ita_Latn""", """jav_Latn""", """jpn_Jpan""", """kab_Latn""", """kac_Latn""", """kam_Latn""", """kan_Knda""", """kas_Arab""", """kas_Deva""", """kat_Geor""", """knc_Arab""", """knc_Latn""", """kaz_Cyrl""", """kbp_Latn""", """kea_Latn""", """khm_Khmr""", """kik_Latn""", """kin_Latn""", """kir_Cyrl""", """kmb_Latn""", """kon_Latn""", """kor_Hang""", """kmr_Latn""", """lao_Laoo""", """lvs_Latn""", """lij_Latn""", """lim_Latn""", """lin_Latn""", """lit_Latn""", """lmo_Latn""", """ltg_Latn""", """ltz_Latn""", """lua_Latn""", """lug_Latn""", """luo_Latn""", """lus_Latn""", """mag_Deva""", """mai_Deva""", """mal_Mlym""", """mar_Deva""", """min_Latn""", """mkd_Cyrl""", """plt_Latn""", """mlt_Latn""", """mni_Beng""", """khk_Cyrl""", """mos_Latn""", """mri_Latn""", """zsm_Latn""", """mya_Mymr""", """nld_Latn""", """nno_Latn""", """nob_Latn""", """npi_Deva""", """nso_Latn""", """nus_Latn""", """nya_Latn""", """oci_Latn""", """gaz_Latn""", """ory_Orya""", """pag_Latn""", """pan_Guru""", """pap_Latn""", """pol_Latn""", """por_Latn""", """prs_Arab""", """pbt_Arab""", """quy_Latn""", """ron_Latn""", """run_Latn""", """rus_Cyrl""", """sag_Latn""", """san_Deva""", """sat_Beng""", """scn_Latn""", """shn_Mymr""", """sin_Sinh""", """slk_Latn""", """slv_Latn""", """smo_Latn""", """sna_Latn""", """snd_Arab""", """som_Latn""", """sot_Latn""", """spa_Latn""", """als_Latn""", """srd_Latn""", """srp_Cyrl""", """ssw_Latn""", """sun_Latn""", """swe_Latn""", """swh_Latn""", """szl_Latn""", """tam_Taml""", """tat_Cyrl""", """tel_Telu""", """tgk_Cyrl""", """tgl_Latn""", """tha_Thai""", """tir_Ethi""", """taq_Latn""", """taq_Tfng""", """tpi_Latn""", """tsn_Latn""", """tso_Latn""", """tuk_Latn""", """tum_Latn""", """tur_Latn""", """twi_Latn""", """tzm_Tfng""", """uig_Arab""", """ukr_Cyrl""", """umb_Latn""", """urd_Arab""", """uzn_Latn""", """vec_Latn""", """vie_Latn""", """war_Latn""", """wol_Latn""", """xho_Latn""", """ydd_Hebr""", """yor_Latn""", """yue_Hant""", """zho_Hans""", """zho_Hant""", """zul_Latn"""] class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = VOCAB_FILES_NAMES _lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP _lowerCAmelCase = ['input_ids', 'attention_mask'] _lowerCAmelCase = NllbTokenizer _lowerCAmelCase = [] _lowerCAmelCase = [] def __init__( self : Optional[Any] , _lowerCamelCase : Any=None , _lowerCamelCase : List[str]=None , _lowerCamelCase : str="<s>" , _lowerCamelCase : Optional[Any]="</s>" , _lowerCamelCase : str="</s>" , _lowerCamelCase : Dict="<s>" , _lowerCamelCase : Optional[int]="<unk>" , _lowerCamelCase : Tuple="<pad>" , _lowerCamelCase : List[str]="<mask>" , _lowerCamelCase : Union[str, Any]=None , _lowerCamelCase : List[Any]=None , _lowerCamelCase : Tuple=None , _lowerCamelCase : Union[str, Any]=False , **_lowerCamelCase : Tuple , ): """simple docstring""" A_ : Dict = AddedToken(_lowerCamelCase , lstrip=_lowerCamelCase , rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase , _lowerCamelCase ) else mask_token A_ : Union[str, Any] = legacy_behaviour super().__init__( vocab_file=_lowerCamelCase , tokenizer_file=_lowerCamelCase , bos_token=_lowerCamelCase , eos_token=_lowerCamelCase , sep_token=_lowerCamelCase , cls_token=_lowerCamelCase , unk_token=_lowerCamelCase , pad_token=_lowerCamelCase , mask_token=_lowerCamelCase , src_lang=_lowerCamelCase , tgt_lang=_lowerCamelCase , additional_special_tokens=_lowerCamelCase , legacy_behaviour=_lowerCamelCase , **_lowerCamelCase , ) A_ : str = vocab_file A_ : int = False if not self.vocab_file else True A_ : Any = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. _additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens] ) self.add_special_tokens({'''additional_special_tokens''': _additional_special_tokens} ) A_ : str = { lang_code: self.convert_tokens_to_ids(_lowerCamelCase ) for lang_code in FAIRSEQ_LANGUAGE_CODES } A_ : Any = src_lang if src_lang is not None else '''eng_Latn''' A_ : Tuple = self.convert_tokens_to_ids(self._src_lang ) A_ : Union[str, Any] = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) @property def _a ( self : List[str] ): """simple docstring""" return self._src_lang @src_lang.setter def _a ( self : int , _lowerCamelCase : str ): """simple docstring""" A_ : Dict = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def _a ( self : int , _lowerCamelCase : List[int] , _lowerCamelCase : Optional[List[int]] = None ): """simple docstring""" if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def _a ( self : Optional[int] , _lowerCamelCase : List[int] , _lowerCamelCase : Optional[List[int]] = None ): """simple docstring""" A_ : Dict = [self.sep_token_id] A_ : List[str] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _a ( self : Optional[Any] , _lowerCamelCase : List[str] , _lowerCamelCase : str , _lowerCamelCase : Optional[str] , _lowerCamelCase : Optional[str] , **_lowerCamelCase : str ): """simple docstring""" if src_lang is None or tgt_lang is None: raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' ) A_ : List[Any] = src_lang A_ : List[Any] = self(_lowerCamelCase , add_special_tokens=_lowerCamelCase , return_tensors=_lowerCamelCase , **_lowerCamelCase ) A_ : str = self.convert_tokens_to_ids(_lowerCamelCase ) A_ : Tuple = tgt_lang_id return inputs def _a ( self : Any , _lowerCamelCase : List[str] , _lowerCamelCase : str = "eng_Latn" , _lowerCamelCase : Optional[List[str]] = None , _lowerCamelCase : str = "fra_Latn" , **_lowerCamelCase : List[Any] , ): """simple docstring""" A_ : Dict = src_lang A_ : Any = tgt_lang return super().prepare_seqaseq_batch(_lowerCamelCase , _lowerCamelCase , **_lowerCamelCase ) def _a ( self : List[str] ): """simple docstring""" return self.set_src_lang_special_tokens(self.src_lang ) def _a ( self : Optional[int] ): """simple docstring""" return self.set_tgt_lang_special_tokens(self.tgt_lang ) def _a ( self : Any , _lowerCamelCase : Optional[Any] ): """simple docstring""" A_ : Optional[int] = self.convert_tokens_to_ids(_lowerCamelCase ) if self.legacy_behaviour: A_ : Tuple = [] A_ : int = [self.eos_token_id, self.cur_lang_code] else: A_ : Any = [self.cur_lang_code] A_ : str = [self.eos_token_id] A_ : Dict = self.convert_ids_to_tokens(self.prefix_tokens ) A_ : str = self.convert_ids_to_tokens(self.suffix_tokens ) A_ : Tuple = processors.TemplateProcessing( single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def _a ( self : Union[str, Any] , _lowerCamelCase : str ): """simple docstring""" A_ : str = self.convert_tokens_to_ids(_lowerCamelCase ) if self.legacy_behaviour: A_ : Tuple = [] A_ : Optional[Any] = [self.eos_token_id, self.cur_lang_code] else: A_ : Dict = [self.cur_lang_code] A_ : Tuple = [self.eos_token_id] A_ : List[Any] = self.convert_ids_to_tokens(self.prefix_tokens ) A_ : int = self.convert_ids_to_tokens(self.suffix_tokens ) A_ : Union[str, Any] = processors.TemplateProcessing( single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def _a ( self : Tuple , _lowerCamelCase : str , _lowerCamelCase : Optional[str] = None ): """simple docstring""" if not self.can_save_slow_tokenizer: raise ValueError( '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ''' '''tokenizer.''' ) if not os.path.isdir(_lowerCamelCase ): logger.error(f'Vocabulary path ({save_directory}) should be a directory.' ) return A_ : Any = os.path.join( _lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCamelCase ): copyfile(self.vocab_file , _lowerCamelCase ) return (out_vocab_file,)
364
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING snake_case__ = logging.get_logger(__name__) snake_case__ = { """microsoft/table-transformer-detection""": ( """https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json""" ), } class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = 'table-transformer' _lowerCAmelCase = ['past_key_values'] _lowerCAmelCase = { 'hidden_size': 'd_model', 'num_attention_heads': 'encoder_attention_heads', } def __init__( self : Any , _lowerCamelCase : Union[str, Any]=True , _lowerCamelCase : Dict=None , _lowerCamelCase : int=3 , _lowerCamelCase : Any=100 , _lowerCamelCase : List[Any]=6 , _lowerCamelCase : Tuple=2048 , _lowerCamelCase : Any=8 , _lowerCamelCase : Dict=6 , _lowerCamelCase : Tuple=2048 , _lowerCamelCase : int=8 , _lowerCamelCase : Optional[int]=0.0 , _lowerCamelCase : List[Any]=0.0 , _lowerCamelCase : List[Any]=True , _lowerCamelCase : Optional[int]="relu" , _lowerCamelCase : Union[str, Any]=256 , _lowerCamelCase : Any=0.1 , _lowerCamelCase : Tuple=0.0 , _lowerCamelCase : Optional[int]=0.0 , _lowerCamelCase : str=0.02 , _lowerCamelCase : Tuple=1.0 , _lowerCamelCase : Dict=False , _lowerCamelCase : str="sine" , _lowerCamelCase : str="resnet50" , _lowerCamelCase : Any=True , _lowerCamelCase : List[str]=False , _lowerCamelCase : Any=1 , _lowerCamelCase : int=5 , _lowerCamelCase : Tuple=2 , _lowerCamelCase : Optional[int]=1 , _lowerCamelCase : Any=1 , _lowerCamelCase : Dict=5 , _lowerCamelCase : str=2 , _lowerCamelCase : Union[str, Any]=0.1 , **_lowerCamelCase : int , ): """simple docstring""" if backbone_config is not None and use_timm_backbone: raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' ) if not use_timm_backbone: if backbone_config is None: logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' ) A_ : int = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] ) elif isinstance(_lowerCamelCase , _lowerCamelCase ): A_ : str = backbone_config.get('''model_type''' ) A_ : Optional[int] = CONFIG_MAPPING[backbone_model_type] A_ : List[str] = config_class.from_dict(_lowerCamelCase ) # set timm attributes to None A_ ,A_ ,A_ : Union[str, Any] = None, None, None A_ : Optional[Any] = use_timm_backbone A_ : Optional[int] = backbone_config A_ : Optional[Any] = num_channels A_ : Dict = num_queries A_ : str = d_model A_ : List[str] = encoder_ffn_dim A_ : int = encoder_layers A_ : Optional[Any] = encoder_attention_heads A_ : List[str] = decoder_ffn_dim A_ : Any = decoder_layers A_ : List[str] = decoder_attention_heads A_ : Tuple = dropout A_ : Optional[Any] = attention_dropout A_ : Any = activation_dropout A_ : List[Any] = activation_function A_ : Dict = init_std A_ : Any = init_xavier_std A_ : List[Any] = encoder_layerdrop A_ : int = decoder_layerdrop A_ : Any = encoder_layers A_ : List[str] = auxiliary_loss A_ : List[Any] = position_embedding_type A_ : Optional[Any] = backbone A_ : Tuple = use_pretrained_backbone A_ : List[Any] = dilation # Hungarian matcher A_ : List[str] = class_cost A_ : str = bbox_cost A_ : Union[str, Any] = giou_cost # Loss coefficients A_ : Any = mask_loss_coefficient A_ : Optional[int] = dice_loss_coefficient A_ : Dict = bbox_loss_coefficient A_ : int = giou_loss_coefficient A_ : int = eos_coefficient super().__init__(is_encoder_decoder=_lowerCamelCase , **_lowerCamelCase ) @property def _a ( self : List[Any] ): """simple docstring""" return self.encoder_attention_heads @property def _a ( self : Any ): """simple docstring""" return self.d_model class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = version.parse('1.11' ) @property def _a ( self : Tuple ): """simple docstring""" return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ('''pixel_mask''', {0: '''batch'''}), ] ) @property def _a ( self : Optional[int] ): """simple docstring""" return 1E-5 @property def _a ( self : str ): """simple docstring""" return 12
4
0
'''simple docstring''' import argparse from collections import OrderedDict from pathlib import Path import torch from huggingface_hub import hf_hub_download from PIL import Image from torchvision.transforms import functional as F from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection from transformers.utils import logging logging.set_verbosity_info() snake_case__ = logging.get_logger(__name__) # here we list all keys to be renamed (original name on the left, our name on the right) snake_case__ = [] for i in range(6): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (F'transformer.encoder.layers.{i}.self_attn.out_proj.weight', F'encoder.layers.{i}.self_attn.out_proj.weight') ) rename_keys.append( (F'transformer.encoder.layers.{i}.self_attn.out_proj.bias', F'encoder.layers.{i}.self_attn.out_proj.bias') ) rename_keys.append((F'transformer.encoder.layers.{i}.linear1.weight', F'encoder.layers.{i}.fc1.weight')) rename_keys.append((F'transformer.encoder.layers.{i}.linear1.bias', F'encoder.layers.{i}.fc1.bias')) rename_keys.append((F'transformer.encoder.layers.{i}.linear2.weight', F'encoder.layers.{i}.fc2.weight')) rename_keys.append((F'transformer.encoder.layers.{i}.linear2.bias', F'encoder.layers.{i}.fc2.bias')) rename_keys.append( (F'transformer.encoder.layers.{i}.norm1.weight', F'encoder.layers.{i}.self_attn_layer_norm.weight') ) rename_keys.append((F'transformer.encoder.layers.{i}.norm1.bias', F'encoder.layers.{i}.self_attn_layer_norm.bias')) rename_keys.append((F'transformer.encoder.layers.{i}.norm2.weight', F'encoder.layers.{i}.final_layer_norm.weight')) rename_keys.append((F'transformer.encoder.layers.{i}.norm2.bias', F'encoder.layers.{i}.final_layer_norm.bias')) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( (F'transformer.decoder.layers.{i}.self_attn.out_proj.weight', F'decoder.layers.{i}.self_attn.out_proj.weight') ) rename_keys.append( (F'transformer.decoder.layers.{i}.self_attn.out_proj.bias', F'decoder.layers.{i}.self_attn.out_proj.bias') ) rename_keys.append( ( F'transformer.decoder.layers.{i}.multihead_attn.out_proj.weight', F'decoder.layers.{i}.encoder_attn.out_proj.weight', ) ) rename_keys.append( ( F'transformer.decoder.layers.{i}.multihead_attn.out_proj.bias', F'decoder.layers.{i}.encoder_attn.out_proj.bias', ) ) rename_keys.append((F'transformer.decoder.layers.{i}.linear1.weight', F'decoder.layers.{i}.fc1.weight')) rename_keys.append((F'transformer.decoder.layers.{i}.linear1.bias', F'decoder.layers.{i}.fc1.bias')) rename_keys.append((F'transformer.decoder.layers.{i}.linear2.weight', F'decoder.layers.{i}.fc2.weight')) rename_keys.append((F'transformer.decoder.layers.{i}.linear2.bias', F'decoder.layers.{i}.fc2.bias')) rename_keys.append( (F'transformer.decoder.layers.{i}.norm1.weight', F'decoder.layers.{i}.self_attn_layer_norm.weight') ) rename_keys.append((F'transformer.decoder.layers.{i}.norm1.bias', F'decoder.layers.{i}.self_attn_layer_norm.bias')) rename_keys.append( (F'transformer.decoder.layers.{i}.norm2.weight', F'decoder.layers.{i}.encoder_attn_layer_norm.weight') ) rename_keys.append( (F'transformer.decoder.layers.{i}.norm2.bias', F'decoder.layers.{i}.encoder_attn_layer_norm.bias') ) rename_keys.append((F'transformer.decoder.layers.{i}.norm3.weight', F'decoder.layers.{i}.final_layer_norm.weight')) rename_keys.append((F'transformer.decoder.layers.{i}.norm3.bias', F'decoder.layers.{i}.final_layer_norm.bias')) # convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads rename_keys.extend( [ ("""input_proj.weight""", """input_projection.weight"""), ("""input_proj.bias""", """input_projection.bias"""), ("""query_embed.weight""", """query_position_embeddings.weight"""), ("""transformer.encoder.norm.weight""", """encoder.layernorm.weight"""), ("""transformer.encoder.norm.bias""", """encoder.layernorm.bias"""), ("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""), ("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""), ("""class_embed.weight""", """class_labels_classifier.weight"""), ("""class_embed.bias""", """class_labels_classifier.bias"""), ("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""), ("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""), ("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""), ("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""), ("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""), ("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""), ] ) def snake_case__ ( lowerCamelCase__ : List[Any] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : List[Any] ) -> Optional[Any]: A_ : Tuple = state_dict.pop(lowerCamelCase__ ) A_ : Optional[Any] = val def snake_case__ ( lowerCamelCase__ : Dict ) -> Any: A_ : int = OrderedDict() for key, value in state_dict.items(): if "backbone.0.body" in key: A_ : int = key.replace('''backbone.0.body''' , '''backbone.conv_encoder.model''' ) A_ : List[str] = value else: A_ : Optional[int] = value return new_state_dict def snake_case__ ( lowerCamelCase__ : Union[str, Any] ) -> Optional[Any]: A_ : Any = '''''' # first: transformer encoder for i in range(6 ): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) A_ : Tuple = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight' ) A_ : Dict = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias' ) # next, add query, keys and values (in that order) to the state dict A_ : str = in_proj_weight[:2_5_6, :] A_ : Optional[Any] = in_proj_bias[:2_5_6] A_ : Dict = in_proj_weight[2_5_6:5_1_2, :] A_ : Tuple = in_proj_bias[2_5_6:5_1_2] A_ : Tuple = in_proj_weight[-2_5_6:, :] A_ : Optional[int] = in_proj_bias[-2_5_6:] # next: transformer decoder (which is a bit more complex because it also includes cross-attention) for i in range(6 ): # read in weights + bias of input projection layer of self-attention A_ : Union[str, Any] = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight' ) A_ : Dict = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias' ) # next, add query, keys and values (in that order) to the state dict A_ : List[str] = in_proj_weight[:2_5_6, :] A_ : int = in_proj_bias[:2_5_6] A_ : Any = in_proj_weight[2_5_6:5_1_2, :] A_ : List[str] = in_proj_bias[2_5_6:5_1_2] A_ : Union[str, Any] = in_proj_weight[-2_5_6:, :] A_ : Optional[Any] = in_proj_bias[-2_5_6:] # read in weights + bias of input projection layer of cross-attention A_ : Tuple = state_dict.pop( f'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight' ) A_ : Optional[Any] = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias' ) # next, add query, keys and values (in that order) of cross-attention to the state dict A_ : Dict = in_proj_weight_cross_attn[:2_5_6, :] A_ : Tuple = in_proj_bias_cross_attn[:2_5_6] A_ : int = in_proj_weight_cross_attn[2_5_6:5_1_2, :] A_ : List[str] = in_proj_bias_cross_attn[2_5_6:5_1_2] A_ : Any = in_proj_weight_cross_attn[-2_5_6:, :] A_ : Any = in_proj_bias_cross_attn[-2_5_6:] def snake_case__ ( lowerCamelCase__ : List[str] , lowerCamelCase__ : Tuple ) -> Dict: A_ : int = image.size A_ : Tuple = max(lowerCamelCase__ , lowerCamelCase__ ) A_ : Optional[Any] = 8_0_0 if '''detection''' in checkpoint_url else 1_0_0_0 A_ : Union[str, Any] = target_max_size / current_max_size A_ : Any = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) ) return resized_image def snake_case__ ( lowerCamelCase__ : Tuple ) -> str: A_ : Any = F.to_tensor(lowerCamelCase__ ) A_ : Optional[Any] = F.normalize(lowerCamelCase__ , mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ) return image @torch.no_grad() def snake_case__ ( lowerCamelCase__ : List[Any] , lowerCamelCase__ : int , lowerCamelCase__ : int ) -> str: logger.info('''Converting model...''' ) # load original state dict A_ : Tuple = torch.hub.load_state_dict_from_url(lowerCamelCase__ , map_location='''cpu''' ) # rename keys for src, dest in rename_keys: rename_key(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) A_ : str = rename_backbone_keys(lowerCamelCase__ ) # query, key and value matrices need special treatment read_in_q_k_v(lowerCamelCase__ ) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them A_ : List[Any] = '''model.''' for key in state_dict.copy().keys(): if not key.startswith('''class_labels_classifier''' ) and not key.startswith('''bbox_predictor''' ): A_ : List[Any] = state_dict.pop(lowerCamelCase__ ) A_ : str = val # create HuggingFace model and load state dict A_ : Union[str, Any] = TableTransformerConfig( backbone='''resnet18''' , mask_loss_coefficient=1 , dice_loss_coefficient=1 , ce_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.4 , class_cost=1 , bbox_cost=5 , giou_cost=2 , ) if "detection" in checkpoint_url: A_ : Dict = 1_5 A_ : Dict = 2 A_ : int = {0: '''table''', 1: '''table rotated'''} A_ : List[str] = idalabel A_ : Optional[int] = {v: k for k, v in idalabel.items()} else: A_ : Union[str, Any] = 1_2_5 A_ : Optional[Any] = 6 A_ : Optional[Any] = { 0: '''table''', 1: '''table column''', 2: '''table row''', 3: '''table column header''', 4: '''table projected row header''', 5: '''table spanning cell''', } A_ : int = idalabel A_ : Tuple = {v: k for k, v in idalabel.items()} A_ : Optional[Any] = DetrImageProcessor( format='''coco_detection''' , max_size=8_0_0 if '''detection''' in checkpoint_url else 1_0_0_0 ) A_ : int = TableTransformerForObjectDetection(lowerCamelCase__ ) model.load_state_dict(lowerCamelCase__ ) model.eval() # verify our conversion A_ : Optional[int] = '''example_pdf.png''' if '''detection''' in checkpoint_url else '''example_table.png''' A_ : Union[str, Any] = hf_hub_download(repo_id='''nielsr/example-pdf''' , repo_type='''dataset''' , filename=lowerCamelCase__ ) A_ : Tuple = Image.open(lowerCamelCase__ ).convert('''RGB''' ) A_ : int = normalize(resize(lowerCamelCase__ , lowerCamelCase__ ) ).unsqueeze(0 ) A_ : str = model(lowerCamelCase__ ) if "detection" in checkpoint_url: A_ : str = (1, 1_5, 3) A_ : int = torch.tensor( [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]] ) A_ : Tuple = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]] ) else: A_ : Optional[int] = (1, 1_2_5, 7) A_ : Dict = torch.tensor( [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]] ) A_ : Any = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]] ) assert outputs.logits.shape == expected_shape assert torch.allclose(outputs.logits[0, :3, :3] , lowerCamelCase__ , atol=1e-4 ) assert torch.allclose(outputs.pred_boxes[0, :3, :3] , lowerCamelCase__ , atol=1e-4 ) print('''Looks ok!''' ) if pytorch_dump_folder_path is not None: # Save model and image processor logger.info(f'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' ) Path(lowerCamelCase__ ).mkdir(exist_ok=lowerCamelCase__ ) model.save_pretrained(lowerCamelCase__ ) image_processor.save_pretrained(lowerCamelCase__ ) if push_to_hub: # Push model to HF hub logger.info('''Pushing model to the hub...''' ) A_ : List[Any] = ( '''microsoft/table-transformer-detection''' if '''detection''' in checkpoint_url else '''microsoft/table-transformer-structure-recognition''' ) model.push_to_hub(lowerCamelCase__ ) image_processor.push_to_hub(lowerCamelCase__ ) if __name__ == "__main__": snake_case__ = argparse.ArgumentParser() parser.add_argument( """--checkpoint_url""", default="""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""", type=str, choices=[ """https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""", """https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth""", ], help="""URL of the Table Transformer checkpoint you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) snake_case__ = parser.parse_args() convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
365
'''simple docstring''' import inspect import unittest from transformers import BitConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class UpperCamelCase_ : """simple docstring""" def __init__( self : Optional[Any] , _lowerCamelCase : int , _lowerCamelCase : List[str]=3 , _lowerCamelCase : Any=32 , _lowerCamelCase : Union[str, Any]=3 , _lowerCamelCase : int=10 , _lowerCamelCase : Union[str, Any]=[8, 16, 32, 64] , _lowerCamelCase : Dict=[1, 1, 2, 1] , _lowerCamelCase : Union[str, Any]=True , _lowerCamelCase : Optional[int]=True , _lowerCamelCase : Any="relu" , _lowerCamelCase : Optional[Any]=3 , _lowerCamelCase : Optional[Any]=None , _lowerCamelCase : Dict=["stage2", "stage3", "stage4"] , _lowerCamelCase : Union[str, Any]=[2, 3, 4] , _lowerCamelCase : Tuple=1 , ): """simple docstring""" A_ : List[str] = parent A_ : List[str] = batch_size A_ : Union[str, Any] = image_size A_ : Tuple = num_channels A_ : Any = embeddings_size A_ : int = hidden_sizes A_ : Optional[Any] = depths A_ : List[Any] = is_training A_ : Optional[int] = use_labels A_ : int = hidden_act A_ : Tuple = num_labels A_ : Union[str, Any] = scope A_ : List[Any] = len(_lowerCamelCase ) A_ : Union[str, Any] = out_features A_ : List[Any] = out_indices A_ : Dict = num_groups def _a ( self : Optional[int] ): """simple docstring""" A_ : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A_ : Union[str, Any] = None if self.use_labels: A_ : Any = ids_tensor([self.batch_size] , self.num_labels ) A_ : Any = self.get_config() return config, pixel_values, labels def _a ( self : Union[str, Any] ): """simple docstring""" return BitConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , ) def _a ( self : List[Any] , _lowerCamelCase : List[str] , _lowerCamelCase : List[str] , _lowerCamelCase : Optional[Any] ): """simple docstring""" A_ : Any = BitModel(config=_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() A_ : int = model(_lowerCamelCase ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def _a ( self : Optional[int] , _lowerCamelCase : List[Any] , _lowerCamelCase : str , _lowerCamelCase : Optional[int] ): """simple docstring""" A_ : Dict = self.num_labels A_ : Optional[Any] = BitForImageClassification(_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() A_ : List[Any] = model(_lowerCamelCase , labels=_lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _a ( self : Any , _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : List[Any] ): """simple docstring""" A_ : List[Any] = BitBackbone(config=_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() A_ : int = model(_lowerCamelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None A_ : Optional[Any] = None A_ : int = BitBackbone(config=_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() A_ : Optional[int] = model(_lowerCamelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def _a ( self : List[Any] ): """simple docstring""" A_ : Union[str, Any] = self.prepare_config_and_inputs() A_ ,A_ ,A_ : Union[str, Any] = config_and_inputs A_ : str = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class UpperCamelCase_ (a__, a__, unittest.TestCase ): """simple docstring""" _lowerCAmelCase = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else () _lowerCAmelCase = ( {'feature-extraction': BitModel, 'image-classification': BitForImageClassification} if is_torch_available() else {} ) _lowerCAmelCase = False _lowerCAmelCase = False _lowerCAmelCase = False _lowerCAmelCase = False _lowerCAmelCase = False def _a ( self : Optional[Any] ): """simple docstring""" A_ : List[str] = BitModelTester(self ) A_ : Optional[Any] = ConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase ) def _a ( self : Optional[Any] ): """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _a ( self : List[Any] ): """simple docstring""" return @unittest.skip(reason='''Bit does not output attentions''' ) def _a ( self : str ): """simple docstring""" pass @unittest.skip(reason='''Bit does not use inputs_embeds''' ) def _a ( self : Union[str, Any] ): """simple docstring""" pass @unittest.skip(reason='''Bit does not support input and output embeddings''' ) def _a ( self : Any ): """simple docstring""" pass def _a ( self : List[Any] ): """simple docstring""" A_ ,A_ : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A_ : Dict = model_class(_lowerCamelCase ) A_ : Dict = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A_ : int = [*signature.parameters.keys()] A_ : Union[str, Any] = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , _lowerCamelCase ) def _a ( self : Optional[Any] ): """simple docstring""" A_ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCamelCase ) def _a ( self : Optional[Any] ): """simple docstring""" A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*_lowerCamelCase ) def _a ( self : Tuple ): """simple docstring""" A_ ,A_ : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A_ : str = model_class(config=_lowerCamelCase ) for name, module in model.named_modules(): if isinstance(_lowerCamelCase , (nn.BatchNormad, nn.GroupNorm) ): self.assertTrue( torch.all(module.weight == 1 ) , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , ) self.assertTrue( torch.all(module.bias == 0 ) , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , ) def _a ( self : int ): """simple docstring""" def check_hidden_states_output(_lowerCamelCase : Union[str, Any] , _lowerCamelCase : Dict , _lowerCamelCase : int ): A_ : Union[str, Any] = model_class(_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() with torch.no_grad(): A_ : Union[str, Any] = model(**self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) ) A_ : int = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states A_ : List[Any] = self.model_tester.num_stages self.assertEqual(len(_lowerCamelCase ) , expected_num_stages + 1 ) # Bit's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) A_ ,A_ : str = self.model_tester.prepare_config_and_inputs_for_common() A_ : Tuple = ['''preactivation''', '''bottleneck'''] for model_class in self.all_model_classes: for layer_type in layers_type: A_ : Tuple = layer_type A_ : Optional[Any] = True check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] A_ : List[str] = True check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) @unittest.skip(reason='''Bit does not use feedforward chunking''' ) def _a ( self : Tuple ): """simple docstring""" pass def _a ( self : str ): """simple docstring""" A_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_lowerCamelCase ) @slow def _a ( self : Union[str, Any] ): """simple docstring""" for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A_ : List[Any] = BitModel.from_pretrained(_lowerCamelCase ) self.assertIsNotNone(_lowerCamelCase ) def snake_case__ ( ) -> Optional[int]: A_ : Optional[int] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class UpperCamelCase_ (unittest.TestCase ): """simple docstring""" @cached_property def _a ( self : List[Any] ): """simple docstring""" return ( BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def _a ( self : Dict ): """simple docstring""" A_ : Optional[int] = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(_lowerCamelCase ) A_ : Union[str, Any] = self.default_image_processor A_ : Optional[int] = prepare_img() A_ : int = image_processor(images=_lowerCamelCase , return_tensors='''pt''' ).to(_lowerCamelCase ) # forward pass with torch.no_grad(): A_ : Union[str, Any] = model(**_lowerCamelCase ) # verify the logits A_ : Dict = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , _lowerCamelCase ) A_ : Tuple = torch.tensor([[-0.65_26, -0.52_63, -1.43_98]] ).to(_lowerCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCamelCase , atol=1E-4 ) ) @require_torch class UpperCamelCase_ (a__, unittest.TestCase ): """simple docstring""" _lowerCAmelCase = (BitBackbone,) if is_torch_available() else () _lowerCAmelCase = BitConfig _lowerCAmelCase = False def _a ( self : List[str] ): """simple docstring""" A_ : Union[str, Any] = BitModelTester(self )
4
0
'''simple docstring''' import argparse import os import re import zipfile import torch from transformers import AutoTokenizer, GPTaConfig def snake_case__ ( lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : str , lowerCamelCase__ : Optional[int]=0 ) -> str: # Format the message. if name is None: A_ : Any = None else: A_ : int = '''.''' * max(0 , spaces - 2 ) + '''# {:''' + str(5_0 - spaces ) + '''s}''' A_ : Tuple = fmt.format(lowerCamelCase__ ) # Print and recurse (if needed). if isinstance(lowerCamelCase__ , lowerCamelCase__ ): if msg is not None: print(lowerCamelCase__ ) for k in val.keys(): recursive_print(lowerCamelCase__ , val[k] , spaces + 2 ) elif isinstance(lowerCamelCase__ , torch.Tensor ): print(lowerCamelCase__ , ''':''' , val.size() ) else: print(lowerCamelCase__ , ''':''' , lowerCamelCase__ ) def snake_case__ ( lowerCamelCase__ : Tuple , lowerCamelCase__ : str , lowerCamelCase__ : str , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Optional[Any] ) -> Tuple: # Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :] # for compatibility with later versions of NVIDIA Megatron-LM. # The inverse operation is performed inside Megatron-LM to read checkpoints: # https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209 # If param is the weight tensor of the self-attention block, the returned tensor # will have to be transposed one more time to be read by HuggingFace GPT2. A_ : Union[str, Any] = param.size() if checkpoint_version == 1.0: # version 1.0 stores [num_heads * hidden_size * num_splits, :] A_ : Optional[Any] = (num_heads, hidden_size, num_splits) + input_shape[1:] A_ : Dict = param.view(*lowerCamelCase__ ) A_ : str = param.transpose(0 , 2 ) A_ : Any = param.transpose(1 , 2 ).contiguous() elif checkpoint_version >= 2.0: # other versions store [num_heads * num_splits * hidden_size, :] A_ : Union[str, Any] = (num_heads, num_splits, hidden_size) + input_shape[1:] A_ : str = param.view(*lowerCamelCase__ ) A_ : Any = param.transpose(0 , 1 ).contiguous() A_ : Any = param.view(*lowerCamelCase__ ) return param def snake_case__ ( lowerCamelCase__ : List[str] , lowerCamelCase__ : int , lowerCamelCase__ : List[Any] ) -> Optional[Any]: # The converted output model. A_ : List[Any] = {} # old versions did not store training args A_ : str = input_state_dict.get('''args''' , lowerCamelCase__ ) if ds_args is not None: # do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint # from pprint import pprint # pprint(vars(ds_args)) A_ : List[Any] = ds_args.padded_vocab_size A_ : Dict = ds_args.max_position_embeddings A_ : List[Any] = ds_args.hidden_size A_ : str = ds_args.num_layers A_ : Any = ds_args.num_attention_heads A_ : Any = ds_args.ffn_hidden_size # pprint(config) # The number of heads. A_ : int = config.n_head # The hidden_size per head. A_ : str = config.n_embd // config.n_head # Megatron-LM checkpoint version if "checkpoint_version" in input_state_dict.keys(): A_ : Union[str, Any] = input_state_dict['''checkpoint_version'''] else: A_ : Union[str, Any] = 0.0 # The model. A_ : Optional[int] = input_state_dict['''model'''] # The language model. A_ : str = model['''language_model'''] # The embeddings. A_ : str = lm['''embedding'''] # The word embeddings. A_ : Any = embeddings['''word_embeddings''']['''weight'''] # Truncate the embedding table to vocab_size rows. A_ : Union[str, Any] = word_embeddings[: config.vocab_size, :] A_ : Dict = word_embeddings # The position embeddings. A_ : List[Any] = embeddings['''position_embeddings''']['''weight'''] # Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size] A_ : Tuple = pos_embeddings.size(0 ) if n_positions != config.n_positions: raise ValueError( f'pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don\'t match' ) # Store the position embeddings. A_ : str = pos_embeddings # The transformer. A_ : List[Any] = lm['''transformer'''] if '''transformer''' in lm.keys() else lm['''encoder'''] # The regex to extract layer names. A_ : Optional[Any] = re.compile(R'''layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)''' ) # The simple map of names for "automated" rules. A_ : Any = { '''attention.dense''': '''.attn.c_proj.''', '''self_attention.dense''': '''.attn.c_proj.''', '''mlp.dense_h_to_4h''': '''.mlp.c_fc.''', '''mlp.dense_4h_to_h''': '''.mlp.c_proj.''', } # Extract the layers. for key, val in transformer.items(): # Match the name. A_ : List[Any] = layer_re.match(lowerCamelCase__ ) # Stop if that's not a layer if m is None: break # The index of the layer. A_ : Dict = int(m.group(1 ) ) # The name of the operation. A_ : Dict = m.group(2 ) # Is it a weight or a bias? A_ : Dict = m.group(3 ) # The name of the layer. A_ : Tuple = f'transformer.h.{layer_idx}' # For layernorm(s), simply store the layer norm. if op_name.endswith('''layernorm''' ): A_ : Optional[int] = '''ln_1''' if op_name.startswith('''input''' ) else '''ln_2''' A_ : Union[str, Any] = val # Transpose the QKV matrix. elif ( op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value" ) and weight_or_bias == "weight": # Insert a tensor of 1x1xDxD bias. A_ : Optional[Any] = torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.floataa ) ).view( 1 , 1 , lowerCamelCase__ , lowerCamelCase__ ) A_ : Union[str, Any] = causal_mask # Insert a "dummy" tensor for masked_bias. A_ : Optional[int] = torch.tensor(-1e4 , dtype=torch.floataa ) A_ : int = masked_bias A_ : Dict = fix_query_key_value_ordering(lowerCamelCase__ , lowerCamelCase__ , 3 , lowerCamelCase__ , lowerCamelCase__ ) # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D. A_ : str = out_val.transpose(0 , 1 ).contiguous() # Store. A_ : int = out_val # Transpose the bias. elif ( op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value" ) and weight_or_bias == "bias": A_ : Optional[int] = fix_query_key_value_ordering(lowerCamelCase__ , lowerCamelCase__ , 3 , lowerCamelCase__ , lowerCamelCase__ ) # Store. No change of shape. A_ : List[Any] = out_val # Transpose the weights. elif weight_or_bias == "weight": A_ : List[str] = megatron_to_transformers[op_name] A_ : List[Any] = val.transpose(0 , 1 ) # Copy the bias. elif weight_or_bias == "bias": A_ : List[str] = megatron_to_transformers[op_name] A_ : Tuple = val # DEBUG. assert config.n_layer == layer_idx + 1 # The final layernorm. A_ : Tuple = transformer['''final_layernorm.weight'''] A_ : Union[str, Any] = transformer['''final_layernorm.bias'''] # For LM head, transformers' wants the matrix to weight embeddings. A_ : Dict = word_embeddings # It should be done! return output_state_dict def snake_case__ ( ) -> Any: # Create the argument parser. A_ : Tuple = argparse.ArgumentParser() parser.add_argument('''--print-checkpoint-structure''' , action='''store_true''' ) parser.add_argument( '''path_to_checkpoint''' , type=lowerCamelCase__ , help='''Path to the checkpoint file (.zip archive or direct .pt file)''' , ) parser.add_argument( '''--config_file''' , default='''''' , type=lowerCamelCase__ , help='''An optional config json file describing the pre-trained model.''' , ) A_ : Union[str, Any] = parser.parse_args() # Extract the basename. A_ : Optional[int] = os.path.dirname(args.path_to_checkpoint ) # Load the model. # the .zip is very optional, let's keep it for backward compatibility print(f'Extracting PyTorch state dictionary from {args.path_to_checkpoint}' ) if args.path_to_checkpoint.endswith('''.zip''' ): with zipfile.ZipFile(args.path_to_checkpoint , '''r''' ) as checkpoint: with checkpoint.open('''release/mp_rank_00/model_optim_rng.pt''' ) as pytorch_dict: A_ : Optional[int] = torch.load(lowerCamelCase__ , map_location='''cpu''' ) else: A_ : Optional[Any] = torch.load(args.path_to_checkpoint , map_location='''cpu''' ) A_ : Optional[Any] = input_state_dict.get('''args''' , lowerCamelCase__ ) # Read the config, or default to the model released by NVIDIA. if args.config_file == "": if ds_args is not None: if ds_args.bias_gelu_fusion: A_ : Union[str, Any] = '''gelu_fast''' elif ds_args.openai_gelu: A_ : Optional[int] = '''gelu_new''' else: A_ : Any = '''gelu''' else: # in the very early days this used to be "gelu_new" A_ : Tuple = '''gelu_new''' # Spell out all parameters in case the defaults change. A_ : List[Any] = GPTaConfig( vocab_size=5_0_2_5_7 , n_positions=1_0_2_4 , n_embd=1_0_2_4 , n_layer=2_4 , n_head=1_6 , n_inner=4_0_9_6 , activation_function=lowerCamelCase__ , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , summary_type='''cls_index''' , summary_use_proj=lowerCamelCase__ , summary_activation=lowerCamelCase__ , summary_proj_to_labels=lowerCamelCase__ , summary_first_dropout=0.1 , scale_attn_weights=lowerCamelCase__ , use_cache=lowerCamelCase__ , bos_token_id=5_0_2_5_6 , eos_token_id=5_0_2_5_6 , ) else: A_ : str = GPTaConfig.from_json_file(args.config_file ) A_ : Optional[int] = ['''GPT2LMHeadModel'''] # Convert. print('''Converting''' ) A_ : Union[str, Any] = convert_megatron_checkpoint(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) # Print the structure of converted state dict. if args.print_checkpoint_structure: recursive_print(lowerCamelCase__ , lowerCamelCase__ ) # Add tokenizer class info to config # see https://github.com/huggingface/transformers/issues/13906) if ds_args is not None: A_ : List[Any] = ds_args.tokenizer_type if tokenizer_type == "GPT2BPETokenizer": A_ : List[Any] = '''gpt2''' elif tokenizer_type == "PretrainedFromHF": A_ : Union[str, Any] = ds_args.tokenizer_name_or_path else: raise ValueError(f'Unrecognized tokenizer_type {tokenizer_type}' ) else: A_ : int = '''gpt2''' A_ : Any = AutoTokenizer.from_pretrained(lowerCamelCase__ ) A_ : Any = type(lowerCamelCase__ ).__name__ A_ : Optional[Any] = tokenizer_class # Store the config to file. print('''Saving config''' ) config.save_pretrained(lowerCamelCase__ ) # Save tokenizer based on args print(f'Adding {tokenizer_class} tokenizer files' ) tokenizer.save_pretrained(lowerCamelCase__ ) # Store the state_dict to file. A_ : int = os.path.join(lowerCamelCase__ , '''pytorch_model.bin''' ) print(f'Saving checkpoint to "{output_checkpoint_file}"' ) torch.save(lowerCamelCase__ , lowerCamelCase__ ) #################################################################################################### if __name__ == "__main__": main() ####################################################################################################
366
'''simple docstring''' import pprint import requests snake_case__ = """https://zenquotes.io/api""" def snake_case__ ( ) -> list: return requests.get(API_ENDPOINT_URL + '''/today''' ).json() def snake_case__ ( ) -> list: return requests.get(API_ENDPOINT_URL + '''/random''' ).json() if __name__ == "__main__": snake_case__ = random_quotes() pprint.pprint(response)
4
0
'''simple docstring''' import ast import os import re import shutil import tempfile import unittest from unittest import mock import torch from accelerate.test_utils.examples import compare_against_test from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow from accelerate.utils import write_basic_config # DataLoaders built from `test_samples/MRPC` for quick testing # Should mock `{script_name}.get_dataloaders` via: # @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders) snake_case__ = [ """cross_validation.py""", """gradient_accumulation.py""", """local_sgd.py""", """multi_process_metrics.py""", """memory.py""", """automatic_gradient_accumulation.py""", """fsdp_with_peak_mem_tracking.py""", """deepspeed_with_config_support.py""", """megatron_lm_gpt_pretraining.py""", ] class UpperCamelCase_ (unittest.TestCase ): """simple docstring""" def _a ( self : str , _lowerCamelCase : str , _lowerCamelCase : bool , _lowerCamelCase : str = None , _lowerCamelCase : list = None ): """simple docstring""" A_ : Union[str, Any] = None A_ : Tuple = os.path.abspath(os.path.join('''examples''' , '''by_feature''' ) ) A_ : int = os.path.abspath('''examples''' ) for item in os.listdir(_lowerCamelCase ): if item not in EXCLUDE_EXAMPLES: A_ : Optional[Any] = os.path.join(_lowerCamelCase , _lowerCamelCase ) if os.path.isfile(_lowerCamelCase ) and ".py" in item_path: with self.subTest( tested_script=_lowerCamelCase , feature_script=_lowerCamelCase , tested_section='''main()''' if parser_only else '''training_function()''' , ): A_ : Tuple = compare_against_test( os.path.join(_lowerCamelCase , _lowerCamelCase ) , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) A_ : Tuple = '''\n'''.join(_lowerCamelCase ) if special_strings is not None: for string in special_strings: A_ : Optional[int] = diff.replace(_lowerCamelCase , '''''' ) self.assertEqual(_lowerCamelCase , '''''' ) def _a ( self : Tuple ): """simple docstring""" self.one_complete_example('''complete_nlp_example.py''' , _lowerCamelCase ) self.one_complete_example('''complete_nlp_example.py''' , _lowerCamelCase ) def _a ( self : Union[str, Any] ): """simple docstring""" A_ : Tuple = os.path.abspath(os.path.join('''examples''' , '''cv_example.py''' ) ) A_ : Optional[int] = [ ''' ''' * 16 + '''{\n\n''', ''' ''' * 20 + '''"accuracy": eval_metric["accuracy"],\n\n''', ''' ''' * 20 + '''"f1": eval_metric["f1"],\n\n''', ''' ''' * 20 + '''"train_loss": total_loss.item() / len(train_dataloader),\n\n''', ''' ''' * 20 + '''"epoch": epoch,\n\n''', ''' ''' * 16 + '''},\n\n''', ''' ''' * 16 + '''step=epoch,\n''', ''' ''' * 12, ''' ''' * 8 + '''for step, batch in enumerate(active_dataloader):\n''', ] self.one_complete_example('''complete_cv_example.py''' , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) self.one_complete_example('''complete_cv_example.py''' , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) @mock.patch.dict(os.environ, {'TESTING_MOCKED_DATALOADERS': '1'} ) class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = False @classmethod def _a ( cls : int ): """simple docstring""" super().setUpClass() A_ : Optional[Any] = tempfile.mkdtemp() A_ : Any = os.path.join(cls._tmpdir , '''default_config.yml''' ) write_basic_config(save_location=cls.configPath ) A_ : List[str] = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath] @classmethod def _a ( cls : Optional[Any] ): """simple docstring""" super().tearDownClass() shutil.rmtree(cls._tmpdir ) def _a ( self : str ): """simple docstring""" A_ : int = f'\n examples/by_feature/checkpointing.py\n --checkpointing_steps epoch\n --output_dir {self.tmpdir}\n '.split() run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''epoch_0''' ) ) ) def _a ( self : List[str] ): """simple docstring""" A_ : List[Any] = f'\n examples/by_feature/checkpointing.py\n --checkpointing_steps 1\n --output_dir {self.tmpdir}\n '.split() A_ : List[str] = run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''step_2''' ) ) ) def _a ( self : Tuple ): """simple docstring""" A_ : List[Any] = f'\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , "epoch_0" )}\n '.split() A_ : List[str] = run_command(self._launch_args + testargs , return_stdout=_lowerCamelCase ) self.assertNotIn('''epoch 0:''' , _lowerCamelCase ) self.assertIn('''epoch 1:''' , _lowerCamelCase ) def _a ( self : Union[str, Any] ): """simple docstring""" A_ : Tuple = f'\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , "step_2" )}\n '.split() A_ : int = run_command(self._launch_args + testargs , return_stdout=_lowerCamelCase ) if torch.cuda.is_available(): A_ : str = torch.cuda.device_count() else: A_ : Tuple = 1 if num_processes > 1: self.assertNotIn('''epoch 0:''' , _lowerCamelCase ) self.assertIn('''epoch 1:''' , _lowerCamelCase ) else: self.assertIn('''epoch 0:''' , _lowerCamelCase ) self.assertIn('''epoch 1:''' , _lowerCamelCase ) @slow def _a ( self : Dict ): """simple docstring""" A_ : Optional[Any] = ''' examples/by_feature/cross_validation.py --num_folds 2 '''.split() with mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''0'''} ): A_ : Optional[Any] = run_command(self._launch_args + testargs , return_stdout=_lowerCamelCase ) A_ : int = re.findall('''({.+})''' , _lowerCamelCase ) A_ : List[Any] = [r for r in results if '''accuracy''' in r][-1] A_ : int = ast.literal_eval(_lowerCamelCase ) self.assertGreaterEqual(results['''accuracy'''] , 0.75 ) def _a ( self : Dict ): """simple docstring""" A_ : Optional[int] = ['''examples/by_feature/multi_process_metrics.py'''] run_command(self._launch_args + testargs ) @require_trackers @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def _a ( self : List[Any] ): """simple docstring""" with tempfile.TemporaryDirectory() as tmpdir: A_ : Union[str, Any] = f'\n examples/by_feature/tracking.py\n --with_tracking\n --project_dir {tmpdir}\n '.split() run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(_lowerCamelCase , '''tracking''' ) ) ) def _a ( self : Dict ): """simple docstring""" A_ : List[Any] = ['''examples/by_feature/gradient_accumulation.py'''] run_command(self._launch_args + testargs ) def _a ( self : Dict ): """simple docstring""" A_ : Tuple = ['''examples/by_feature/local_sgd.py'''] run_command(self._launch_args + testargs )
367
'''simple docstring''' from __future__ import annotations class UpperCamelCase_ : """simple docstring""" def __init__( self : Optional[int] , _lowerCamelCase : int ): """simple docstring""" A_ : Union[str, Any] = order # a_{0} ... a_{k} A_ : Union[str, Any] = [1.0] + [0.0] * order # b_{0} ... b_{k} A_ : int = [1.0] + [0.0] * order # x[n-1] ... x[n-k] A_ : str = [0.0] * self.order # y[n-1] ... y[n-k] A_ : Optional[Any] = [0.0] * self.order def _a ( self : Dict , _lowerCamelCase : list[float] , _lowerCamelCase : list[float] ): """simple docstring""" if len(_lowerCamelCase ) < self.order: A_ : Any = [1.0, *a_coeffs] if len(_lowerCamelCase ) != self.order + 1: A_ : List[Any] = ( f'Expected a_coeffs to have {self.order + 1} elements ' f'for {self.order}-order filter, got {len(_lowerCamelCase )}' ) raise ValueError(_lowerCamelCase ) if len(_lowerCamelCase ) != self.order + 1: A_ : Union[str, Any] = ( f'Expected b_coeffs to have {self.order + 1} elements ' f'for {self.order}-order filter, got {len(_lowerCamelCase )}' ) raise ValueError(_lowerCamelCase ) A_ : Tuple = a_coeffs A_ : str = b_coeffs def _a ( self : Tuple , _lowerCamelCase : float ): """simple docstring""" A_ : Any = 0.0 # Start at index 1 and do index 0 at the end. for i in range(1 , self.order + 1 ): result += ( self.b_coeffs[i] * self.input_history[i - 1] - self.a_coeffs[i] * self.output_history[i - 1] ) A_ : str = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0] A_ : Optional[Any] = self.input_history[:-1] A_ : List[str] = self.output_history[:-1] A_ : Tuple = sample A_ : Tuple = result return result
4
0
'''simple docstring''' import itertools from dataclasses import dataclass from typing import List, Optional import pyarrow as pa import pyarrow.parquet as pq import datasets from datasets.table import table_cast snake_case__ = datasets.utils.logging.get_logger(__name__) @dataclass class UpperCamelCase_ (datasets.BuilderConfig ): """simple docstring""" _lowerCAmelCase = 1_0_0_0_0 _lowerCAmelCase = None _lowerCAmelCase = None class UpperCamelCase_ (datasets.ArrowBasedBuilder ): """simple docstring""" _lowerCAmelCase = ParquetConfig def _a ( self : Any ): """simple docstring""" return datasets.DatasetInfo(features=self.config.features ) def _a ( self : str , _lowerCamelCase : Dict ): """simple docstring""" if not self.config.data_files: raise ValueError(f'At least one data file must be specified, but got data_files={self.config.data_files}' ) A_ : List[Any] = dl_manager.download_and_extract(self.config.data_files ) if isinstance(_lowerCamelCase , (str, list, tuple) ): A_ : Union[str, Any] = data_files if isinstance(_lowerCamelCase , _lowerCamelCase ): A_ : Optional[int] = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive A_ : Optional[int] = [dl_manager.iter_files(_lowerCamelCase ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )] A_ : str = [] for split_name, files in data_files.items(): if isinstance(_lowerCamelCase , _lowerCamelCase ): A_ : Union[str, Any] = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive A_ : Optional[int] = [dl_manager.iter_files(_lowerCamelCase ) for file in files] # Infer features is they are stoed in the arrow schema if self.info.features is None: for file in itertools.chain.from_iterable(_lowerCamelCase ): with open(_lowerCamelCase , '''rb''' ) as f: A_ : Dict = datasets.Features.from_arrow_schema(pq.read_schema(_lowerCamelCase ) ) break splits.append(datasets.SplitGenerator(name=_lowerCamelCase , gen_kwargs={'''files''': files} ) ) return splits def _a ( self : Tuple , _lowerCamelCase : pa.Table ): """simple docstring""" if self.info.features is not None: # more expensive cast to support nested features with keys in a different order # allows str <-> int/float or str to Audio for example A_ : Optional[Any] = table_cast(_lowerCamelCase , self.info.features.arrow_schema ) return pa_table def _a ( self : List[str] , _lowerCamelCase : Optional[int] ): """simple docstring""" A_ : List[str] = self.info.features.arrow_schema if self.info.features is not None else None if self.info.features is not None and self.config.columns is not None: if sorted(field.name for field in schema ) != sorted(self.config.columns ): raise ValueError( f'Tried to load parquet data with columns \'{self.config.columns}\' with mismatching features \'{self.info.features}\'' ) for file_idx, file in enumerate(itertools.chain.from_iterable(_lowerCamelCase ) ): with open(_lowerCamelCase , '''rb''' ) as f: A_ : List[str] = pq.ParquetFile(_lowerCamelCase ) try: for batch_idx, record_batch in enumerate( parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ): A_ : Any = pa.Table.from_batches([record_batch] ) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield f'{file_idx}_{batch_idx}', self._cast_table(_lowerCamelCase ) except ValueError as e: logger.error(f'Failed to read file \'{file}\' with error {type(_lowerCamelCase )}: {e}' ) raise
368
'''simple docstring''' class UpperCamelCase_ : """simple docstring""" def __init__( self : Optional[Any] , _lowerCamelCase : Union[str, Any] ): """simple docstring""" A_ : Union[str, Any] = val A_ : Tuple = None A_ : Any = None def _a ( self : Tuple , _lowerCamelCase : List[Any] ): """simple docstring""" if self.val: if val < self.val: if self.left is None: A_ : int = Node(_lowerCamelCase ) else: self.left.insert(_lowerCamelCase ) elif val > self.val: if self.right is None: A_ : List[str] = Node(_lowerCamelCase ) else: self.right.insert(_lowerCamelCase ) else: A_ : Any = val def snake_case__ ( lowerCamelCase__ : Any , lowerCamelCase__ : Optional[int] ) -> str: # Recursive traversal if root: inorder(root.left , lowerCamelCase__ ) res.append(root.val ) inorder(root.right , lowerCamelCase__ ) def snake_case__ ( lowerCamelCase__ : Optional[int] ) -> Tuple: # Build BST if len(lowerCamelCase__ ) == 0: return arr A_ : Dict = Node(arr[0] ) for i in range(1 , len(lowerCamelCase__ ) ): root.insert(arr[i] ) # Traverse BST in order. A_ : Tuple = [] inorder(lowerCamelCase__ , lowerCamelCase__ ) return res if __name__ == "__main__": print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
4
0
'''simple docstring''' import unittest from transformers import AlbertTokenizer, AlbertTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin snake_case__ = get_tests_dir("""fixtures/spiece.model""") @require_sentencepiece @require_tokenizers class UpperCamelCase_ (a__, unittest.TestCase ): """simple docstring""" _lowerCAmelCase = AlbertTokenizer _lowerCAmelCase = AlbertTokenizerFast _lowerCAmelCase = True _lowerCAmelCase = True _lowerCAmelCase = True def _a ( self : int ): """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing A_ : str = AlbertTokenizer(_lowerCamelCase ) tokenizer.save_pretrained(self.tmpdirname ) def _a ( self : int , _lowerCamelCase : Any ): """simple docstring""" A_ : Any = '''this is a test''' A_ : Dict = '''this is a test''' return input_text, output_text def _a ( self : List[str] ): """simple docstring""" A_ : Union[str, Any] = '''<pad>''' A_ : Optional[int] = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowerCamelCase ) , _lowerCamelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowerCamelCase ) , _lowerCamelCase ) def _a ( self : int ): """simple docstring""" A_ : int = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<pad>''' ) self.assertEqual(vocab_keys[1] , '''<unk>''' ) self.assertEqual(vocab_keys[-1] , '''▁eloquent''' ) self.assertEqual(len(_lowerCamelCase ) , 30000 ) def _a ( self : Any ): """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 30000 ) def _a ( self : str ): """simple docstring""" if not self.test_rust_tokenizer: return A_ : Any = self.get_tokenizer() A_ : List[str] = self.get_rust_tokenizer() A_ : List[str] = '''I was born in 92000, and this is falsé.''' A_ : List[Any] = tokenizer.tokenize(_lowerCamelCase ) A_ : List[Any] = rust_tokenizer.tokenize(_lowerCamelCase ) self.assertListEqual(_lowerCamelCase , _lowerCamelCase ) A_ : List[Any] = tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase ) A_ : Any = rust_tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase ) self.assertListEqual(_lowerCamelCase , _lowerCamelCase ) A_ : str = self.get_rust_tokenizer() A_ : List[Any] = tokenizer.encode(_lowerCamelCase ) A_ : List[str] = rust_tokenizer.encode(_lowerCamelCase ) self.assertListEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : Tuple ): """simple docstring""" A_ : Union[str, Any] = AlbertTokenizer(_lowerCamelCase , keep_accents=_lowerCamelCase ) A_ : Optional[Any] = tokenizer.tokenize('''This is a test''' ) self.assertListEqual(_lowerCamelCase , ['''▁this''', '''▁is''', '''▁a''', '''▁test'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCamelCase ) , [48, 25, 21, 1289] ) A_ : Dict = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( _lowerCamelCase , ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''é''', '''.'''] ) A_ : Tuple = tokenizer.convert_tokens_to_ids(_lowerCamelCase ) self.assertListEqual(_lowerCamelCase , [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9] ) A_ : Optional[int] = tokenizer.convert_ids_to_tokens(_lowerCamelCase ) self.assertListEqual( _lowerCamelCase , ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.'''] , ) def _a ( self : Any ): """simple docstring""" A_ : Dict = AlbertTokenizer(_lowerCamelCase ) A_ : List[Any] = tokenizer.encode('''sequence builders''' ) A_ : Union[str, Any] = tokenizer.encode('''multi-sequence build''' ) A_ : Optional[int] = tokenizer.build_inputs_with_special_tokens(_lowerCamelCase ) A_ : Optional[Any] = tokenizer.build_inputs_with_special_tokens(_lowerCamelCase , _lowerCamelCase ) assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [ tokenizer.sep_token_id ] @slow def _a ( self : List[Any] ): """simple docstring""" A_ : Tuple = {'''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''input_ids''': [[2, 21970, 13, 5, 6092, 167, 28, 7103, 2153, 673, 8, 7028, 12051, 18, 17, 7103, 2153, 673, 8, 3515, 18684, 8, 4461, 6, 1927, 297, 8, 12060, 2607, 18, 13, 5, 4461, 15, 10538, 38, 8, 135, 15, 822, 58, 15, 993, 10363, 15, 1460, 8005, 4461, 15, 993, 255, 2328, 9, 9, 9, 6, 26, 1112, 816, 3260, 13, 5, 103, 2377, 6, 17, 1112, 816, 2782, 13, 5, 103, 10641, 6, 29, 84, 2512, 2430, 782, 18684, 2761, 19, 808, 2430, 2556, 17, 855, 1480, 9477, 4091, 128, 11712, 15, 7103, 2153, 673, 17, 24883, 9990, 9, 3], [2, 11502, 25, 1006, 20, 782, 8, 11809, 855, 1732, 19393, 18667, 37, 367, 21018, 69, 1854, 34, 11860, 19124, 27, 156, 225, 17, 193, 4141, 19, 65, 9124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2231, 886, 2385, 17659, 84, 14, 16792, 1952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=_lowerCamelCase , model_name='''albert-base-v2''' , revision='''6b6560eaf5ff2e250b00c50f380c5389a9c2d82e''' , )
369
'''simple docstring''' def snake_case__ ( lowerCamelCase__ : list ) -> list: if len(lowerCamelCase__ ) <= 1: return [tuple(lowerCamelCase__ )] A_ : List[str] = [] def generate(lowerCamelCase__ : int , lowerCamelCase__ : list ): if k == 1: res.append(tuple(arr[:] ) ) return generate(k - 1 , lowerCamelCase__ ) for i in range(k - 1 ): if k % 2 == 0: # k is even A_ ,A_ : Optional[int] = arr[k - 1], arr[i] else: # k is odd A_ ,A_ : Union[str, Any] = arr[k - 1], arr[0] generate(k - 1 , lowerCamelCase__ ) generate(len(lowerCamelCase__ ) , lowerCamelCase__ ) return res if __name__ == "__main__": snake_case__ = input("""Enter numbers separated by a comma:\n""").strip() snake_case__ = [int(item) for item in user_input.split(""",""")] print(heaps(arr))
4
0
'''simple docstring''' import gc import unittest import numpy as np import torch from diffusers import StableDiffusionKDiffusionPipeline from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() @slow @require_torch_gpu class UpperCamelCase_ (unittest.TestCase ): """simple docstring""" def _a ( self : Union[str, Any] ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def _a ( self : Dict ): """simple docstring""" A_ : Any = StableDiffusionKDiffusionPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' ) A_ : List[Any] = sd_pipe.to(_lowerCamelCase ) sd_pipe.set_progress_bar_config(disable=_lowerCamelCase ) sd_pipe.set_scheduler('''sample_euler''' ) A_ : Any = '''A painting of a squirrel eating a burger''' A_ : Dict = torch.manual_seed(0 ) A_ : Optional[int] = sd_pipe([prompt] , generator=_lowerCamelCase , guidance_scale=9.0 , num_inference_steps=20 , output_type='''np''' ) A_ : Optional[int] = output.images A_ : Optional[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) A_ : List[str] = np.array([0.04_47, 0.04_92, 0.04_68, 0.04_08, 0.03_83, 0.04_08, 0.03_54, 0.03_80, 0.03_39] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def _a ( self : List[Any] ): """simple docstring""" A_ : int = StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' ) A_ : int = sd_pipe.to(_lowerCamelCase ) sd_pipe.set_progress_bar_config(disable=_lowerCamelCase ) sd_pipe.set_scheduler('''sample_euler''' ) A_ : List[Any] = '''A painting of a squirrel eating a burger''' A_ : Optional[int] = torch.manual_seed(0 ) A_ : Optional[int] = sd_pipe([prompt] , generator=_lowerCamelCase , guidance_scale=9.0 , num_inference_steps=20 , output_type='''np''' ) A_ : Dict = output.images A_ : str = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) A_ : Tuple = np.array([0.12_37, 0.13_20, 0.14_38, 0.13_59, 0.13_90, 0.11_32, 0.12_77, 0.11_75, 0.11_12] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-1 def _a ( self : Union[str, Any] ): """simple docstring""" A_ : Any = StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' ) A_ : Union[str, Any] = sd_pipe.to(_lowerCamelCase ) sd_pipe.set_progress_bar_config(disable=_lowerCamelCase ) sd_pipe.set_scheduler('''sample_dpmpp_2m''' ) A_ : str = '''A painting of a squirrel eating a burger''' A_ : Union[str, Any] = torch.manual_seed(0 ) A_ : Tuple = sd_pipe( [prompt] , generator=_lowerCamelCase , guidance_scale=7.5 , num_inference_steps=15 , output_type='''np''' , use_karras_sigmas=_lowerCamelCase , ) A_ : List[str] = output.images A_ : List[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) A_ : Any = np.array( [0.11_38_16_89, 0.12_11_29_21, 0.1_38_94_57, 0.12_54_96_06, 0.1_24_49_64, 0.10_83_15_17, 0.11_56_28_66, 0.10_86_78_16, 0.10_49_90_48] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
370
'''simple docstring''' import unittest from queue import Empty from threading import Thread from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available from transformers.testing_utils import CaptureStdout, require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers import AutoModelForCausalLM @require_torch class UpperCamelCase_ (unittest.TestCase ): """simple docstring""" def _a ( self : Dict ): """simple docstring""" A_ : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) A_ : Tuple = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowerCamelCase ) A_ : Dict = -1 A_ : List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCamelCase ) A_ : Any = model.generate(_lowerCamelCase , max_new_tokens=10 , do_sample=_lowerCamelCase ) A_ : List[str] = tokenizer.decode(greedy_ids[0] ) with CaptureStdout() as cs: A_ : List[str] = TextStreamer(_lowerCamelCase ) model.generate(_lowerCamelCase , max_new_tokens=10 , do_sample=_lowerCamelCase , streamer=_lowerCamelCase ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer A_ : Dict = cs.out[:-1] self.assertEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : Tuple ): """simple docstring""" A_ : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) A_ : List[str] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowerCamelCase ) A_ : Dict = -1 A_ : List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCamelCase ) A_ : Optional[int] = model.generate(_lowerCamelCase , max_new_tokens=10 , do_sample=_lowerCamelCase ) A_ : str = tokenizer.decode(greedy_ids[0] ) A_ : int = TextIteratorStreamer(_lowerCamelCase ) A_ : List[Any] = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer} A_ : List[Any] = Thread(target=model.generate , kwargs=_lowerCamelCase ) thread.start() A_ : List[Any] = '''''' for new_text in streamer: streamer_text += new_text self.assertEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : int ): """simple docstring""" A_ : List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) A_ : List[Any] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowerCamelCase ) A_ : List[str] = -1 A_ : Any = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCamelCase ) A_ : Tuple = model.generate(_lowerCamelCase , max_new_tokens=10 , do_sample=_lowerCamelCase ) A_ : Tuple = greedy_ids[:, input_ids.shape[1] :] A_ : Tuple = tokenizer.decode(new_greedy_ids[0] ) with CaptureStdout() as cs: A_ : Any = TextStreamer(_lowerCamelCase , skip_prompt=_lowerCamelCase ) model.generate(_lowerCamelCase , max_new_tokens=10 , do_sample=_lowerCamelCase , streamer=_lowerCamelCase ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer A_ : Any = cs.out[:-1] self.assertEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : List[Any] ): """simple docstring""" A_ : List[Any] = AutoTokenizer.from_pretrained('''distilgpt2''' ) A_ : Tuple = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(_lowerCamelCase ) A_ : List[Any] = -1 A_ : Union[str, Any] = torch.ones((1, 5) , device=_lowerCamelCase ).long() * model.config.bos_token_id with CaptureStdout() as cs: A_ : List[Any] = TextStreamer(_lowerCamelCase , skip_special_tokens=_lowerCamelCase ) model.generate(_lowerCamelCase , max_new_tokens=1 , do_sample=_lowerCamelCase , streamer=_lowerCamelCase ) # The prompt contains a special token, so the streamer should not print it. As such, the output text, when # re-tokenized, must only contain one token A_ : List[str] = cs.out[:-1] # Remove the final "\n" A_ : List[Any] = tokenizer(_lowerCamelCase , return_tensors='''pt''' ) self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) ) def _a ( self : Union[str, Any] ): """simple docstring""" A_ : str = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) A_ : str = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowerCamelCase ) A_ : Union[str, Any] = -1 A_ : Union[str, Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCamelCase ) A_ : List[str] = TextIteratorStreamer(_lowerCamelCase , timeout=0.0_01 ) A_ : str = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer} A_ : List[str] = Thread(target=model.generate , kwargs=_lowerCamelCase ) thread.start() # The streamer will timeout after 0.001 seconds, so an exception will be raised with self.assertRaises(_lowerCamelCase ): A_ : str = '''''' for new_text in streamer: streamer_text += new_text
4
0
'''simple docstring''' import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import XLMRobertaTokenizerFast from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class UpperCamelCase_ (a__, unittest.TestCase ): """simple docstring""" _lowerCAmelCase = KandinskyImgaImgPipeline _lowerCAmelCase = ['prompt', 'image_embeds', 'negative_image_embeds', 'image'] _lowerCAmelCase = [ 'prompt', 'negative_prompt', 'image_embeds', 'negative_image_embeds', 'image', ] _lowerCAmelCase = [ 'generator', 'height', 'width', 'strength', 'guidance_scale', 'negative_prompt', 'num_inference_steps', 'return_dict', 'guidance_scale', 'num_images_per_prompt', 'output_type', 'return_dict', ] _lowerCAmelCase = False @property def _a ( self : List[str] ): """simple docstring""" return 32 @property def _a ( self : List[str] ): """simple docstring""" return 32 @property def _a ( self : List[Any] ): """simple docstring""" return self.time_input_dim @property def _a ( self : Tuple ): """simple docstring""" return self.time_input_dim * 4 @property def _a ( self : Optional[Any] ): """simple docstring""" return 100 @property def _a ( self : int ): """simple docstring""" A_ : Dict = XLMRobertaTokenizerFast.from_pretrained('''YiYiXu/tiny-random-mclip-base''' ) return tokenizer @property def _a ( self : Union[str, Any] ): """simple docstring""" torch.manual_seed(0 ) A_ : Tuple = MCLIPConfig( numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , ) A_ : int = MultilingualCLIP(_lowerCamelCase ) A_ : Dict = text_encoder.eval() return text_encoder @property def _a ( self : Any ): """simple docstring""" torch.manual_seed(0 ) A_ : Optional[Any] = { '''in_channels''': 4, # Out channels is double in channels because predicts mean and variance '''out_channels''': 8, '''addition_embed_type''': '''text_image''', '''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''), '''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''), '''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''', '''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2), '''layers_per_block''': 1, '''encoder_hid_dim''': self.text_embedder_hidden_size, '''encoder_hid_dim_type''': '''text_image_proj''', '''cross_attention_dim''': self.cross_attention_dim, '''attention_head_dim''': 4, '''resnet_time_scale_shift''': '''scale_shift''', '''class_embed_type''': None, } A_ : List[Any] = UNetaDConditionModel(**_lowerCamelCase ) return model @property def _a ( self : List[str] ): """simple docstring""" return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def _a ( self : Optional[Any] ): """simple docstring""" torch.manual_seed(0 ) A_ : Optional[Any] = VQModel(**self.dummy_movq_kwargs ) return model def _a ( self : str ): """simple docstring""" A_ : Tuple = self.dummy_text_encoder A_ : Optional[int] = self.dummy_tokenizer A_ : Dict = self.dummy_unet A_ : Dict = self.dummy_movq A_ : Any = { '''num_train_timesteps''': 1000, '''beta_schedule''': '''linear''', '''beta_start''': 0.0_00_85, '''beta_end''': 0.0_12, '''clip_sample''': False, '''set_alpha_to_one''': False, '''steps_offset''': 0, '''prediction_type''': '''epsilon''', '''thresholding''': False, } A_ : int = DDIMScheduler(**_lowerCamelCase ) A_ : str = { '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''unet''': unet, '''scheduler''': scheduler, '''movq''': movq, } return components def _a ( self : Optional[int] , _lowerCamelCase : Dict , _lowerCamelCase : List[str]=0 ): """simple docstring""" A_ : List[str] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase ) A_ : Dict = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(_lowerCamelCase ) # create init_image A_ : Optional[int] = floats_tensor((1, 3, 64, 64) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase ) A_ : str = image.cpu().permute(0 , 2 , 3 , 1 )[0] A_ : Optional[Any] = Image.fromarray(np.uinta(_lowerCamelCase ) ).convert('''RGB''' ).resize((256, 256) ) if str(_lowerCamelCase ).startswith('''mps''' ): A_ : Optional[int] = torch.manual_seed(_lowerCamelCase ) else: A_ : Tuple = torch.Generator(device=_lowerCamelCase ).manual_seed(_lowerCamelCase ) A_ : Optional[Any] = { '''prompt''': '''horse''', '''image''': init_image, '''image_embeds''': image_embeds, '''negative_image_embeds''': negative_image_embeds, '''generator''': generator, '''height''': 64, '''width''': 64, '''num_inference_steps''': 10, '''guidance_scale''': 7.0, '''strength''': 0.2, '''output_type''': '''np''', } return inputs def _a ( self : Any ): """simple docstring""" A_ : Any = '''cpu''' A_ : Optional[Any] = self.get_dummy_components() A_ : Optional[int] = self.pipeline_class(**_lowerCamelCase ) A_ : Dict = pipe.to(_lowerCamelCase ) pipe.set_progress_bar_config(disable=_lowerCamelCase ) A_ : Optional[int] = pipe(**self.get_dummy_inputs(_lowerCamelCase ) ) A_ : Optional[Any] = output.images A_ : Dict = pipe( **self.get_dummy_inputs(_lowerCamelCase ) , return_dict=_lowerCamelCase , )[0] A_ : List[str] = image[0, -3:, -3:, -1] A_ : Optional[int] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) A_ : Any = np.array( [0.61_47_49_43, 0.6_07_35_39, 0.43_30_85_44, 0.5_92_82_69, 0.47_49_35_95, 0.46_75_59_73, 0.4_61_38_38, 0.45_36_87_97, 0.50_11_92_33] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 ), f' expected_slice {expected_slice}, but got {image_slice.flatten()}' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 ), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}' @slow @require_torch_gpu class UpperCamelCase_ (unittest.TestCase ): """simple docstring""" def _a ( self : str ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def _a ( self : List[str] ): """simple docstring""" A_ : List[Any] = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/kandinsky_img2img_frog.npy''' ) A_ : str = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' ) A_ : Union[str, Any] = '''A red cartoon frog, 4k''' A_ : List[str] = KandinskyPriorPipeline.from_pretrained( '''kandinsky-community/kandinsky-2-1-prior''' , torch_dtype=torch.floataa ) pipe_prior.to(_lowerCamelCase ) A_ : Tuple = KandinskyImgaImgPipeline.from_pretrained( '''kandinsky-community/kandinsky-2-1''' , torch_dtype=torch.floataa ) A_ : Any = pipeline.to(_lowerCamelCase ) pipeline.set_progress_bar_config(disable=_lowerCamelCase ) A_ : List[str] = torch.Generator(device='''cpu''' ).manual_seed(0 ) A_ : Optional[int] = pipe_prior( _lowerCamelCase , generator=_lowerCamelCase , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple() A_ : Optional[Any] = pipeline( _lowerCamelCase , image=_lowerCamelCase , image_embeds=_lowerCamelCase , negative_image_embeds=_lowerCamelCase , generator=_lowerCamelCase , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type='''np''' , ) A_ : Union[str, Any] = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(_lowerCamelCase , _lowerCamelCase )
371
'''simple docstring''' import heapq def snake_case__ ( lowerCamelCase__ : dict ) -> set[int]: A_ : list[list] = [] # for each node and his adjacency list add them and the rank of the node to queue # using heapq module the queue will be filled like a Priority Queue # heapq works with a min priority queue, so I used -1*len(v) to build it for key, value in graph.items(): # O(log(n)) heapq.heappush(lowerCamelCase__ , [-1 * len(lowerCamelCase__ ), (key, value)] ) # chosen_vertices = set of chosen vertices A_ : str = set() # while queue isn't empty and there are still edges # (queue[0][0] is the rank of the node with max rank) while queue and queue[0][0] != 0: # extract vertex with max rank from queue and add it to chosen_vertices A_ : Tuple = heapq.heappop(lowerCamelCase__ )[1][0] chosen_vertices.add(lowerCamelCase__ ) # Remove all arcs adjacent to argmax for elem in queue: # if v haven't adjacent node, skip if elem[0] == 0: continue # if argmax is reachable from elem # remove argmax from elem's adjacent list and update his rank if argmax in elem[1][1]: A_ : List[str] = elem[1][1].index(lowerCamelCase__ ) del elem[1][1][index] elem[0] += 1 # re-order the queue heapq.heapify(lowerCamelCase__ ) return chosen_vertices if __name__ == "__main__": import doctest doctest.testmod() snake_case__ = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]} print(F'Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}')
4
0
'''simple docstring''' from pathlib import PurePosixPath from typing import Optional import fsspec from fsspec import AbstractFileSystem from huggingface_hub.hf_api import DatasetInfo from ..utils.file_utils import get_authentication_headers_for_url from ..utils.hub import hf_hub_url class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = '' _lowerCAmelCase = 'hf-legacy' # "hf://"" is reserved for hffs def __init__( self : Optional[int] , _lowerCamelCase : Optional[DatasetInfo] = None , _lowerCamelCase : Optional[str] = None , **_lowerCamelCase : Dict , ): """simple docstring""" super().__init__(self , **_lowerCamelCase ) A_ : Any = repo_info A_ : Optional[int] = token A_ : List[Any] = None def _a ( self : List[str] ): """simple docstring""" if self.dir_cache is None: A_ : Tuple = {} for hf_file in self.repo_info.siblings: # TODO(QL): add sizes A_ : int = { '''name''': hf_file.rfilename, '''size''': None, '''type''': '''file''', } self.dir_cache.update( { str(_lowerCamelCase ): {'''name''': str(_lowerCamelCase ), '''size''': None, '''type''': '''directory'''} for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1] } ) def _a ( self : Union[str, Any] , _lowerCamelCase : str , _lowerCamelCase : str = "rb" , **_lowerCamelCase : Dict , ): """simple docstring""" if not isinstance(self.repo_info , _lowerCamelCase ): raise NotImplementedError(f'Open is only implemented for dataset repositories, but got {self.repo_info}' ) A_ : Tuple = hf_hub_url(self.repo_info.id , _lowerCamelCase , revision=self.repo_info.sha ) return fsspec.open( _lowerCamelCase , mode=_lowerCamelCase , headers=get_authentication_headers_for_url(_lowerCamelCase , use_auth_token=self.token ) , client_kwargs={'''trust_env''': True} , ).open() def _a ( self : Optional[int] , _lowerCamelCase : Union[str, Any] , **_lowerCamelCase : int ): """simple docstring""" self._get_dirs() A_ : int = self._strip_protocol(_lowerCamelCase ) if path in self.dir_cache: return self.dir_cache[path] else: raise FileNotFoundError(_lowerCamelCase ) def _a ( self : Tuple , _lowerCamelCase : List[Any] , _lowerCamelCase : Optional[Any]=False , **_lowerCamelCase : List[str] ): """simple docstring""" self._get_dirs() A_ : str = PurePosixPath(path.strip('''/''' ) ) A_ : Any = {} for p, f in self.dir_cache.items(): A_ : Union[str, Any] = PurePosixPath(p.strip('''/''' ) ) A_ : Optional[Any] = p.parent if root == path: A_ : Union[str, Any] = f A_ : str = list(paths.values() ) if detail: return out else: return sorted(f['''name'''] for f in out )
350
'''simple docstring''' import argparse from collections import OrderedDict from pathlib import Path import torch from huggingface_hub import hf_hub_download from PIL import Image from torchvision.transforms import functional as F from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection from transformers.utils import logging logging.set_verbosity_info() snake_case__ = logging.get_logger(__name__) # here we list all keys to be renamed (original name on the left, our name on the right) snake_case__ = [] for i in range(6): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (F'transformer.encoder.layers.{i}.self_attn.out_proj.weight', F'encoder.layers.{i}.self_attn.out_proj.weight') ) rename_keys.append( (F'transformer.encoder.layers.{i}.self_attn.out_proj.bias', F'encoder.layers.{i}.self_attn.out_proj.bias') ) rename_keys.append((F'transformer.encoder.layers.{i}.linear1.weight', F'encoder.layers.{i}.fc1.weight')) rename_keys.append((F'transformer.encoder.layers.{i}.linear1.bias', F'encoder.layers.{i}.fc1.bias')) rename_keys.append((F'transformer.encoder.layers.{i}.linear2.weight', F'encoder.layers.{i}.fc2.weight')) rename_keys.append((F'transformer.encoder.layers.{i}.linear2.bias', F'encoder.layers.{i}.fc2.bias')) rename_keys.append( (F'transformer.encoder.layers.{i}.norm1.weight', F'encoder.layers.{i}.self_attn_layer_norm.weight') ) rename_keys.append((F'transformer.encoder.layers.{i}.norm1.bias', F'encoder.layers.{i}.self_attn_layer_norm.bias')) rename_keys.append((F'transformer.encoder.layers.{i}.norm2.weight', F'encoder.layers.{i}.final_layer_norm.weight')) rename_keys.append((F'transformer.encoder.layers.{i}.norm2.bias', F'encoder.layers.{i}.final_layer_norm.bias')) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( (F'transformer.decoder.layers.{i}.self_attn.out_proj.weight', F'decoder.layers.{i}.self_attn.out_proj.weight') ) rename_keys.append( (F'transformer.decoder.layers.{i}.self_attn.out_proj.bias', F'decoder.layers.{i}.self_attn.out_proj.bias') ) rename_keys.append( ( F'transformer.decoder.layers.{i}.multihead_attn.out_proj.weight', F'decoder.layers.{i}.encoder_attn.out_proj.weight', ) ) rename_keys.append( ( F'transformer.decoder.layers.{i}.multihead_attn.out_proj.bias', F'decoder.layers.{i}.encoder_attn.out_proj.bias', ) ) rename_keys.append((F'transformer.decoder.layers.{i}.linear1.weight', F'decoder.layers.{i}.fc1.weight')) rename_keys.append((F'transformer.decoder.layers.{i}.linear1.bias', F'decoder.layers.{i}.fc1.bias')) rename_keys.append((F'transformer.decoder.layers.{i}.linear2.weight', F'decoder.layers.{i}.fc2.weight')) rename_keys.append((F'transformer.decoder.layers.{i}.linear2.bias', F'decoder.layers.{i}.fc2.bias')) rename_keys.append( (F'transformer.decoder.layers.{i}.norm1.weight', F'decoder.layers.{i}.self_attn_layer_norm.weight') ) rename_keys.append((F'transformer.decoder.layers.{i}.norm1.bias', F'decoder.layers.{i}.self_attn_layer_norm.bias')) rename_keys.append( (F'transformer.decoder.layers.{i}.norm2.weight', F'decoder.layers.{i}.encoder_attn_layer_norm.weight') ) rename_keys.append( (F'transformer.decoder.layers.{i}.norm2.bias', F'decoder.layers.{i}.encoder_attn_layer_norm.bias') ) rename_keys.append((F'transformer.decoder.layers.{i}.norm3.weight', F'decoder.layers.{i}.final_layer_norm.weight')) rename_keys.append((F'transformer.decoder.layers.{i}.norm3.bias', F'decoder.layers.{i}.final_layer_norm.bias')) # convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads rename_keys.extend( [ ("""input_proj.weight""", """input_projection.weight"""), ("""input_proj.bias""", """input_projection.bias"""), ("""query_embed.weight""", """query_position_embeddings.weight"""), ("""transformer.encoder.norm.weight""", """encoder.layernorm.weight"""), ("""transformer.encoder.norm.bias""", """encoder.layernorm.bias"""), ("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""), ("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""), ("""class_embed.weight""", """class_labels_classifier.weight"""), ("""class_embed.bias""", """class_labels_classifier.bias"""), ("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""), ("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""), ("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""), ("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""), ("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""), ("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""), ] ) def snake_case__ ( lowerCamelCase__ : List[Any] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : List[Any] ) -> Optional[Any]: A_ : Tuple = state_dict.pop(lowerCamelCase__ ) A_ : Optional[Any] = val def snake_case__ ( lowerCamelCase__ : Dict ) -> Any: A_ : int = OrderedDict() for key, value in state_dict.items(): if "backbone.0.body" in key: A_ : int = key.replace('''backbone.0.body''' , '''backbone.conv_encoder.model''' ) A_ : List[str] = value else: A_ : Optional[int] = value return new_state_dict def snake_case__ ( lowerCamelCase__ : Union[str, Any] ) -> Optional[Any]: A_ : Any = '''''' # first: transformer encoder for i in range(6 ): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) A_ : Tuple = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight' ) A_ : Dict = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias' ) # next, add query, keys and values (in that order) to the state dict A_ : str = in_proj_weight[:2_5_6, :] A_ : Optional[Any] = in_proj_bias[:2_5_6] A_ : Dict = in_proj_weight[2_5_6:5_1_2, :] A_ : Tuple = in_proj_bias[2_5_6:5_1_2] A_ : Tuple = in_proj_weight[-2_5_6:, :] A_ : Optional[int] = in_proj_bias[-2_5_6:] # next: transformer decoder (which is a bit more complex because it also includes cross-attention) for i in range(6 ): # read in weights + bias of input projection layer of self-attention A_ : Union[str, Any] = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight' ) A_ : Dict = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias' ) # next, add query, keys and values (in that order) to the state dict A_ : List[str] = in_proj_weight[:2_5_6, :] A_ : int = in_proj_bias[:2_5_6] A_ : Any = in_proj_weight[2_5_6:5_1_2, :] A_ : List[str] = in_proj_bias[2_5_6:5_1_2] A_ : Union[str, Any] = in_proj_weight[-2_5_6:, :] A_ : Optional[Any] = in_proj_bias[-2_5_6:] # read in weights + bias of input projection layer of cross-attention A_ : Tuple = state_dict.pop( f'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight' ) A_ : Optional[Any] = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias' ) # next, add query, keys and values (in that order) of cross-attention to the state dict A_ : Dict = in_proj_weight_cross_attn[:2_5_6, :] A_ : Tuple = in_proj_bias_cross_attn[:2_5_6] A_ : int = in_proj_weight_cross_attn[2_5_6:5_1_2, :] A_ : List[str] = in_proj_bias_cross_attn[2_5_6:5_1_2] A_ : Any = in_proj_weight_cross_attn[-2_5_6:, :] A_ : Any = in_proj_bias_cross_attn[-2_5_6:] def snake_case__ ( lowerCamelCase__ : List[str] , lowerCamelCase__ : Tuple ) -> Dict: A_ ,A_ : int = image.size A_ : Tuple = max(lowerCamelCase__ , lowerCamelCase__ ) A_ : Optional[Any] = 8_0_0 if '''detection''' in checkpoint_url else 1_0_0_0 A_ : Union[str, Any] = target_max_size / current_max_size A_ : Any = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) ) return resized_image def snake_case__ ( lowerCamelCase__ : Tuple ) -> str: A_ : Any = F.to_tensor(lowerCamelCase__ ) A_ : Optional[Any] = F.normalize(lowerCamelCase__ , mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ) return image @torch.no_grad() def snake_case__ ( lowerCamelCase__ : List[Any] , lowerCamelCase__ : int , lowerCamelCase__ : int ) -> str: logger.info('''Converting model...''' ) # load original state dict A_ : Tuple = torch.hub.load_state_dict_from_url(lowerCamelCase__ , map_location='''cpu''' ) # rename keys for src, dest in rename_keys: rename_key(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) A_ : str = rename_backbone_keys(lowerCamelCase__ ) # query, key and value matrices need special treatment read_in_q_k_v(lowerCamelCase__ ) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them A_ : List[Any] = '''model.''' for key in state_dict.copy().keys(): if not key.startswith('''class_labels_classifier''' ) and not key.startswith('''bbox_predictor''' ): A_ : List[Any] = state_dict.pop(lowerCamelCase__ ) A_ : str = val # create HuggingFace model and load state dict A_ : Union[str, Any] = TableTransformerConfig( backbone='''resnet18''' , mask_loss_coefficient=1 , dice_loss_coefficient=1 , ce_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.4 , class_cost=1 , bbox_cost=5 , giou_cost=2 , ) if "detection" in checkpoint_url: A_ : Dict = 1_5 A_ : Dict = 2 A_ : int = {0: '''table''', 1: '''table rotated'''} A_ : List[str] = idalabel A_ : Optional[int] = {v: k for k, v in idalabel.items()} else: A_ : Union[str, Any] = 1_2_5 A_ : Optional[Any] = 6 A_ : Optional[Any] = { 0: '''table''', 1: '''table column''', 2: '''table row''', 3: '''table column header''', 4: '''table projected row header''', 5: '''table spanning cell''', } A_ : int = idalabel A_ : Tuple = {v: k for k, v in idalabel.items()} A_ : Optional[Any] = DetrImageProcessor( format='''coco_detection''' , max_size=8_0_0 if '''detection''' in checkpoint_url else 1_0_0_0 ) A_ : int = TableTransformerForObjectDetection(lowerCamelCase__ ) model.load_state_dict(lowerCamelCase__ ) model.eval() # verify our conversion A_ : Optional[int] = '''example_pdf.png''' if '''detection''' in checkpoint_url else '''example_table.png''' A_ : Union[str, Any] = hf_hub_download(repo_id='''nielsr/example-pdf''' , repo_type='''dataset''' , filename=lowerCamelCase__ ) A_ : Tuple = Image.open(lowerCamelCase__ ).convert('''RGB''' ) A_ : int = normalize(resize(lowerCamelCase__ , lowerCamelCase__ ) ).unsqueeze(0 ) A_ : str = model(lowerCamelCase__ ) if "detection" in checkpoint_url: A_ : str = (1, 1_5, 3) A_ : int = torch.tensor( [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]] ) A_ : Tuple = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]] ) else: A_ : Optional[int] = (1, 1_2_5, 7) A_ : Dict = torch.tensor( [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]] ) A_ : Any = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]] ) assert outputs.logits.shape == expected_shape assert torch.allclose(outputs.logits[0, :3, :3] , lowerCamelCase__ , atol=1e-4 ) assert torch.allclose(outputs.pred_boxes[0, :3, :3] , lowerCamelCase__ , atol=1e-4 ) print('''Looks ok!''' ) if pytorch_dump_folder_path is not None: # Save model and image processor logger.info(f'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' ) Path(lowerCamelCase__ ).mkdir(exist_ok=lowerCamelCase__ ) model.save_pretrained(lowerCamelCase__ ) image_processor.save_pretrained(lowerCamelCase__ ) if push_to_hub: # Push model to HF hub logger.info('''Pushing model to the hub...''' ) A_ : List[Any] = ( '''microsoft/table-transformer-detection''' if '''detection''' in checkpoint_url else '''microsoft/table-transformer-structure-recognition''' ) model.push_to_hub(lowerCamelCase__ ) image_processor.push_to_hub(lowerCamelCase__ ) if __name__ == "__main__": snake_case__ = argparse.ArgumentParser() parser.add_argument( """--checkpoint_url""", default="""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""", type=str, choices=[ """https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""", """https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth""", ], help="""URL of the Table Transformer checkpoint you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) snake_case__ = parser.parse_args() convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
4
0
'''simple docstring''' import unittest from transformers import AutoTokenizer, FalconConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( FalconForCausalLM, FalconForQuestionAnswering, FalconForSequenceClassification, FalconForTokenClassification, FalconModel, ) class UpperCamelCase_ : """simple docstring""" def __init__( self : List[str] , _lowerCamelCase : List[str] , _lowerCamelCase : int=3 , _lowerCamelCase : Dict=7 , _lowerCamelCase : str=True , _lowerCamelCase : Union[str, Any]=True , _lowerCamelCase : Tuple=False , _lowerCamelCase : str=True , _lowerCamelCase : Tuple=99 , _lowerCamelCase : str=32 , _lowerCamelCase : Dict=5 , _lowerCamelCase : Union[str, Any]=4 , _lowerCamelCase : Dict=37 , _lowerCamelCase : Any="gelu" , _lowerCamelCase : int=0.1 , _lowerCamelCase : Tuple=0.1 , _lowerCamelCase : Union[str, Any]=512 , _lowerCamelCase : Optional[Any]=16 , _lowerCamelCase : Union[str, Any]=2 , _lowerCamelCase : List[Any]=0.02 , _lowerCamelCase : Any=3 , _lowerCamelCase : Optional[int]=4 , _lowerCamelCase : Any=None , ): """simple docstring""" A_ : List[Any] = parent A_ : Optional[int] = batch_size A_ : Optional[int] = seq_length A_ : Dict = is_training A_ : Optional[int] = use_input_mask A_ : Any = use_token_type_ids A_ : Any = use_labels A_ : Optional[int] = vocab_size A_ : Optional[Any] = hidden_size A_ : Union[str, Any] = num_hidden_layers A_ : str = num_attention_heads A_ : List[Any] = intermediate_size A_ : str = hidden_act A_ : Any = hidden_dropout_prob A_ : int = attention_probs_dropout_prob A_ : Union[str, Any] = max_position_embeddings A_ : Optional[Any] = type_vocab_size A_ : Optional[int] = type_sequence_label_size A_ : Dict = initializer_range A_ : Any = num_labels A_ : int = num_choices A_ : Optional[int] = scope def _a ( self : List[str] ): """simple docstring""" A_ : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A_ : List[str] = None if self.use_input_mask: A_ : Any = random_attention_mask([self.batch_size, self.seq_length] ) A_ : int = None A_ : str = None A_ : Union[str, Any] = None A_ : Dict = None if self.use_labels: A_ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A_ : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) A_ : str = ids_tensor([self.batch_size] , self.num_choices ) A_ : List[Any] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def _a ( self : List[Any] ): """simple docstring""" return FalconConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowerCamelCase , initializer_range=self.initializer_range , pad_token_id=1 , new_decoder_architecture=_lowerCamelCase , ) def _a ( self : Dict , _lowerCamelCase : Tuple , _lowerCamelCase : Optional[Any] , _lowerCamelCase : List[Any] , _lowerCamelCase : Optional[int] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Dict ): """simple docstring""" A_ : Dict = FalconModel(config=_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() A_ : Dict = model(_lowerCamelCase , attention_mask=_lowerCamelCase ) A_ : Any = model(_lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _a ( self : int , _lowerCamelCase : Dict , _lowerCamelCase : Any , _lowerCamelCase : Dict , _lowerCamelCase : str , _lowerCamelCase : Any , _lowerCamelCase : Any , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Optional[int] , _lowerCamelCase : List[str] , ): """simple docstring""" A_ : Union[str, Any] = True A_ : Any = FalconModel(_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() A_ : Optional[int] = model( _lowerCamelCase , attention_mask=_lowerCamelCase , encoder_hidden_states=_lowerCamelCase , encoder_attention_mask=_lowerCamelCase , ) A_ : Optional[int] = model( _lowerCamelCase , attention_mask=_lowerCamelCase , encoder_hidden_states=_lowerCamelCase , ) A_ : Tuple = model(_lowerCamelCase , attention_mask=_lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _a ( self : Union[str, Any] , _lowerCamelCase : Dict , _lowerCamelCase : Any , _lowerCamelCase : str , _lowerCamelCase : Tuple , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : int , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : List[Any] , ): """simple docstring""" A_ : str = FalconForCausalLM(config=_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() A_ : str = model(_lowerCamelCase , attention_mask=_lowerCamelCase , labels=_lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _a ( self : List[str] , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Optional[int] , _lowerCamelCase : int , _lowerCamelCase : List[str] , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : str , _lowerCamelCase : str , _lowerCamelCase : Any , ): """simple docstring""" A_ : List[Any] = True A_ : Optional[int] = True A_ : List[str] = FalconForCausalLM(config=_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() # first forward pass A_ : List[str] = model( _lowerCamelCase , attention_mask=_lowerCamelCase , encoder_hidden_states=_lowerCamelCase , encoder_attention_mask=_lowerCamelCase , use_cache=_lowerCamelCase , ) A_ : Optional[int] = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids A_ : Any = ids_tensor((self.batch_size, 3) , config.vocab_size ) A_ : List[str] = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and A_ : List[str] = torch.cat([input_ids, next_tokens] , dim=-1 ) A_ : List[str] = torch.cat([input_mask, next_mask] , dim=-1 ) A_ : str = model( _lowerCamelCase , attention_mask=_lowerCamelCase , encoder_hidden_states=_lowerCamelCase , encoder_attention_mask=_lowerCamelCase , output_hidden_states=_lowerCamelCase , )['''hidden_states'''][0] A_ : Union[str, Any] = model( _lowerCamelCase , attention_mask=_lowerCamelCase , encoder_hidden_states=_lowerCamelCase , encoder_attention_mask=_lowerCamelCase , past_key_values=_lowerCamelCase , output_hidden_states=_lowerCamelCase , )['''hidden_states'''][0] # select random slice A_ : Union[str, Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item() A_ : Optional[Any] = output_from_no_past[:, -3:, random_slice_idx].detach() A_ : List[Any] = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(_lowerCamelCase , _lowerCamelCase , atol=1E-3 ) ) def _a ( self : int ): """simple docstring""" A_ : Dict = self.prepare_config_and_inputs() ( A_ ) : str = config_and_inputs A_ : Union[str, Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class UpperCamelCase_ (a__, a__, a__, unittest.TestCase ): """simple docstring""" _lowerCAmelCase = ( ( FalconModel, FalconForCausalLM, FalconForSequenceClassification, FalconForTokenClassification, FalconForQuestionAnswering, ) if is_torch_available() else () ) _lowerCAmelCase = (FalconForCausalLM,) if is_torch_available() else () _lowerCAmelCase = ( { 'feature-extraction': FalconModel, 'text-classification': FalconForSequenceClassification, 'text-generation': FalconForCausalLM, 'question-answering': FalconForQuestionAnswering, 'token-classification': FalconForTokenClassification, 'zero-shot': FalconForSequenceClassification, } if is_torch_available() else {} ) _lowerCAmelCase = False _lowerCAmelCase = False def _a ( self : List[str] ): """simple docstring""" A_ : str = FalconModelTester(self ) A_ : List[Any] = ConfigTester(self , config_class=_lowerCamelCase , hidden_size=37 ) def _a ( self : Any ): """simple docstring""" self.config_tester.run_common_tests() def _a ( self : int ): """simple docstring""" A_ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCamelCase ) def _a ( self : Optional[Any] ): """simple docstring""" A_ : str = self.model_tester.prepare_config_and_inputs() for alibi in [True, False]: A_ : Dict = alibi self.model_tester.create_and_check_model(_lowerCamelCase , *_lowerCamelCase ) def _a ( self : Union[str, Any] ): """simple docstring""" A_ : int = self.model_tester.prepare_config_and_inputs_for_common() A_ : List[Any] = 3 A_ : Any = input_dict['''input_ids'''] A_ : Tuple = input_ids.ne(1 ).to(_lowerCamelCase ) A_ : List[str] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) A_ : Dict = FalconForSequenceClassification(_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() A_ : Optional[int] = model(_lowerCamelCase , attention_mask=_lowerCamelCase , labels=_lowerCamelCase ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def _a ( self : List[Any] ): """simple docstring""" A_ : Any = self.model_tester.prepare_config_and_inputs_for_common() A_ : Dict = 3 A_ : int = '''single_label_classification''' A_ : Dict = input_dict['''input_ids'''] A_ : Dict = input_ids.ne(1 ).to(_lowerCamelCase ) A_ : Dict = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) A_ : List[str] = FalconForSequenceClassification(_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() A_ : List[Any] = model(_lowerCamelCase , attention_mask=_lowerCamelCase , labels=_lowerCamelCase ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def _a ( self : Union[str, Any] ): """simple docstring""" A_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() A_ : Dict = input_dict['''input_ids'''] A_ : List[Any] = FalconForCausalLM(_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() A_ : Union[str, Any] = model(_lowerCamelCase , use_cache=_lowerCamelCase ) A_ : Union[str, Any] = input_ids.shape[0] A_ : Tuple = model._convert_to_rw_cache(result.past_key_values ) A_ : List[str] = model._convert_cache_to_standard_format(_lowerCamelCase , _lowerCamelCase ) for layer in range(len(_lowerCamelCase ) ): for tensor_idx in range(2 ): self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3 ) self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4 ) self.assertTrue( torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx] ) ) def _a ( self : Optional[int] ): """simple docstring""" A_ : int = self.model_tester.prepare_config_and_inputs_for_common() A_ : Tuple = 3 A_ : str = '''multi_label_classification''' A_ : Union[str, Any] = input_dict['''input_ids'''] A_ : List[Any] = input_ids.ne(1 ).to(_lowerCamelCase ) A_ : Tuple = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) A_ : List[str] = FalconForSequenceClassification(_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() A_ : Optional[int] = model(_lowerCamelCase , attention_mask=_lowerCamelCase , labels=_lowerCamelCase ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def _a ( self : Union[str, Any] ): """simple docstring""" for model_class in self.all_generative_model_classes: A_ : Dict = self.model_tester.prepare_config_and_inputs_for_common() # If it doesn't support cache, pass the test if not hasattr(_lowerCamelCase , '''use_cache''' ): return A_ : Tuple = model_class(_lowerCamelCase ).to(_lowerCamelCase ) if "use_cache" not in inputs: A_ : str = True A_ : Tuple = model(**_lowerCamelCase ) # If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format) if "past_key_values" not in outputs: return A_ : List[Any] = ( getattr(_lowerCamelCase , '''decoder_layers''' , _lowerCamelCase ) or getattr(_lowerCamelCase , '''num_decoder_layers''' , _lowerCamelCase ) or config.num_hidden_layers ) A_ : List[Any] = getattr(_lowerCamelCase , '''num_kv_heads''' , config.num_attention_heads ) A_ : Dict = getattr(_lowerCamelCase , '''d_model''' , config.hidden_size ) A_ : Optional[int] = embed_dim // num_attention_heads A_ : List[Any] = outputs['''past_key_values'''] self.assertEqual(len(_lowerCamelCase ) , _lowerCamelCase ) A_ : int = inputs['''input_ids'''].shape for i in range(_lowerCamelCase ): if config.new_decoder_architecture: A_ : int = config.num_attention_heads elif config.multi_query: A_ : List[Any] = 1 self.assertEqual(len(past_kv[0] ) , 2 ) # K V for the decoder = 2 self.assertEqual( past_kv[i][0].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) ) self.assertEqual( past_kv[i][1].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) ) @require_torch class UpperCamelCase_ (unittest.TestCase ): """simple docstring""" @slow def _a ( self : int ): """simple docstring""" A_ : List[Any] = AutoTokenizer.from_pretrained('''Rocketknight1/falcon-rw-1b''' ) A_ : Tuple = FalconForCausalLM.from_pretrained('''Rocketknight1/falcon-rw-1b''' ) model.eval() model.to(_lowerCamelCase ) A_ : List[Any] = tokenizer('''My favorite food is''' , return_tensors='''pt''' ).to(_lowerCamelCase ) A_ : Optional[Any] = ( '''My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday.''' ) A_ : Tuple = model.generate(**_lowerCamelCase , do_sample=_lowerCamelCase , max_new_tokens=19 ) A_ : int = tokenizer.batch_decode(_lowerCamelCase )[0] self.assertEqual(_lowerCamelCase , _lowerCamelCase ) @slow def _a ( self : Union[str, Any] ): """simple docstring""" for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]: A_ : str = AutoTokenizer.from_pretrained(_lowerCamelCase ) A_ : str = FalconForCausalLM.from_pretrained(_lowerCamelCase ) model.eval() model.to(_lowerCamelCase ) A_ : int = tokenizer('''My favorite food is''' , return_tensors='''pt''' ).to(_lowerCamelCase ) # We just test that these run without errors - the models are randomly initialized # and so the actual text outputs will be garbage model.generate(**_lowerCamelCase , do_sample=_lowerCamelCase , max_new_tokens=4 ) model.generate(**_lowerCamelCase , do_sample=_lowerCamelCase , max_new_tokens=4 ) model.generate(**_lowerCamelCase , num_beams=2 , max_new_tokens=4 ) @slow def _a ( self : Optional[int] ): """simple docstring""" with torch.no_grad(): for repo in [ "Rocketknight1/falcon-rw-1b", "Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b", ]: A_ : Optional[Any] = AutoTokenizer.from_pretrained(_lowerCamelCase ) A_ : Optional[Any] = FalconForCausalLM.from_pretrained(_lowerCamelCase ) model.eval() model.to(device=_lowerCamelCase ) A_ : Tuple = tokenizer('''My favorite food is''' , return_tensors='''pt''' ).to(_lowerCamelCase ) # Test results are the same with and without cache A_ : str = model.generate(**_lowerCamelCase , do_sample=_lowerCamelCase , max_new_tokens=20 , use_cache=_lowerCamelCase ) A_ : int = model.generate(**_lowerCamelCase , do_sample=_lowerCamelCase , max_new_tokens=20 , use_cache=_lowerCamelCase ) self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0 )
351
'''simple docstring''' import logging import os from dataclasses import dataclass from typing import List, Optional, Union import tqdm from filelock import FileLock from transformers import ( BartTokenizer, BartTokenizerFast, DataProcessor, PreTrainedTokenizer, RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, is_tf_available, is_torch_available, ) snake_case__ = logging.getLogger(__name__) @dataclass(frozen=a__ ) class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = 42 _lowerCAmelCase = 42 _lowerCAmelCase = None _lowerCAmelCase = None _lowerCAmelCase = None @dataclass(frozen=a__ ) class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = 42 _lowerCAmelCase = None _lowerCAmelCase = None _lowerCAmelCase = None _lowerCAmelCase = None if is_torch_available(): import torch from torch.utils.data import Dataset class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = 42 def __init__( self : Optional[int] , _lowerCamelCase : str , _lowerCamelCase : PreTrainedTokenizer , _lowerCamelCase : str , _lowerCamelCase : Optional[int] = None , _lowerCamelCase : List[Any]=False , _lowerCamelCase : bool = False , ): """simple docstring""" A_ : Optional[int] = hans_processors[task]() A_ : int = os.path.join( _lowerCamelCase , '''cached_{}_{}_{}_{}'''.format( '''dev''' if evaluate else '''train''' , tokenizer.__class__.__name__ , str(_lowerCamelCase ) , _lowerCamelCase , ) , ) A_ : Dict = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) A_ ,A_ : List[str] = label_list[2], label_list[1] A_ : Optional[int] = label_list # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. A_ : str = cached_features_file + '''.lock''' with FileLock(_lowerCamelCase ): if os.path.exists(_lowerCamelCase ) and not overwrite_cache: logger.info(f'Loading features from cached file {cached_features_file}' ) A_ : List[str] = torch.load(_lowerCamelCase ) else: logger.info(f'Creating features from dataset file at {data_dir}' ) A_ : Optional[int] = ( processor.get_dev_examples(_lowerCamelCase ) if evaluate else processor.get_train_examples(_lowerCamelCase ) ) logger.info('''Training examples: %s''' , len(_lowerCamelCase ) ) A_ : Optional[int] = hans_convert_examples_to_features(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) logger.info('''Saving features into cached file %s''' , _lowerCamelCase ) torch.save(self.features , _lowerCamelCase ) def __len__( self : List[str] ): """simple docstring""" return len(self.features ) def __getitem__( self : List[str] , _lowerCamelCase : Optional[int] ): """simple docstring""" return self.features[i] def _a ( self : str ): """simple docstring""" return self.label_list if is_tf_available(): import tensorflow as tf class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = 42 def __init__( self : Optional[int] , _lowerCamelCase : str , _lowerCamelCase : PreTrainedTokenizer , _lowerCamelCase : str , _lowerCamelCase : Optional[int] = 128 , _lowerCamelCase : Dict=False , _lowerCamelCase : bool = False , ): """simple docstring""" A_ : Optional[int] = hans_processors[task]() A_ : Optional[int] = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) A_ ,A_ : Union[str, Any] = label_list[2], label_list[1] A_ : Tuple = label_list A_ : Optional[int] = processor.get_dev_examples(_lowerCamelCase ) if evaluate else processor.get_train_examples(_lowerCamelCase ) A_ : Tuple = hans_convert_examples_to_features(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) def gen(): for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='''convert examples to features''' ): if ex_index % 10000 == 0: logger.info('''Writing example %d of %d''' % (ex_index, len(_lowerCamelCase )) ) yield ( { "example_id": 0, "input_ids": ex.input_ids, "attention_mask": ex.attention_mask, "token_type_ids": ex.token_type_ids, }, ex.label, ) A_ : List[Any] = tf.data.Dataset.from_generator( _lowerCamelCase , ( { '''example_id''': tf.intaa, '''input_ids''': tf.intaa, '''attention_mask''': tf.intaa, '''token_type_ids''': tf.intaa, }, tf.intaa, ) , ( { '''example_id''': tf.TensorShape([] ), '''input_ids''': tf.TensorShape([None, None] ), '''attention_mask''': tf.TensorShape([None, None] ), '''token_type_ids''': tf.TensorShape([None, None] ), }, tf.TensorShape([] ), ) , ) def _a ( self : Any ): """simple docstring""" return self.dataset def __len__( self : Dict ): """simple docstring""" return len(self.features ) def __getitem__( self : Optional[int] , _lowerCamelCase : List[str] ): """simple docstring""" return self.features[i] def _a ( self : Tuple ): """simple docstring""" return self.label_list class UpperCamelCase_ (a__ ): """simple docstring""" def _a ( self : List[str] , _lowerCamelCase : Union[str, Any] ): """simple docstring""" return self._create_examples(self._read_tsv(os.path.join(_lowerCamelCase , '''heuristics_train_set.txt''' ) ) , '''train''' ) def _a ( self : List[str] , _lowerCamelCase : Tuple ): """simple docstring""" return self._create_examples(self._read_tsv(os.path.join(_lowerCamelCase , '''heuristics_evaluation_set.txt''' ) ) , '''dev''' ) def _a ( self : Any ): """simple docstring""" return ["contradiction", "entailment", "neutral"] def _a ( self : Optional[Any] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Any ): """simple docstring""" A_ : Tuple = [] for i, line in enumerate(_lowerCamelCase ): if i == 0: continue A_ : str = '''%s-%s''' % (set_type, line[0]) A_ : Optional[Any] = line[5] A_ : Union[str, Any] = line[6] A_ : List[str] = line[7][2:] if line[7].startswith('''ex''' ) else line[7] A_ : str = line[0] examples.append(InputExample(guid=_lowerCamelCase , text_a=_lowerCamelCase , text_b=_lowerCamelCase , label=_lowerCamelCase , pairID=_lowerCamelCase ) ) return examples def snake_case__ ( lowerCamelCase__ : List[InputExample] , lowerCamelCase__ : List[str] , lowerCamelCase__ : int , lowerCamelCase__ : PreTrainedTokenizer , ) -> int: A_ : Union[str, Any] = {label: i for i, label in enumerate(lowerCamelCase__ )} A_ : Optional[Any] = [] for ex_index, example in tqdm.tqdm(enumerate(lowerCamelCase__ ) , desc='''convert examples to features''' ): if ex_index % 1_0_0_0_0 == 0: logger.info('''Writing example %d''' % (ex_index) ) A_ : Optional[int] = tokenizer( example.text_a , example.text_b , add_special_tokens=lowerCamelCase__ , max_length=lowerCamelCase__ , padding='''max_length''' , truncation=lowerCamelCase__ , return_overflowing_tokens=lowerCamelCase__ , ) A_ : List[str] = label_map[example.label] if example.label in label_map else 0 A_ : Tuple = int(example.pairID ) features.append(InputFeatures(**lowerCamelCase__ , label=lowerCamelCase__ , pairID=lowerCamelCase__ ) ) for i, example in enumerate(examples[:5] ): logger.info('''*** Example ***''' ) logger.info(f'guid: {example}' ) logger.info(f'features: {features[i]}' ) return features snake_case__ = { """hans""": 3, } snake_case__ = { """hans""": HansProcessor, }
4
0
'''simple docstring''' import random import unittest from torch.utils.data import BatchSampler, DataLoader, IterableDataset from accelerate import Accelerator from accelerate.data_loader import ( BatchSamplerShard, DataLoaderDispatcher, DataLoaderShard, IterableDatasetShard, SkipBatchSampler, SkipDataLoader, skip_first_batches, ) class UpperCamelCase_ (a__ ): """simple docstring""" def __init__( self : Union[str, Any] , _lowerCamelCase : Any=0.01 , _lowerCamelCase : List[str]=1000 ): """simple docstring""" A_ : Optional[Any] = p_stop A_ : Dict = max_length def __iter__( self : Optional[Any] ): """simple docstring""" A_ : Optional[int] = 0 A_ : Optional[int] = False while not stop and count < self.max_length: yield count count += 1 A_ : Tuple = random.random() < self.p_stop class UpperCamelCase_ (unittest.TestCase ): """simple docstring""" def _a ( self : List[str] , _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : str=False , _lowerCamelCase : List[Any]=True ): """simple docstring""" A_ : Dict = [ BatchSamplerShard(_lowerCamelCase , 2 , _lowerCamelCase , split_batches=_lowerCamelCase , even_batches=_lowerCamelCase ) for i in range(2 ) ] A_ : int = [list(_lowerCamelCase ) for batch_sampler_shard in batch_sampler_shards] if not split_batches: self.assertListEqual([len(_lowerCamelCase ) for shard in batch_sampler_shards] , [len(_lowerCamelCase ) for e in expected] ) self.assertListEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : List[str] ): """simple docstring""" A_ : Tuple = BatchSampler(range(24 ) , batch_size=3 , drop_last=_lowerCamelCase ) A_ : Optional[int] = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]], ] self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase ) A_ : List[Any] = BatchSampler(range(24 ) , batch_size=3 , drop_last=_lowerCamelCase ) # Expected shouldn't change self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase ) # Check the shards when the dataset is a round multiple of batch size but not total batch size. A_ : Optional[int] = BatchSampler(range(21 ) , batch_size=3 , drop_last=_lowerCamelCase ) A_ : Any = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]], ] self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase ) A_ : str = BatchSampler(range(21 ) , batch_size=3 , drop_last=_lowerCamelCase ) A_ : Tuple = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase ) # Check the shards when the dataset is not a round multiple of batch size but has a multiple of # num_processes batch. A_ : Dict = BatchSampler(range(22 ) , batch_size=3 , drop_last=_lowerCamelCase ) A_ : Dict = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]], ] self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase ) A_ : Union[str, Any] = BatchSampler(range(22 ) , batch_size=3 , drop_last=_lowerCamelCase ) A_ : List[Any] = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase ) # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of # num_processes batch. A_ : Any = BatchSampler(range(20 ) , batch_size=3 , drop_last=_lowerCamelCase ) A_ : Dict = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]], ] self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase ) A_ : List[Any] = BatchSampler(range(20 ) , batch_size=3 , drop_last=_lowerCamelCase ) A_ : List[str] = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase ) # Check the shards when the dataset is very small. A_ : str = BatchSampler(range(2 ) , batch_size=3 , drop_last=_lowerCamelCase ) A_ : Union[str, Any] = [[[0, 1, 0]], [[1, 0, 1]]] self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase ) A_ : int = BatchSampler(range(2 ) , batch_size=3 , drop_last=_lowerCamelCase ) A_ : Union[str, Any] = [[], []] self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase ) def _a ( self : str ): """simple docstring""" A_ : Dict = BatchSampler(range(24 ) , batch_size=4 , drop_last=_lowerCamelCase ) A_ : List[Any] = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]], ] self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , split_batches=_lowerCamelCase ) A_ : List[str] = BatchSampler(range(24 ) , batch_size=4 , drop_last=_lowerCamelCase ) # Expected shouldn't change self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , split_batches=_lowerCamelCase ) # Check the shards when the dataset is not a round multiple of batch size. A_ : List[Any] = BatchSampler(range(22 ) , batch_size=4 , drop_last=_lowerCamelCase ) A_ : Any = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]], ] self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , split_batches=_lowerCamelCase ) A_ : Any = BatchSampler(range(22 ) , batch_size=4 , drop_last=_lowerCamelCase ) A_ : List[str] = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , split_batches=_lowerCamelCase ) # Check the shards when the dataset is not a round multiple of batch size or num_processes. A_ : Dict = BatchSampler(range(21 ) , batch_size=4 , drop_last=_lowerCamelCase ) A_ : Tuple = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]], ] self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , split_batches=_lowerCamelCase ) A_ : Optional[Any] = BatchSampler(range(21 ) , batch_size=4 , drop_last=_lowerCamelCase ) A_ : Optional[int] = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , split_batches=_lowerCamelCase ) # Check the shards when the dataset is very small. A_ : Optional[Any] = BatchSampler(range(2 ) , batch_size=4 , drop_last=_lowerCamelCase ) A_ : Tuple = [[[0, 1]], [[0, 1]]] self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , split_batches=_lowerCamelCase ) A_ : str = BatchSampler(range(2 ) , batch_size=4 , drop_last=_lowerCamelCase ) A_ : Any = [[], []] self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , split_batches=_lowerCamelCase ) def _a ( self : Optional[int] ): """simple docstring""" A_ : Any = BatchSampler(range(24 ) , batch_size=3 , drop_last=_lowerCamelCase ) A_ : Optional[Any] = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]], ] self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , even_batches=_lowerCamelCase ) A_ : Tuple = BatchSampler(range(24 ) , batch_size=3 , drop_last=_lowerCamelCase ) # Expected shouldn't change self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , even_batches=_lowerCamelCase ) # Check the shards when the dataset is a round multiple of batch size but not total batch size. A_ : Tuple = BatchSampler(range(21 ) , batch_size=3 , drop_last=_lowerCamelCase ) A_ : List[str] = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , even_batches=_lowerCamelCase ) A_ : List[str] = BatchSampler(range(21 ) , batch_size=3 , drop_last=_lowerCamelCase ) A_ : Dict = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , even_batches=_lowerCamelCase ) # Check the shards when the dataset is not a round multiple of batch size but has a multiple of # num_processes batch. A_ : Optional[int] = BatchSampler(range(22 ) , batch_size=3 , drop_last=_lowerCamelCase ) A_ : Optional[Any] = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]], ] self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , even_batches=_lowerCamelCase ) A_ : List[str] = BatchSampler(range(22 ) , batch_size=3 , drop_last=_lowerCamelCase ) A_ : Any = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , even_batches=_lowerCamelCase ) # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of # num_processes batch. A_ : Tuple = BatchSampler(range(20 ) , batch_size=3 , drop_last=_lowerCamelCase ) A_ : str = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , even_batches=_lowerCamelCase ) A_ : Optional[int] = BatchSampler(range(20 ) , batch_size=3 , drop_last=_lowerCamelCase ) A_ : Dict = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , even_batches=_lowerCamelCase ) # Check the shards when the dataset is very small. A_ : str = BatchSampler(range(2 ) , batch_size=3 , drop_last=_lowerCamelCase ) A_ : Optional[Any] = [[[0, 1]], []] self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , even_batches=_lowerCamelCase ) A_ : List[str] = BatchSampler(range(2 ) , batch_size=3 , drop_last=_lowerCamelCase ) A_ : int = [[], []] self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , even_batches=_lowerCamelCase ) def _a ( self : Dict ): """simple docstring""" A_ : List[str] = BatchSampler(range(24 ) , batch_size=4 , drop_last=_lowerCamelCase ) A_ : Optional[int] = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]], ] self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , split_batches=_lowerCamelCase , even_batches=_lowerCamelCase ) A_ : Union[str, Any] = BatchSampler(range(24 ) , batch_size=4 , drop_last=_lowerCamelCase ) # Expected shouldn't change self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , split_batches=_lowerCamelCase , even_batches=_lowerCamelCase ) # Check the shards when the dataset is not a round multiple of batch size. A_ : Optional[int] = BatchSampler(range(22 ) , batch_size=4 , drop_last=_lowerCamelCase ) A_ : int = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , split_batches=_lowerCamelCase , even_batches=_lowerCamelCase ) A_ : int = BatchSampler(range(22 ) , batch_size=4 , drop_last=_lowerCamelCase ) A_ : List[str] = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , split_batches=_lowerCamelCase , even_batches=_lowerCamelCase ) # Check the shards when the dataset is not a round multiple of batch size or num_processes. A_ : Optional[Any] = BatchSampler(range(21 ) , batch_size=4 , drop_last=_lowerCamelCase ) A_ : List[str] = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , split_batches=_lowerCamelCase , even_batches=_lowerCamelCase ) A_ : List[str] = BatchSampler(range(21 ) , batch_size=4 , drop_last=_lowerCamelCase ) A_ : Optional[int] = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , split_batches=_lowerCamelCase , even_batches=_lowerCamelCase ) # Check the shards when the dataset is very small. A_ : Union[str, Any] = BatchSampler(range(2 ) , batch_size=4 , drop_last=_lowerCamelCase ) A_ : str = [[[0, 1]], []] self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , split_batches=_lowerCamelCase , even_batches=_lowerCamelCase ) A_ : Any = BatchSampler(range(2 ) , batch_size=4 , drop_last=_lowerCamelCase ) A_ : Union[str, Any] = [[], []] self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , split_batches=_lowerCamelCase , even_batches=_lowerCamelCase ) def _a ( self : Tuple ): """simple docstring""" A_ : Tuple = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]] A_ : Union[str, Any] = [BatchSamplerShard(_lowerCamelCase , 2 , _lowerCamelCase , even_batches=_lowerCamelCase ) for i in range(2 )] self.assertEqual(len(batch_sampler_shards[0] ) , 3 ) self.assertEqual(len(batch_sampler_shards[1] ) , 2 ) self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] ) self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] ) def _a ( self : List[str] , _lowerCamelCase : Optional[int] , _lowerCamelCase : Any , _lowerCamelCase : List[str] , _lowerCamelCase : Any=False , _lowerCamelCase : int=2 , _lowerCamelCase : Union[str, Any]=False ): """simple docstring""" random.seed(_lowerCamelCase ) A_ : Dict = list(_lowerCamelCase ) A_ : Dict = [ IterableDatasetShard( _lowerCamelCase , batch_size=_lowerCamelCase , drop_last=_lowerCamelCase , num_processes=_lowerCamelCase , process_index=_lowerCamelCase , split_batches=_lowerCamelCase , ) for i in range(_lowerCamelCase ) ] A_ : List[Any] = [] for iterable_dataset_shard in iterable_dataset_shards: # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results. random.seed(_lowerCamelCase ) iterable_dataset_lists.append(list(_lowerCamelCase ) ) A_ : Any = batch_size // num_processes if split_batches else batch_size # All iterable dataset shard should have the same length, a round multiple of shard_batch_size A_ : int = iterable_dataset_lists[0] for l in iterable_dataset_lists[1:]: self.assertEqual(len(_lowerCamelCase ) , len(_lowerCamelCase ) ) self.assertTrue(len(_lowerCamelCase ) % shard_batch_size == 0 ) A_ : Union[str, Any] = [] for idx in range(0 , len(_lowerCamelCase ) , _lowerCamelCase ): for l in iterable_dataset_lists: observed += l[idx : idx + shard_batch_size] if not drop_last: while len(_lowerCamelCase ) < len(_lowerCamelCase ): reference += reference self.assertListEqual(_lowerCamelCase , reference[: len(_lowerCamelCase )] ) def _a ( self : Optional[int] ): """simple docstring""" A_ : Dict = 42 A_ : Optional[int] = RandomIterableDataset() self.check_iterable_dataset_shards(_lowerCamelCase , _lowerCamelCase , batch_size=4 , drop_last=_lowerCamelCase , split_batches=_lowerCamelCase ) self.check_iterable_dataset_shards(_lowerCamelCase , _lowerCamelCase , batch_size=4 , drop_last=_lowerCamelCase , split_batches=_lowerCamelCase ) self.check_iterable_dataset_shards(_lowerCamelCase , _lowerCamelCase , batch_size=4 , drop_last=_lowerCamelCase , split_batches=_lowerCamelCase ) self.check_iterable_dataset_shards(_lowerCamelCase , _lowerCamelCase , batch_size=4 , drop_last=_lowerCamelCase , split_batches=_lowerCamelCase ) # Edge case with a very small dataset A_ : List[Any] = RandomIterableDataset(max_length=2 ) self.check_iterable_dataset_shards(_lowerCamelCase , _lowerCamelCase , batch_size=4 , drop_last=_lowerCamelCase , split_batches=_lowerCamelCase ) self.check_iterable_dataset_shards(_lowerCamelCase , _lowerCamelCase , batch_size=4 , drop_last=_lowerCamelCase , split_batches=_lowerCamelCase ) self.check_iterable_dataset_shards(_lowerCamelCase , _lowerCamelCase , batch_size=4 , drop_last=_lowerCamelCase , split_batches=_lowerCamelCase ) self.check_iterable_dataset_shards(_lowerCamelCase , _lowerCamelCase , batch_size=4 , drop_last=_lowerCamelCase , split_batches=_lowerCamelCase ) def _a ( self : List[str] ): """simple docstring""" A_ : Union[str, Any] = BatchSampler(range(16 ) , batch_size=4 , drop_last=_lowerCamelCase ) A_ : int = SkipBatchSampler(_lowerCamelCase , 2 ) self.assertListEqual(list(_lowerCamelCase ) , [[8, 9, 10, 11], [12, 13, 14, 15]] ) def _a ( self : List[Any] ): """simple docstring""" A_ : str = SkipDataLoader(list(range(16 ) ) , batch_size=4 , skip_batches=2 ) self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] ) def _a ( self : List[Any] ): """simple docstring""" A_ : int = DataLoader(list(range(16 ) ) , batch_size=4 ) A_ : Tuple = skip_first_batches(_lowerCamelCase , num_batches=2 ) self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] ) def _a ( self : Optional[int] ): """simple docstring""" A_ : Optional[Any] = DataLoaderShard(list(range(16 ) ) , batch_size=4 ) for idx, _ in enumerate(_lowerCamelCase ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) # Test it also works on the second iteration for idx, _ in enumerate(_lowerCamelCase ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) def _a ( self : Tuple ): """simple docstring""" Accelerator() A_ : int = DataLoaderDispatcher(range(16 ) , batch_size=4 ) for idx, _ in enumerate(_lowerCamelCase ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) # Test it also works on the second iteration for idx, _ in enumerate(_lowerCamelCase ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
352
'''simple docstring''' import io import itertools import json from dataclasses import dataclass from typing import Optional import pyarrow as pa import pyarrow.json as paj import datasets from datasets.table import table_cast from datasets.utils.file_utils import readline snake_case__ = datasets.utils.logging.get_logger(__name__) @dataclass class UpperCamelCase_ (datasets.BuilderConfig ): """simple docstring""" _lowerCAmelCase = None _lowerCAmelCase = "utf-8" _lowerCAmelCase = None _lowerCAmelCase = None _lowerCAmelCase = True # deprecated _lowerCAmelCase = None # deprecated _lowerCAmelCase = 1_0 << 2_0 # 10MB _lowerCAmelCase = None class UpperCamelCase_ (datasets.ArrowBasedBuilder ): """simple docstring""" _lowerCAmelCase = JsonConfig def _a ( self : int ): """simple docstring""" if self.config.block_size is not None: logger.warning('''The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead''' ) A_ : List[Any] = self.config.block_size if self.config.use_threads is not True: logger.warning( '''The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.''' ) if self.config.newlines_in_values is not None: raise ValueError('''The JSON loader parameter `newlines_in_values` is no longer supported''' ) return datasets.DatasetInfo(features=self.config.features ) def _a ( self : Any , _lowerCamelCase : List[str] ): """simple docstring""" if not self.config.data_files: raise ValueError(f'At least one data file must be specified, but got data_files={self.config.data_files}' ) A_ : int = dl_manager.download_and_extract(self.config.data_files ) if isinstance(_lowerCamelCase , (str, list, tuple) ): A_ : Union[str, Any] = data_files if isinstance(_lowerCamelCase , _lowerCamelCase ): A_ : List[str] = [files] A_ : List[Any] = [dl_manager.iter_files(_lowerCamelCase ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )] A_ : Tuple = [] for split_name, files in data_files.items(): if isinstance(_lowerCamelCase , _lowerCamelCase ): A_ : int = [files] A_ : Union[str, Any] = [dl_manager.iter_files(_lowerCamelCase ) for file in files] splits.append(datasets.SplitGenerator(name=_lowerCamelCase , gen_kwargs={'''files''': files} ) ) return splits def _a ( self : int , _lowerCamelCase : pa.Table ): """simple docstring""" if self.config.features is not None: # adding missing columns for column_name in set(self.config.features ) - set(pa_table.column_names ): A_ : Optional[int] = self.config.features.arrow_schema.field(_lowerCamelCase ).type A_ : Optional[int] = pa_table.append_column(_lowerCamelCase , pa.array([None] * len(_lowerCamelCase ) , type=_lowerCamelCase ) ) # more expensive cast to support nested structures with keys in a different order # allows str <-> int/float or str to Audio for example A_ : str = table_cast(_lowerCamelCase , self.config.features.arrow_schema ) return pa_table def _a ( self : List[str] , _lowerCamelCase : int ): """simple docstring""" for file_idx, file in enumerate(itertools.chain.from_iterable(_lowerCamelCase ) ): # If the file is one json object and if we need to look at the list of items in one specific field if self.config.field is not None: with open(_lowerCamelCase , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f: A_ : int = json.load(_lowerCamelCase ) # We keep only the field we are interested in A_ : List[str] = dataset[self.config.field] # We accept two format: a list of dicts or a dict of lists if isinstance(_lowerCamelCase , (list, tuple) ): A_ : int = set().union(*[row.keys() for row in dataset] ) A_ : List[str] = {col: [row.get(_lowerCamelCase ) for row in dataset] for col in keys} else: A_ : Tuple = dataset A_ : Dict = pa.Table.from_pydict(_lowerCamelCase ) yield file_idx, self._cast_table(_lowerCamelCase ) # If the file has one json object per line else: with open(_lowerCamelCase , '''rb''' ) as f: A_ : int = 0 # Use block_size equal to the chunk size divided by 32 to leverage multithreading # Set a default minimum value of 16kB if the chunk size is really small A_ : int = max(self.config.chunksize // 32 , 16 << 10 ) A_ : int = ( self.config.encoding_errors if self.config.encoding_errors is not None else '''strict''' ) while True: A_ : Any = f.read(self.config.chunksize ) if not batch: break # Finish current line try: batch += f.readline() except (AttributeError, io.UnsupportedOperation): batch += readline(_lowerCamelCase ) # PyArrow only accepts utf-8 encoded bytes if self.config.encoding != "utf-8": A_ : Optional[Any] = batch.decode(self.config.encoding , errors=_lowerCamelCase ).encode('''utf-8''' ) try: while True: try: A_ : List[Any] = paj.read_json( io.BytesIO(_lowerCamelCase ) , read_options=paj.ReadOptions(block_size=_lowerCamelCase ) ) break except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e: if ( isinstance(_lowerCamelCase , pa.ArrowInvalid ) and "straddling" not in str(_lowerCamelCase ) or block_size > len(_lowerCamelCase ) ): raise else: # Increase the block size in case it was too small. # The block size will be reset for the next file. logger.debug( f'Batch of {len(_lowerCamelCase )} bytes couldn\'t be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.' ) block_size *= 2 except pa.ArrowInvalid as e: try: with open( _lowerCamelCase , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f: A_ : Optional[Any] = json.load(_lowerCamelCase ) except json.JSONDecodeError: logger.error(f'Failed to read file \'{file}\' with error {type(_lowerCamelCase )}: {e}' ) raise e # If possible, parse the file as a list of json objects and exit the loop if isinstance(_lowerCamelCase , _lowerCamelCase ): # list is the only sequence type supported in JSON try: A_ : Optional[int] = set().union(*[row.keys() for row in dataset] ) A_ : Tuple = {col: [row.get(_lowerCamelCase ) for row in dataset] for col in keys} A_ : int = pa.Table.from_pydict(_lowerCamelCase ) except (pa.ArrowInvalid, AttributeError) as e: logger.error(f'Failed to read file \'{file}\' with error {type(_lowerCamelCase )}: {e}' ) raise ValueError(f'Not able to read records in the JSON file at {file}.' ) from None yield file_idx, self._cast_table(_lowerCamelCase ) break else: logger.error(f'Failed to read file \'{file}\' with error {type(_lowerCamelCase )}: {e}' ) raise ValueError( f'Not able to read records in the JSON file at {file}. ' f'You should probably indicate the field of the JSON file containing your records. ' f'This JSON file contain the following fields: {str(list(dataset.keys() ) )}. ' f'Select the correct one and provide it as `field=\'XXX\'` to the dataset loading method. ' ) from None # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(_lowerCamelCase ) batch_idx += 1
4
0
import os import unittest from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer from transformers.testing_utils import require_jieba, tooslow from ...test_tokenization_common import TokenizerTesterMixin @require_jieba class UpperCamelCase_ (a__, unittest.TestCase ): """simple docstring""" _lowerCAmelCase = CpmAntTokenizer _lowerCAmelCase = False def _a ( self : Union[str, Any] ): """simple docstring""" super().setUp() A_ : List[str] = [ '''<d>''', '''</d>''', '''<s>''', '''</s>''', '''</_>''', '''<unk>''', '''<pad>''', '''</n>''', '''我''', '''是''', '''C''', '''P''', '''M''', '''A''', '''n''', '''t''', ] A_ : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) @tooslow def _a ( self : Tuple ): """simple docstring""" A_ : Union[str, Any] = CpmAntTokenizer.from_pretrained('''openbmb/cpm-ant-10b''' ) A_ : Optional[Any] = '''今天天气真好!''' A_ : str = ['''今天''', '''天气''', '''真''', '''好''', '''!'''] A_ : int = tokenizer.tokenize(_lowerCamelCase ) self.assertListEqual(_lowerCamelCase , _lowerCamelCase ) A_ : List[str] = '''今天天气真好!''' A_ : List[Any] = [tokenizer.bos_token] + tokens A_ : Any = [6, 9802, 14962, 2082, 831, 244] self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCamelCase ) , _lowerCamelCase ) A_ : List[Any] = tokenizer.decode(_lowerCamelCase ) self.assertEqual(_lowerCamelCase , _lowerCamelCase )
353
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices snake_case__ = logging.get_logger(__name__) snake_case__ = { """microsoft/swin-tiny-patch4-window7-224""": ( """https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json""" ), # See all Swin models at https://huggingface.co/models?filter=swin } class UpperCamelCase_ (a__, a__ ): """simple docstring""" _lowerCAmelCase = 'swin' _lowerCAmelCase = { 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers', } def __init__( self : Any , _lowerCamelCase : Optional[Any]=224 , _lowerCamelCase : List[str]=4 , _lowerCamelCase : Optional[Any]=3 , _lowerCamelCase : Tuple=96 , _lowerCamelCase : List[Any]=[2, 2, 6, 2] , _lowerCamelCase : List[str]=[3, 6, 12, 24] , _lowerCamelCase : List[Any]=7 , _lowerCamelCase : Optional[int]=4.0 , _lowerCamelCase : List[str]=True , _lowerCamelCase : List[str]=0.0 , _lowerCamelCase : Any=0.0 , _lowerCamelCase : Dict=0.1 , _lowerCamelCase : List[str]="gelu" , _lowerCamelCase : Tuple=False , _lowerCamelCase : Dict=0.02 , _lowerCamelCase : Optional[Any]=1E-5 , _lowerCamelCase : Any=32 , _lowerCamelCase : Tuple=None , _lowerCamelCase : Any=None , **_lowerCamelCase : str , ): """simple docstring""" super().__init__(**_lowerCamelCase ) A_ : Optional[int] = image_size A_ : Optional[int] = patch_size A_ : Optional[int] = num_channels A_ : Any = embed_dim A_ : List[Any] = depths A_ : Any = len(_lowerCamelCase ) A_ : List[Any] = num_heads A_ : Tuple = window_size A_ : Tuple = mlp_ratio A_ : Dict = qkv_bias A_ : List[str] = hidden_dropout_prob A_ : List[str] = attention_probs_dropout_prob A_ : Any = drop_path_rate A_ : List[Any] = hidden_act A_ : Tuple = use_absolute_embeddings A_ : int = layer_norm_eps A_ : Optional[Any] = initializer_range A_ : Union[str, Any] = encoder_stride # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model A_ : str = int(embed_dim * 2 ** (len(_lowerCamelCase ) - 1) ) A_ : str = ['''stem'''] + [f'stage{idx}' for idx in range(1 , len(_lowerCamelCase ) + 1 )] A_ ,A_ : Optional[Any] = get_aligned_output_features_output_indices( out_features=_lowerCamelCase , out_indices=_lowerCamelCase , stage_names=self.stage_names ) class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = version.parse('1.11' ) @property def _a ( self : str ): """simple docstring""" return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def _a ( self : Union[str, Any] ): """simple docstring""" return 1E-4
4
0
'''simple docstring''' import unittest from transformers import EsmConfig, is_torch_available from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers.models.esm.modeling_esmfold import EsmForProteinFolding class UpperCamelCase_ : """simple docstring""" def __init__( self : Optional[Any] , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Tuple=13 , _lowerCamelCase : Optional[Any]=7 , _lowerCamelCase : List[str]=False , _lowerCamelCase : Dict=True , _lowerCamelCase : List[str]=False , _lowerCamelCase : List[Any]=False , _lowerCamelCase : str=19 , _lowerCamelCase : Dict=32 , _lowerCamelCase : str=5 , _lowerCamelCase : Optional[int]=4 , _lowerCamelCase : List[Any]=37 , _lowerCamelCase : List[str]="gelu" , _lowerCamelCase : Optional[int]=0.1 , _lowerCamelCase : List[Any]=0.1 , _lowerCamelCase : List[Any]=512 , _lowerCamelCase : List[str]=16 , _lowerCamelCase : Union[str, Any]=2 , _lowerCamelCase : Dict=0.02 , _lowerCamelCase : str=3 , _lowerCamelCase : Optional[int]=4 , _lowerCamelCase : str=None , ): """simple docstring""" A_ : List[str] = parent A_ : Tuple = batch_size A_ : Dict = seq_length A_ : List[Any] = is_training A_ : Optional[int] = use_input_mask A_ : int = use_token_type_ids A_ : Dict = use_labels A_ : List[str] = vocab_size A_ : Optional[Any] = hidden_size A_ : Optional[Any] = num_hidden_layers A_ : Optional[Any] = num_attention_heads A_ : Optional[Any] = intermediate_size A_ : int = hidden_act A_ : int = hidden_dropout_prob A_ : List[Any] = attention_probs_dropout_prob A_ : Any = max_position_embeddings A_ : Optional[Any] = type_vocab_size A_ : Optional[Any] = type_sequence_label_size A_ : Tuple = initializer_range A_ : Any = num_labels A_ : Optional[Any] = num_choices A_ : int = scope def _a ( self : List[Any] ): """simple docstring""" A_ : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A_ : int = None if self.use_input_mask: A_ : Dict = random_attention_mask([self.batch_size, self.seq_length] ) A_ : Any = None A_ : Tuple = None A_ : List[str] = None if self.use_labels: A_ : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) A_ : Tuple = ids_tensor([self.batch_size] , self.num_choices ) A_ : List[Any] = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def _a ( self : Tuple ): """simple docstring""" A_ : Optional[Any] = EsmConfig( vocab_size=33 , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , is_folding_model=_lowerCamelCase , esmfold_config={'''trunk''': {'''num_blocks''': 2}, '''fp16_esm''': False} , ) return config def _a ( self : Optional[Any] , _lowerCamelCase : List[Any] , _lowerCamelCase : Dict , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Optional[int] , _lowerCamelCase : Optional[int] , _lowerCamelCase : Union[str, Any] ): """simple docstring""" A_ : List[str] = EsmForProteinFolding(config=_lowerCamelCase ).float() model.to(_lowerCamelCase ) model.eval() A_ : int = model(_lowerCamelCase , attention_mask=_lowerCamelCase ) A_ : Dict = model(_lowerCamelCase ) A_ : str = model(_lowerCamelCase ) self.parent.assertEqual(result.positions.shape , (8, self.batch_size, self.seq_length, 14, 3) ) self.parent.assertEqual(result.angles.shape , (8, self.batch_size, self.seq_length, 7, 2) ) def _a ( self : Optional[Any] ): """simple docstring""" A_ : List[str] = self.prepare_config_and_inputs() ( A_ ) : str = config_and_inputs A_ : Any = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class UpperCamelCase_ (a__, a__, unittest.TestCase ): """simple docstring""" _lowerCAmelCase : Optional[int] = False _lowerCAmelCase : Optional[int] = (EsmForProteinFolding,) if is_torch_available() else () _lowerCAmelCase : List[str] = () _lowerCAmelCase : Tuple = {} if is_torch_available() else {} _lowerCAmelCase : str = False def _a ( self : Tuple ): """simple docstring""" A_ : List[str] = EsmFoldModelTester(self ) A_ : Union[str, Any] = ConfigTester(self , config_class=_lowerCamelCase , hidden_size=37 ) def _a ( self : Optional[int] ): """simple docstring""" self.config_tester.run_common_tests() def _a ( self : int ): """simple docstring""" A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCamelCase ) @unittest.skip('''Does not support attention outputs''' ) def _a ( self : Tuple ): """simple docstring""" pass @unittest.skip def _a ( self : Dict ): """simple docstring""" pass @unittest.skip('''Esm does not support embedding resizing''' ) def _a ( self : int ): """simple docstring""" pass @unittest.skip('''Esm does not support embedding resizing''' ) def _a ( self : Any ): """simple docstring""" pass @unittest.skip('''ESMFold does not support passing input embeds!''' ) def _a ( self : str ): """simple docstring""" pass @unittest.skip('''ESMFold does not support head pruning.''' ) def _a ( self : Tuple ): """simple docstring""" pass @unittest.skip('''ESMFold does not support head pruning.''' ) def _a ( self : Union[str, Any] ): """simple docstring""" pass @unittest.skip('''ESMFold does not support head pruning.''' ) def _a ( self : List[str] ): """simple docstring""" pass @unittest.skip('''ESMFold does not support head pruning.''' ) def _a ( self : Optional[Any] ): """simple docstring""" pass @unittest.skip('''ESMFold does not support head pruning.''' ) def _a ( self : int ): """simple docstring""" pass @unittest.skip('''ESMFold does not output hidden states in the normal way.''' ) def _a ( self : Tuple ): """simple docstring""" pass @unittest.skip('''ESMfold does not output hidden states in the normal way.''' ) def _a ( self : List[Any] ): """simple docstring""" pass @unittest.skip('''ESMFold only has one output format.''' ) def _a ( self : Dict ): """simple docstring""" pass @unittest.skip('''This test doesn\'t work for ESMFold and doesn\'t test core functionality''' ) def _a ( self : Any ): """simple docstring""" pass @unittest.skip('''ESMFold does not support input chunking.''' ) def _a ( self : Any ): """simple docstring""" pass @unittest.skip('''ESMFold doesn\'t respect you and it certainly doesn\'t respect your initialization arguments.''' ) def _a ( self : List[Any] ): """simple docstring""" pass @unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' ) def _a ( self : Any ): """simple docstring""" pass @unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' ) def _a ( self : Optional[Any] ): """simple docstring""" pass @unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' ) def _a ( self : List[str] ): """simple docstring""" pass @unittest.skip('''ESMFold doesn\'t support data parallel.''' ) def _a ( self : Union[str, Any] ): """simple docstring""" pass @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def _a ( self : Optional[int] ): """simple docstring""" pass @require_torch class UpperCamelCase_ (a__ ): """simple docstring""" @slow def _a ( self : Union[str, Any] ): """simple docstring""" A_ : Tuple = EsmForProteinFolding.from_pretrained('''facebook/esmfold_v1''' ).float() model.eval() A_ : List[Any] = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] ) A_ : str = model(_lowerCamelCase )['''positions'''] A_ : Any = torch.tensor([2.58_28, 0.79_93, -10.93_34] , dtype=torch.floataa ) self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0] , _lowerCamelCase , atol=1E-4 ) )
354
'''simple docstring''' from __future__ import annotations def snake_case__ ( lowerCamelCase__ : list[int] , lowerCamelCase__ : int ) -> list[int]: A_ : int = 0 A_ : str = len(lowerCamelCase__ ) - 1 while i < j: if nums[i] + nums[j] == target: return [i, j] elif nums[i] + nums[j] < target: A_ : Tuple = i + 1 else: A_ : List[str] = j - 1 return [] if __name__ == "__main__": import doctest doctest.testmod() print(F'{two_pointer([2, 7, 11, 15], 9) = }')
4
0
'''simple docstring''' from __future__ import annotations from typing import Any class UpperCamelCase_ : """simple docstring""" def __init__( self : Union[str, Any] , _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : float = 0 ): """simple docstring""" A_ : Tuple = row, column A_ : Union[str, Any] = [[default_value for c in range(_lowerCamelCase )] for r in range(_lowerCamelCase )] def __str__( self : List[Any] ): """simple docstring""" A_ : List[str] = f'Matrix consist of {self.row} rows and {self.column} columns\n' # Make string identifier A_ : Any = 0 for row_vector in self.array: for obj in row_vector: A_ : int = max(_lowerCamelCase , len(str(_lowerCamelCase ) ) ) A_ : str = f'%{max_element_length}s' # Make string and return def single_line(_lowerCamelCase : list[float] ) -> str: nonlocal string_format_identifier A_ : Optional[int] = '''[''' line += ", ".join(string_format_identifier % (obj,) for obj in row_vector ) line += "]" return line s += "\n".join(single_line(_lowerCamelCase ) for row_vector in self.array ) return s def __repr__( self : List[Any] ): """simple docstring""" return str(self ) def _a ( self : Optional[Any] , _lowerCamelCase : tuple[int, int] ): """simple docstring""" if not (isinstance(_lowerCamelCase , (list, tuple) ) and len(_lowerCamelCase ) == 2): return False elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column): return False else: return True def __getitem__( self : Optional[Any] , _lowerCamelCase : tuple[int, int] ): """simple docstring""" assert self.validate_indicies(_lowerCamelCase ) return self.array[loc[0]][loc[1]] def __setitem__( self : Dict , _lowerCamelCase : tuple[int, int] , _lowerCamelCase : float ): """simple docstring""" assert self.validate_indicies(_lowerCamelCase ) A_ : List[str] = value def __add__( self : Tuple , _lowerCamelCase : Matrix ): """simple docstring""" assert isinstance(_lowerCamelCase , _lowerCamelCase ) assert self.row == another.row and self.column == another.column # Add A_ : int = Matrix(self.row , self.column ) for r in range(self.row ): for c in range(self.column ): A_ : Dict = self[r, c] + another[r, c] return result def __neg__( self : List[str] ): """simple docstring""" A_ : List[str] = Matrix(self.row , self.column ) for r in range(self.row ): for c in range(self.column ): A_ : str = -self[r, c] return result def __sub__( self : Union[str, Any] , _lowerCamelCase : Matrix ): """simple docstring""" return self + (-another) def __mul__( self : str , _lowerCamelCase : int | float | Matrix ): """simple docstring""" if isinstance(_lowerCamelCase , (int, float) ): # Scalar multiplication A_ : int = Matrix(self.row , self.column ) for r in range(self.row ): for c in range(self.column ): A_ : Union[str, Any] = self[r, c] * another return result elif isinstance(_lowerCamelCase , _lowerCamelCase ): # Matrix multiplication assert self.column == another.row A_ : int = Matrix(self.row , another.column ) for r in range(self.row ): for c in range(another.column ): for i in range(self.column ): result[r, c] += self[r, i] * another[i, c] return result else: A_ : List[Any] = f'Unsupported type given for another ({type(_lowerCamelCase )})' raise TypeError(_lowerCamelCase ) def _a ( self : Dict ): """simple docstring""" A_ : Optional[Any] = Matrix(self.column , self.row ) for r in range(self.row ): for c in range(self.column ): A_ : Union[str, Any] = self[r, c] return result def _a ( self : str , _lowerCamelCase : Matrix , _lowerCamelCase : Matrix ): """simple docstring""" assert isinstance(_lowerCamelCase , _lowerCamelCase ) and isinstance(_lowerCamelCase , _lowerCamelCase ) assert self.row == self.column == u.row == v.row # u, v should be column vector assert u.column == v.column == 1 # u, v should be column vector # Calculate A_ : Tuple = v.transpose() A_ : Dict = (v_t * self * u)[0, 0] + 1 if numerator_factor == 0: return None # It's not invertable return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor)) # Testing if __name__ == "__main__": def snake_case__ ( ) -> None: # a^(-1) A_ : Union[str, Any] = Matrix(3 , 3 , 0 ) for i in range(3 ): A_ : Optional[Any] = 1 print(f'a^(-1) is {ainv}' ) # u, v A_ : str = Matrix(3 , 1 , 0 ) A_ : int = 1, 2, -3 A_ : str = Matrix(3 , 1 , 0 ) A_ : Optional[Any] = 4, -2, 5 print(f'u is {u}' ) print(f'v is {v}' ) print(f'uv^T is {u * v.transpose()}' ) # Sherman Morrison print(f'(a + uv^T)^(-1) is {ainv.sherman_morrison(lowerCamelCase__ , lowerCamelCase__ )}' ) def snake_case__ ( ) -> None: import doctest doctest.testmod() testa()
355
'''simple docstring''' def snake_case__ ( lowerCamelCase__ : list[int] , lowerCamelCase__ : list[int] , lowerCamelCase__ : int ) -> bool: return not any( neighbour == 1 and colored_vertices[i] == color for i, neighbour in enumerate(lowerCamelCase__ ) ) def snake_case__ ( lowerCamelCase__ : list[list[int]] , lowerCamelCase__ : int , lowerCamelCase__ : list[int] , lowerCamelCase__ : int ) -> bool: # Base Case if index == len(lowerCamelCase__ ): return True # Recursive Step for i in range(lowerCamelCase__ ): if valid_coloring(graph[index] , lowerCamelCase__ , lowerCamelCase__ ): # Color current vertex A_ : int = i # Validate coloring if util_color(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , index + 1 ): return True # Backtrack A_ : str = -1 return False def snake_case__ ( lowerCamelCase__ : list[list[int]] , lowerCamelCase__ : int ) -> list[int]: A_ : List[str] = [-1] * len(lowerCamelCase__ ) if util_color(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , 0 ): return colored_vertices return []
4
0
'''simple docstring''' snake_case__ = { """A""": ["""B""", """C""", """E"""], """B""": ["""A""", """D""", """E"""], """C""": ["""A""", """F""", """G"""], """D""": ["""B"""], """E""": ["""A""", """B""", """D"""], """F""": ["""C"""], """G""": ["""C"""], } def snake_case__ ( lowerCamelCase__ : dict , lowerCamelCase__ : int , lowerCamelCase__ : Optional[Any] ) -> list[str]: A_ : List[str] = set() # keep track of all the paths to be checked A_ : List[Any] = [[start]] # return path if start is goal if start == goal: return [start] # keeps looping until all possible paths have been checked while queue: # pop the first path from the queue A_ : Optional[Any] = queue.pop(0 ) # get the last node from the path A_ : Optional[int] = path[-1] if node not in explored: A_ : Union[str, Any] = graph[node] # go through all neighbour nodes, construct a new path and # push it into the queue for neighbour in neighbours: A_ : Dict = list(lowerCamelCase__ ) new_path.append(lowerCamelCase__ ) queue.append(lowerCamelCase__ ) # return path if neighbour is goal if neighbour == goal: return new_path # mark node as explored explored.add(lowerCamelCase__ ) # in case there's no path between the 2 nodes return [] def snake_case__ ( lowerCamelCase__ : dict , lowerCamelCase__ : Tuple , lowerCamelCase__ : Any ) -> int: if not graph or start not in graph or target not in graph: return -1 if start == target: return 0 A_ : str = [start] A_ : Dict = set(lowerCamelCase__ ) # Keep tab on distances from `start` node. A_ : Optional[Any] = {start: 0, target: -1} while queue: A_ : List[str] = queue.pop(0 ) if node == target: A_ : Tuple = ( dist[node] if dist[target] == -1 else min(dist[target] , dist[node] ) ) for adjacent in graph[node]: if adjacent not in visited: visited.add(lowerCamelCase__ ) queue.append(lowerCamelCase__ ) A_ : List[str] = dist[node] + 1 return dist[target] if __name__ == "__main__": print(bfs_shortest_path(demo_graph, """G""", """D""")) # returns ['G', 'C', 'A', 'B', 'D'] print(bfs_shortest_path_distance(demo_graph, """G""", """D""")) # returns 4
356
'''simple docstring''' from __future__ import annotations from PIL import Image # Define glider example snake_case__ = [ [0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], ] # Define blinker example snake_case__ = [[0, 1, 0], [0, 1, 0], [0, 1, 0]] def snake_case__ ( lowerCamelCase__ : list[list[int]] ) -> list[list[int]]: A_ : str = [] for i in range(len(lowerCamelCase__ ) ): A_ : Optional[Any] = [] for j in range(len(cells[i] ) ): # Get the number of live neighbours A_ : Optional[int] = 0 if i > 0 and j > 0: neighbour_count += cells[i - 1][j - 1] if i > 0: neighbour_count += cells[i - 1][j] if i > 0 and j < len(cells[i] ) - 1: neighbour_count += cells[i - 1][j + 1] if j > 0: neighbour_count += cells[i][j - 1] if j < len(cells[i] ) - 1: neighbour_count += cells[i][j + 1] if i < len(lowerCamelCase__ ) - 1 and j > 0: neighbour_count += cells[i + 1][j - 1] if i < len(lowerCamelCase__ ) - 1: neighbour_count += cells[i + 1][j] if i < len(lowerCamelCase__ ) - 1 and j < len(cells[i] ) - 1: neighbour_count += cells[i + 1][j + 1] # Rules of the game of life (excerpt from Wikipedia): # 1. Any live cell with two or three live neighbours survives. # 2. Any dead cell with three live neighbours becomes a live cell. # 3. All other live cells die in the next generation. # Similarly, all other dead cells stay dead. A_ : List[str] = cells[i][j] == 1 if ( (alive and 2 <= neighbour_count <= 3) or not alive and neighbour_count == 3 ): next_generation_row.append(1 ) else: next_generation_row.append(0 ) next_generation.append(lowerCamelCase__ ) return next_generation def snake_case__ ( lowerCamelCase__ : list[list[int]] , lowerCamelCase__ : int ) -> list[Image.Image]: A_ : List[Any] = [] for _ in range(lowerCamelCase__ ): # Create output image A_ : Optional[int] = Image.new('''RGB''' , (len(cells[0] ), len(lowerCamelCase__ )) ) A_ : int = img.load() # Save cells to image for x in range(len(lowerCamelCase__ ) ): for y in range(len(cells[0] ) ): A_ : Optional[Any] = 2_5_5 - cells[y][x] * 2_5_5 A_ : str = (colour, colour, colour) # Save image images.append(lowerCamelCase__ ) A_ : Optional[int] = new_generation(lowerCamelCase__ ) return images if __name__ == "__main__": snake_case__ = generate_images(GLIDER, 16) images[0].save("""out.gif""", save_all=True, append_images=images[1:])
4
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available snake_case__ = { """configuration_blip_2""": [ """BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Blip2Config""", """Blip2QFormerConfig""", """Blip2VisionConfig""", ], """processing_blip_2""": ["""Blip2Processor"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case__ = [ """BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST""", """Blip2Model""", """Blip2QFormerModel""", """Blip2PreTrainedModel""", """Blip2ForConditionalGeneration""", """Blip2VisionModel""", ] if TYPE_CHECKING: from .configuration_blip_a import ( BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipaConfig, BlipaQFormerConfig, BlipaVisionConfig, ) from .processing_blip_a import BlipaProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blip_a import ( BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST, BlipaForConditionalGeneration, BlipaModel, BlipaPreTrainedModel, BlipaQFormerModel, BlipaVisionModel, ) else: import sys snake_case__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
357
'''simple docstring''' import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import BertTokenizer, BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import AlignProcessor, EfficientNetImageProcessor @require_vision class UpperCamelCase_ (unittest.TestCase ): """simple docstring""" def _a ( self : Union[str, Any] ): """simple docstring""" A_ : Any = tempfile.mkdtemp() A_ : List[Any] = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] A_ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) A_ : Tuple = { '''do_resize''': True, '''size''': 20, '''do_center_crop''': True, '''crop_size''': 18, '''do_normalize''': True, '''image_mean''': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73], '''image_std''': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11], } A_ : List[Any] = os.path.join(self.tmpdirname , _lowerCamelCase ) with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp: json.dump(_lowerCamelCase , _lowerCamelCase ) def _a ( self : Dict , **_lowerCamelCase : Tuple ): """simple docstring""" return BertTokenizer.from_pretrained(self.tmpdirname , **_lowerCamelCase ) def _a ( self : Optional[int] , **_lowerCamelCase : Optional[int] ): """simple docstring""" return BertTokenizerFast.from_pretrained(self.tmpdirname , **_lowerCamelCase ) def _a ( self : Optional[Any] , **_lowerCamelCase : Tuple ): """simple docstring""" return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **_lowerCamelCase ) def _a ( self : Tuple ): """simple docstring""" shutil.rmtree(self.tmpdirname ) def _a ( self : int ): """simple docstring""" A_ : Union[str, Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] A_ : Any = [Image.fromarray(np.moveaxis(_lowerCamelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs def _a ( self : int ): """simple docstring""" A_ : Tuple = self.get_tokenizer() A_ : Tuple = self.get_rust_tokenizer() A_ : Dict = self.get_image_processor() A_ : List[Any] = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase ) processor_slow.save_pretrained(self.tmpdirname ) A_ : str = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=_lowerCamelCase ) A_ : Any = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase ) processor_fast.save_pretrained(self.tmpdirname ) A_ : List[Any] = AlignProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , _lowerCamelCase ) self.assertIsInstance(processor_fast.tokenizer , _lowerCamelCase ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , _lowerCamelCase ) self.assertIsInstance(processor_fast.image_processor , _lowerCamelCase ) def _a ( self : List[Any] ): """simple docstring""" A_ : List[str] = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) A_ : Optional[int] = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) A_ : Tuple = self.get_image_processor(do_normalize=_lowerCamelCase , padding_value=1.0 ) A_ : List[str] = AlignProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_lowerCamelCase , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , _lowerCamelCase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , _lowerCamelCase ) def _a ( self : Union[str, Any] ): """simple docstring""" A_ : Dict = self.get_image_processor() A_ : Any = self.get_tokenizer() A_ : List[str] = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase ) A_ : Any = self.prepare_image_inputs() A_ : List[Any] = image_processor(_lowerCamelCase , return_tensors='''np''' ) A_ : str = processor(images=_lowerCamelCase , return_tensors='''np''' ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 ) def _a ( self : Dict ): """simple docstring""" A_ : str = self.get_image_processor() A_ : List[str] = self.get_tokenizer() A_ : Optional[int] = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase ) A_ : int = '''lower newer''' A_ : str = processor(text=_lowerCamelCase ) A_ : Dict = tokenizer(_lowerCamelCase , padding='''max_length''' , max_length=64 ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def _a ( self : str ): """simple docstring""" A_ : Optional[int] = self.get_image_processor() A_ : Optional[Any] = self.get_tokenizer() A_ : List[str] = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase ) A_ : List[Any] = '''lower newer''' A_ : Optional[int] = self.prepare_image_inputs() A_ : List[Any] = processor(text=_lowerCamelCase , images=_lowerCamelCase ) self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] ) # test if it raises when no input is passed with pytest.raises(_lowerCamelCase ): processor() def _a ( self : List[str] ): """simple docstring""" A_ : Optional[Any] = self.get_image_processor() A_ : Optional[int] = self.get_tokenizer() A_ : List[Any] = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase ) A_ : str = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] A_ : str = processor.batch_decode(_lowerCamelCase ) A_ : Union[str, Any] = tokenizer.batch_decode(_lowerCamelCase ) self.assertListEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : Tuple ): """simple docstring""" A_ : str = self.get_image_processor() A_ : Tuple = self.get_tokenizer() A_ : Any = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase ) A_ : str = '''lower newer''' A_ : List[str] = self.prepare_image_inputs() A_ : Tuple = processor(text=_lowerCamelCase , images=_lowerCamelCase ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
4
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) snake_case__ = { """configuration_resnet""": ["""RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ResNetConfig""", """ResNetOnnxConfig"""] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case__ = [ """RESNET_PRETRAINED_MODEL_ARCHIVE_LIST""", """ResNetForImageClassification""", """ResNetModel""", """ResNetPreTrainedModel""", """ResNetBackbone""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case__ = [ """TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFResNetForImageClassification""", """TFResNetModel""", """TFResNetPreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case__ = [ """FlaxResNetForImageClassification""", """FlaxResNetModel""", """FlaxResNetPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_resnet import ( RESNET_PRETRAINED_MODEL_ARCHIVE_LIST, ResNetBackbone, ResNetForImageClassification, ResNetModel, ResNetPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_resnet import ( TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFResNetForImageClassification, TFResNetModel, TFResNetPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel else: import sys snake_case__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
358
'''simple docstring''' import json import os from pathlib import Path from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple, Union import sentencepiece from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging snake_case__ = logging.get_logger(__name__) snake_case__ = """▁""" snake_case__ = { """vocab_file""": """vocab.json""", """spm_file""": """sentencepiece.bpe.model""", } snake_case__ = { """vocab_file""": { """facebook/s2t-small-librispeech-asr""": ( """https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json""" ), }, """spm_file""": { """facebook/s2t-small-librispeech-asr""": ( """https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model""" ) }, } snake_case__ = { """facebook/s2t-small-librispeech-asr""": 10_24, } snake_case__ = ["""pt""", """fr""", """ru""", """nl""", """ro""", """it""", """es""", """de"""] snake_case__ = {"""mustc""": MUSTC_LANGS} class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = VOCAB_FILES_NAMES _lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP _lowerCAmelCase = MAX_MODEL_INPUT_SIZES _lowerCAmelCase = ['input_ids', 'attention_mask'] _lowerCAmelCase = [] def __init__( self : Optional[int] , _lowerCamelCase : List[str] , _lowerCamelCase : List[str] , _lowerCamelCase : str="<s>" , _lowerCamelCase : Union[str, Any]="</s>" , _lowerCamelCase : Dict="<pad>" , _lowerCamelCase : str="<unk>" , _lowerCamelCase : Union[str, Any]=False , _lowerCamelCase : int=False , _lowerCamelCase : Any=None , _lowerCamelCase : Any=None , _lowerCamelCase : Optional[Dict[str, Any]] = None , **_lowerCamelCase : Optional[int] , ): """simple docstring""" A_ : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=_lowerCamelCase , eos_token=_lowerCamelCase , unk_token=_lowerCamelCase , pad_token=_lowerCamelCase , do_upper_case=_lowerCamelCase , do_lower_case=_lowerCamelCase , tgt_lang=_lowerCamelCase , lang_codes=_lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCamelCase , ) A_ : Optional[int] = do_upper_case A_ : Tuple = do_lower_case A_ : Tuple = load_json(_lowerCamelCase ) A_ : Tuple = {v: k for k, v in self.encoder.items()} A_ : List[Any] = spm_file A_ : List[str] = load_spm(_lowerCamelCase , self.sp_model_kwargs ) if lang_codes is not None: A_ : Any = lang_codes A_ : Optional[Any] = LANGUAGES[lang_codes] A_ : Optional[Any] = [f'<lang:{lang}>' for lang in self.langs] A_ : Union[str, Any] = {lang: self.sp_model.PieceToId(f'<lang:{lang}>' ) for lang in self.langs} A_ : Optional[int] = self.lang_tokens A_ : int = tgt_lang if tgt_lang is not None else self.langs[0] self.set_tgt_lang_special_tokens(self._tgt_lang ) else: A_ : Dict = {} @property def _a ( self : Tuple ): """simple docstring""" return len(self.encoder ) @property def _a ( self : int ): """simple docstring""" return self._tgt_lang @tgt_lang.setter def _a ( self : List[str] , _lowerCamelCase : Any ): """simple docstring""" A_ : int = new_tgt_lang self.set_tgt_lang_special_tokens(_lowerCamelCase ) def _a ( self : Tuple , _lowerCamelCase : str ): """simple docstring""" A_ : List[str] = self.lang_code_to_id[tgt_lang] A_ : Optional[Any] = [lang_code_id] def _a ( self : Optional[Any] , _lowerCamelCase : str ): """simple docstring""" return self.sp_model.encode(_lowerCamelCase , out_type=_lowerCamelCase ) def _a ( self : List[Any] , _lowerCamelCase : int ): """simple docstring""" return self.encoder.get(_lowerCamelCase , self.encoder[self.unk_token] ) def _a ( self : int , _lowerCamelCase : int ): """simple docstring""" return self.decoder.get(_lowerCamelCase , self.unk_token ) def _a ( self : int , _lowerCamelCase : List[str] ): """simple docstring""" A_ : List[Any] = [] A_ : Any = '''''' for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: A_ : Union[str, Any] = self.sp_model.decode(_lowerCamelCase ) out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " " A_ : Optional[Any] = [] else: current_sub_tokens.append(_lowerCamelCase ) A_ : Tuple = self.sp_model.decode(_lowerCamelCase ) out_string += decoded.upper() if self.do_upper_case else decoded return out_string.strip() def _a ( self : int , _lowerCamelCase : Dict , _lowerCamelCase : Any=None ): """simple docstring""" if token_ids_a is None: return self.prefix_tokens + token_ids_a + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id] def _a ( self : List[Any] , _lowerCamelCase : List[int] , _lowerCamelCase : Optional[List[int]] = None , _lowerCamelCase : bool = False ): """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_lowerCamelCase , token_ids_a=_lowerCamelCase , already_has_special_tokens=_lowerCamelCase ) A_ : Tuple = [1] * len(self.prefix_tokens ) A_ : Tuple = [1] if token_ids_a is None: return prefix_ones + ([0] * len(_lowerCamelCase )) + suffix_ones return prefix_ones + ([0] * len(_lowerCamelCase )) + ([0] * len(_lowerCamelCase )) + suffix_ones def _a ( self : Dict ): """simple docstring""" A_ : Union[str, Any] = self.encoder.copy() vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : Union[str, Any] ): """simple docstring""" A_ : Dict = self.__dict__.copy() A_ : List[Any] = None return state def __setstate__( self : List[str] , _lowerCamelCase : Dict ): """simple docstring""" A_ : Dict = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): A_ : Optional[int] = {} A_ : int = load_spm(self.spm_file , self.sp_model_kwargs ) def _a ( self : Optional[Any] , _lowerCamelCase : str , _lowerCamelCase : Optional[str] = None ): """simple docstring""" A_ : Dict = Path(_lowerCamelCase ) assert save_dir.is_dir(), f'{save_directory} should be a directory' A_ : Optional[int] = save_dir / ( (filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''vocab_file'''] ) A_ : Optional[int] = save_dir / ( (filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''spm_file'''] ) save_json(self.encoder , _lowerCamelCase ) if os.path.abspath(self.spm_file ) != os.path.abspath(_lowerCamelCase ) and os.path.isfile(self.spm_file ): copyfile(self.spm_file , _lowerCamelCase ) elif not os.path.isfile(self.spm_file ): with open(_lowerCamelCase , '''wb''' ) as fi: A_ : List[str] = self.sp_model.serialized_model_proto() fi.write(_lowerCamelCase ) return (str(_lowerCamelCase ), str(_lowerCamelCase )) def snake_case__ ( lowerCamelCase__ : str , lowerCamelCase__ : Dict[str, Any] ) -> sentencepiece.SentencePieceProcessor: A_ : Tuple = sentencepiece.SentencePieceProcessor(**lowerCamelCase__ ) spm.Load(str(lowerCamelCase__ ) ) return spm def snake_case__ ( lowerCamelCase__ : str ) -> Union[Dict, List]: with open(lowerCamelCase__ , '''r''' ) as f: return json.load(lowerCamelCase__ ) def snake_case__ ( lowerCamelCase__ : Any , lowerCamelCase__ : str ) -> None: with open(lowerCamelCase__ , '''w''' ) as f: json.dump(lowerCamelCase__ , lowerCamelCase__ , indent=2 )
4
0
'''simple docstring''' import inspect import unittest from transformers import ViTConfig from transformers.testing_utils import ( require_accelerate, require_torch, require_torch_gpu, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class UpperCamelCase_ : """simple docstring""" def __init__( self : Dict , _lowerCamelCase : Tuple , _lowerCamelCase : Tuple=13 , _lowerCamelCase : Optional[Any]=30 , _lowerCamelCase : str=2 , _lowerCamelCase : Any=3 , _lowerCamelCase : int=True , _lowerCamelCase : List[str]=True , _lowerCamelCase : str=32 , _lowerCamelCase : Any=5 , _lowerCamelCase : int=4 , _lowerCamelCase : str=37 , _lowerCamelCase : Optional[int]="gelu" , _lowerCamelCase : Optional[Any]=0.1 , _lowerCamelCase : Union[str, Any]=0.1 , _lowerCamelCase : Optional[int]=10 , _lowerCamelCase : str=0.02 , _lowerCamelCase : List[str]=None , _lowerCamelCase : Any=2 , ): """simple docstring""" A_ : Dict = parent A_ : int = batch_size A_ : int = image_size A_ : Optional[Any] = patch_size A_ : Optional[int] = num_channels A_ : List[Any] = is_training A_ : Dict = use_labels A_ : Optional[Any] = hidden_size A_ : List[str] = num_hidden_layers A_ : List[Any] = num_attention_heads A_ : Optional[Any] = intermediate_size A_ : Any = hidden_act A_ : Union[str, Any] = hidden_dropout_prob A_ : Tuple = attention_probs_dropout_prob A_ : List[Any] = type_sequence_label_size A_ : Optional[Any] = initializer_range A_ : List[str] = scope A_ : int = encoder_stride # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) A_ : Union[str, Any] = (image_size // patch_size) ** 2 A_ : Any = num_patches + 1 def _a ( self : Optional[Any] ): """simple docstring""" A_ : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A_ : Optional[Any] = None if self.use_labels: A_ : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A_ : Optional[int] = self.get_config() return config, pixel_values, labels def _a ( self : Dict ): """simple docstring""" return ViTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowerCamelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def _a ( self : Optional[Any] , _lowerCamelCase : Tuple , _lowerCamelCase : Tuple , _lowerCamelCase : Optional[Any] ): """simple docstring""" A_ : Union[str, Any] = ViTModel(config=_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() A_ : Optional[Any] = model(_lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _a ( self : int , _lowerCamelCase : Dict , _lowerCamelCase : Dict , _lowerCamelCase : Tuple ): """simple docstring""" A_ : Tuple = ViTForMaskedImageModeling(config=_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() A_ : Any = model(_lowerCamelCase ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images A_ : List[Any] = 1 A_ : int = ViTForMaskedImageModeling(_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() A_ : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) A_ : List[str] = model(_lowerCamelCase ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def _a ( self : List[str] , _lowerCamelCase : Any , _lowerCamelCase : Dict , _lowerCamelCase : Union[str, Any] ): """simple docstring""" A_ : Optional[int] = self.type_sequence_label_size A_ : Optional[Any] = ViTForImageClassification(_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() A_ : List[Any] = model(_lowerCamelCase , labels=_lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images A_ : str = 1 A_ : Optional[Any] = ViTForImageClassification(_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() A_ : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) A_ : List[Any] = model(_lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def _a ( self : Optional[Any] ): """simple docstring""" A_ : Any = self.prepare_config_and_inputs() ( A_ ) : Dict = config_and_inputs A_ : List[Any] = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class UpperCamelCase_ (a__, a__, unittest.TestCase ): """simple docstring""" _lowerCAmelCase = ( ( ViTModel, ViTForImageClassification, ViTForMaskedImageModeling, ) if is_torch_available() else () ) _lowerCAmelCase = ( {'feature-extraction': ViTModel, 'image-classification': ViTForImageClassification} if is_torch_available() else {} ) _lowerCAmelCase = True _lowerCAmelCase = False _lowerCAmelCase = False _lowerCAmelCase = False def _a ( self : Optional[int] ): """simple docstring""" A_ : Union[str, Any] = ViTModelTester(self ) A_ : str = ConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase , hidden_size=37 ) def _a ( self : Optional[Any] ): """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason='''ViT does not use inputs_embeds''' ) def _a ( self : str ): """simple docstring""" pass def _a ( self : Tuple ): """simple docstring""" A_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A_ : Any = model_class(_lowerCamelCase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) A_ : Union[str, Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_lowerCamelCase , nn.Linear ) ) def _a ( self : int ): """simple docstring""" A_ : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A_ : List[Any] = model_class(_lowerCamelCase ) A_ : int = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A_ : Optional[int] = [*signature.parameters.keys()] A_ : List[str] = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , _lowerCamelCase ) def _a ( self : Dict ): """simple docstring""" A_ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCamelCase ) def _a ( self : List[str] ): """simple docstring""" A_ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*_lowerCamelCase ) def _a ( self : List[str] ): """simple docstring""" A_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_lowerCamelCase ) @slow def _a ( self : Dict ): """simple docstring""" for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A_ : Optional[int] = ViTModel.from_pretrained(_lowerCamelCase ) self.assertIsNotNone(_lowerCamelCase ) def snake_case__ ( ) -> Optional[int]: A_ : Dict = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class UpperCamelCase_ (unittest.TestCase ): """simple docstring""" @cached_property def _a ( self : Optional[int] ): """simple docstring""" return ViTImageProcessor.from_pretrained('''google/vit-base-patch16-224''' ) if is_vision_available() else None @slow def _a ( self : List[str] ): """simple docstring""" A_ : List[str] = ViTForImageClassification.from_pretrained('''google/vit-base-patch16-224''' ).to(_lowerCamelCase ) A_ : int = self.default_image_processor A_ : Optional[Any] = prepare_img() A_ : Optional[Any] = image_processor(images=_lowerCamelCase , return_tensors='''pt''' ).to(_lowerCamelCase ) # forward pass with torch.no_grad(): A_ : List[Any] = model(**_lowerCamelCase ) # verify the logits A_ : Optional[int] = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , _lowerCamelCase ) A_ : Any = torch.tensor([-0.27_44, 0.82_15, -0.08_36] ).to(_lowerCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCamelCase , atol=1E-4 ) ) @slow def _a ( self : int ): """simple docstring""" A_ : str = ViTModel.from_pretrained('''facebook/dino-vits8''' ).to(_lowerCamelCase ) A_ : List[Any] = ViTImageProcessor.from_pretrained('''facebook/dino-vits8''' , size=480 ) A_ : Optional[Any] = prepare_img() A_ : List[Any] = image_processor(images=_lowerCamelCase , return_tensors='''pt''' ) A_ : Optional[int] = inputs.pixel_values.to(_lowerCamelCase ) # forward pass with torch.no_grad(): A_ : Union[str, Any] = model(_lowerCamelCase , interpolate_pos_encoding=_lowerCamelCase ) # verify the logits A_ : str = torch.Size((1, 3601, 384) ) self.assertEqual(outputs.last_hidden_state.shape , _lowerCamelCase ) A_ : List[Any] = torch.tensor( [[4.23_40, 4.39_06, -6.66_92], [4.54_63, 1.89_28, -6.72_57], [4.44_29, 0.84_96, -5.85_85]] ).to(_lowerCamelCase ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , _lowerCamelCase , atol=1E-4 ) ) @slow @require_accelerate @require_torch_gpu def _a ( self : Tuple ): """simple docstring""" A_ : Tuple = ViTModel.from_pretrained('''facebook/dino-vits8''' , torch_dtype=torch.floataa , device_map='''auto''' ) A_ : Dict = self.default_image_processor A_ : str = prepare_img() A_ : Any = image_processor(images=_lowerCamelCase , return_tensors='''pt''' ) A_ : int = inputs.pixel_values.to(_lowerCamelCase ) # forward pass to make sure inference works in fp16 with torch.no_grad(): A_ : List[str] = model(_lowerCamelCase )
359
'''simple docstring''' import argparse import json import os import sys import tempfile import unittest from argparse import Namespace from dataclasses import dataclass, field from enum import Enum from pathlib import Path from typing import List, Literal, Optional import yaml from transformers import HfArgumentParser, TrainingArguments from transformers.hf_argparser import make_choice_type_function, string_to_bool # Since Python 3.10, we can use the builtin `|` operator for Union types # See PEP 604: https://peps.python.org/pep-0604 snake_case__ = sys.version_info >= (3, 10) def snake_case__ ( lowerCamelCase__ : Union[str, Any]=None , lowerCamelCase__ : str=None ) -> List[Any]: return field(default_factory=lambda: default , metadata=lowerCamelCase__ ) @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = 42 _lowerCAmelCase = 42 _lowerCAmelCase = 42 _lowerCAmelCase = 42 @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = 4_2 _lowerCAmelCase = field(default='toto', metadata={'help': 'help message'} ) @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = False _lowerCAmelCase = True _lowerCAmelCase = None class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = 'titi' _lowerCAmelCase = 'toto' class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = 'titi' _lowerCAmelCase = 'toto' _lowerCAmelCase = 4_2 @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = "toto" def _a ( self : Optional[Any] ): """simple docstring""" A_ : Optional[int] = BasicEnum(self.foo ) @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = "toto" def _a ( self : Tuple ): """simple docstring""" A_ : Optional[Any] = MixedTypeEnum(self.foo ) @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = None _lowerCAmelCase = field(default=a__, metadata={'help': 'help message'} ) _lowerCAmelCase = None _lowerCAmelCase = list_field(default=[] ) _lowerCAmelCase = list_field(default=[] ) @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = list_field(default=[] ) _lowerCAmelCase = list_field(default=[1, 2, 3] ) _lowerCAmelCase = list_field(default=['Hallo', 'Bonjour', 'Hello'] ) _lowerCAmelCase = list_field(default=[0.1, 0.2, 0.3] ) @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = field() _lowerCAmelCase = field() _lowerCAmelCase = field() def _a ( self : Tuple ): """simple docstring""" A_ : Tuple = BasicEnum(self.required_enum ) @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = 42 _lowerCAmelCase = field() _lowerCAmelCase = None _lowerCAmelCase = field(default='toto', metadata={'help': 'help message'} ) _lowerCAmelCase = list_field(default=['Hallo', 'Bonjour', 'Hello'] ) if is_python_no_less_than_3_10: @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = False _lowerCAmelCase = True _lowerCAmelCase = None @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = None _lowerCAmelCase = field(default=a__, metadata={'help': 'help message'} ) _lowerCAmelCase = None _lowerCAmelCase = list_field(default=[] ) _lowerCAmelCase = list_field(default=[] ) class UpperCamelCase_ (unittest.TestCase ): """simple docstring""" def _a ( self : List[str] , _lowerCamelCase : argparse.ArgumentParser , _lowerCamelCase : argparse.ArgumentParser ): """simple docstring""" self.assertEqual(len(a._actions ) , len(b._actions ) ) for x, y in zip(a._actions , b._actions ): A_ : Union[str, Any] = {k: v for k, v in vars(_lowerCamelCase ).items() if k != '''container'''} A_ : Optional[Any] = {k: v for k, v in vars(_lowerCamelCase ).items() if k != '''container'''} # Choices with mixed type have custom function as "type" # So we need to compare results directly for equality if xx.get('''choices''' , _lowerCamelCase ) and yy.get('''choices''' , _lowerCamelCase ): for expected_choice in yy["choices"] + xx["choices"]: self.assertEqual(xx['''type'''](_lowerCamelCase ) , yy['''type'''](_lowerCamelCase ) ) del xx["type"], yy["type"] self.assertEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : Optional[int] ): """simple docstring""" A_ : Union[str, Any] = HfArgumentParser(_lowerCamelCase ) A_ : Optional[Any] = argparse.ArgumentParser() expected.add_argument('''--foo''' , type=_lowerCamelCase , required=_lowerCamelCase ) expected.add_argument('''--bar''' , type=_lowerCamelCase , required=_lowerCamelCase ) expected.add_argument('''--baz''' , type=_lowerCamelCase , required=_lowerCamelCase ) expected.add_argument('''--flag''' , type=_lowerCamelCase , default=_lowerCamelCase , const=_lowerCamelCase , nargs='''?''' ) self.argparsersEqual(_lowerCamelCase , _lowerCamelCase ) A_ : Union[str, Any] = ['''--foo''', '''1''', '''--baz''', '''quux''', '''--bar''', '''0.5'''] ((A_) ,) : List[str] = parser.parse_args_into_dataclasses(_lowerCamelCase , look_for_args_file=_lowerCamelCase ) self.assertFalse(example.flag ) def _a ( self : Dict ): """simple docstring""" A_ : int = HfArgumentParser(_lowerCamelCase ) A_ : int = argparse.ArgumentParser() expected.add_argument('''--foo''' , default=42 , type=_lowerCamelCase ) expected.add_argument('''--baz''' , default='''toto''' , type=_lowerCamelCase , help='''help message''' ) self.argparsersEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : Dict ): """simple docstring""" A_ : Any = argparse.ArgumentParser() expected.add_argument('''--foo''' , type=_lowerCamelCase , default=_lowerCamelCase , const=_lowerCamelCase , nargs='''?''' ) expected.add_argument('''--baz''' , type=_lowerCamelCase , default=_lowerCamelCase , const=_lowerCamelCase , nargs='''?''' ) # A boolean no_* argument always has to come after its "default: True" regular counter-part # and its default must be set to False expected.add_argument('''--no_baz''' , action='''store_false''' , default=_lowerCamelCase , dest='''baz''' ) expected.add_argument('''--opt''' , type=_lowerCamelCase , default=_lowerCamelCase ) A_ : Dict = [WithDefaultBoolExample] if is_python_no_less_than_3_10: dataclass_types.append(_lowerCamelCase ) for dataclass_type in dataclass_types: A_ : Any = HfArgumentParser(_lowerCamelCase ) self.argparsersEqual(_lowerCamelCase , _lowerCamelCase ) A_ : List[Any] = parser.parse_args([] ) self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , baz=_lowerCamelCase , opt=_lowerCamelCase ) ) A_ : Optional[int] = parser.parse_args(['''--foo''', '''--no_baz'''] ) self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , baz=_lowerCamelCase , opt=_lowerCamelCase ) ) A_ : Union[str, Any] = parser.parse_args(['''--foo''', '''--baz'''] ) self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , baz=_lowerCamelCase , opt=_lowerCamelCase ) ) A_ : List[str] = parser.parse_args(['''--foo''', '''True''', '''--baz''', '''True''', '''--opt''', '''True'''] ) self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , baz=_lowerCamelCase , opt=_lowerCamelCase ) ) A_ : List[Any] = parser.parse_args(['''--foo''', '''False''', '''--baz''', '''False''', '''--opt''', '''False'''] ) self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , baz=_lowerCamelCase , opt=_lowerCamelCase ) ) def _a ( self : List[Any] ): """simple docstring""" A_ : str = HfArgumentParser(_lowerCamelCase ) A_ : Optional[int] = argparse.ArgumentParser() expected.add_argument( '''--foo''' , default='''toto''' , choices=['''titi''', '''toto''', 42] , type=make_choice_type_function(['''titi''', '''toto''', 42] ) , ) self.argparsersEqual(_lowerCamelCase , _lowerCamelCase ) A_ : str = parser.parse_args([] ) self.assertEqual(args.foo , '''toto''' ) A_ : List[Any] = parser.parse_args_into_dataclasses([] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.toto ) A_ : int = parser.parse_args(['''--foo''', '''titi'''] ) self.assertEqual(args.foo , '''titi''' ) A_ : Dict = parser.parse_args_into_dataclasses(['''--foo''', '''titi'''] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.titi ) A_ : Tuple = parser.parse_args(['''--foo''', '''42'''] ) self.assertEqual(args.foo , 42 ) A_ : List[str] = parser.parse_args_into_dataclasses(['''--foo''', '''42'''] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo ) def _a ( self : Optional[int] ): """simple docstring""" @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = "toto" A_ : List[str] = HfArgumentParser(_lowerCamelCase ) A_ : Tuple = argparse.ArgumentParser() expected.add_argument( '''--foo''' , default='''toto''' , choices=('''titi''', '''toto''', 42) , type=make_choice_type_function(['''titi''', '''toto''', 42] ) , ) self.argparsersEqual(_lowerCamelCase , _lowerCamelCase ) A_ : Tuple = parser.parse_args([] ) self.assertEqual(args.foo , '''toto''' ) A_ : List[str] = parser.parse_args(['''--foo''', '''titi'''] ) self.assertEqual(args.foo , '''titi''' ) A_ : int = parser.parse_args(['''--foo''', '''42'''] ) self.assertEqual(args.foo , 42 ) def _a ( self : Dict ): """simple docstring""" A_ : int = HfArgumentParser(_lowerCamelCase ) A_ : List[Any] = argparse.ArgumentParser() expected.add_argument('''--foo_int''' , nargs='''+''' , default=[] , type=_lowerCamelCase ) expected.add_argument('''--bar_int''' , nargs='''+''' , default=[1, 2, 3] , type=_lowerCamelCase ) expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=_lowerCamelCase ) expected.add_argument('''--foo_float''' , nargs='''+''' , default=[0.1, 0.2, 0.3] , type=_lowerCamelCase ) self.argparsersEqual(_lowerCamelCase , _lowerCamelCase ) A_ : Optional[int] = parser.parse_args([] ) self.assertEqual( _lowerCamelCase , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['''Hallo''', '''Bonjour''', '''Hello'''] , foo_float=[0.1, 0.2, 0.3] ) , ) A_ : str = parser.parse_args('''--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'''.split() ) self.assertEqual(_lowerCamelCase , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['''a''', '''b''', '''c'''] , foo_float=[0.1, 0.7] ) ) def _a ( self : Dict ): """simple docstring""" A_ : Optional[Any] = argparse.ArgumentParser() expected.add_argument('''--foo''' , default=_lowerCamelCase , type=_lowerCamelCase ) expected.add_argument('''--bar''' , default=_lowerCamelCase , type=_lowerCamelCase , help='''help message''' ) expected.add_argument('''--baz''' , default=_lowerCamelCase , type=_lowerCamelCase ) expected.add_argument('''--ces''' , nargs='''+''' , default=[] , type=_lowerCamelCase ) expected.add_argument('''--des''' , nargs='''+''' , default=[] , type=_lowerCamelCase ) A_ : Tuple = [OptionalExample] if is_python_no_less_than_3_10: dataclass_types.append(_lowerCamelCase ) for dataclass_type in dataclass_types: A_ : int = HfArgumentParser(_lowerCamelCase ) self.argparsersEqual(_lowerCamelCase , _lowerCamelCase ) A_ : List[Any] = parser.parse_args([] ) self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , bar=_lowerCamelCase , baz=_lowerCamelCase , ces=[] , des=[] ) ) A_ : Optional[Any] = parser.parse_args('''--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'''.split() ) self.assertEqual(_lowerCamelCase , Namespace(foo=12 , bar=3.14 , baz='''42''' , ces=['''a''', '''b''', '''c'''] , des=[1, 2, 3] ) ) def _a ( self : List[Any] ): """simple docstring""" A_ : List[Any] = HfArgumentParser(_lowerCamelCase ) A_ : Dict = argparse.ArgumentParser() expected.add_argument('''--required_list''' , nargs='''+''' , type=_lowerCamelCase , required=_lowerCamelCase ) expected.add_argument('''--required_str''' , type=_lowerCamelCase , required=_lowerCamelCase ) expected.add_argument( '''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=_lowerCamelCase , ) self.argparsersEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : Optional[Any] ): """simple docstring""" A_ : Union[str, Any] = HfArgumentParser(_lowerCamelCase ) A_ : List[Any] = argparse.ArgumentParser() expected.add_argument('''--foo''' , type=_lowerCamelCase , required=_lowerCamelCase ) expected.add_argument( '''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=_lowerCamelCase , ) expected.add_argument('''--opt''' , type=_lowerCamelCase , default=_lowerCamelCase ) expected.add_argument('''--baz''' , default='''toto''' , type=_lowerCamelCase , help='''help message''' ) expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=_lowerCamelCase ) self.argparsersEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : Tuple ): """simple docstring""" A_ : List[Any] = HfArgumentParser(_lowerCamelCase ) A_ : Union[str, Any] = { '''foo''': 12, '''bar''': 3.14, '''baz''': '''42''', '''flag''': True, } A_ : Optional[int] = parser.parse_dict(_lowerCamelCase )[0] A_ : str = BasicExample(**_lowerCamelCase ) self.assertEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : List[str] ): """simple docstring""" A_ : Any = HfArgumentParser(_lowerCamelCase ) A_ : List[str] = { '''foo''': 12, '''bar''': 3.14, '''baz''': '''42''', '''flag''': True, '''extra''': 42, } self.assertRaises(_lowerCamelCase , parser.parse_dict , _lowerCamelCase , allow_extra_keys=_lowerCamelCase ) def _a ( self : Optional[Any] ): """simple docstring""" A_ : Union[str, Any] = HfArgumentParser(_lowerCamelCase ) A_ : List[str] = { '''foo''': 12, '''bar''': 3.14, '''baz''': '''42''', '''flag''': True, } with tempfile.TemporaryDirectory() as tmp_dir: A_ : Tuple = os.path.join(_lowerCamelCase , '''temp_json''' ) os.mkdir(_lowerCamelCase ) with open(temp_local_path + '''.json''' , '''w+''' ) as f: json.dump(_lowerCamelCase , _lowerCamelCase ) A_ : List[str] = parser.parse_yaml_file(Path(temp_local_path + '''.json''' ) )[0] A_ : Optional[Any] = BasicExample(**_lowerCamelCase ) self.assertEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : int ): """simple docstring""" A_ : int = HfArgumentParser(_lowerCamelCase ) A_ : Tuple = { '''foo''': 12, '''bar''': 3.14, '''baz''': '''42''', '''flag''': True, } with tempfile.TemporaryDirectory() as tmp_dir: A_ : int = os.path.join(_lowerCamelCase , '''temp_yaml''' ) os.mkdir(_lowerCamelCase ) with open(temp_local_path + '''.yaml''' , '''w+''' ) as f: yaml.dump(_lowerCamelCase , _lowerCamelCase ) A_ : Optional[Any] = parser.parse_yaml_file(Path(temp_local_path + '''.yaml''' ) )[0] A_ : int = BasicExample(**_lowerCamelCase ) self.assertEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : Union[str, Any] ): """simple docstring""" A_ : Dict = HfArgumentParser(_lowerCamelCase ) self.assertIsNotNone(_lowerCamelCase )
4
0
'''simple docstring''' import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging snake_case__ = logging.get_logger(__name__) snake_case__ = { """Salesforce/blip-vqa-base""": """https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json""", """Salesforce/blip-vqa-capfit-large""": ( """https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json""" ), """Salesforce/blip-image-captioning-base""": ( """https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json""" ), """Salesforce/blip-image-captioning-large""": ( """https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json""" ), """Salesforce/blip-itm-base-coco""": """https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json""", """Salesforce/blip-itm-large-coco""": """https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json""", """Salesforce/blip-itm-base-flikr""": """https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json""", """Salesforce/blip-itm-large-flikr""": ( """https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json""" ), } class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = 'blip_text_model' def __init__( self : Optional[int] , _lowerCamelCase : List[str]=30524 , _lowerCamelCase : Optional[int]=768 , _lowerCamelCase : Union[str, Any]=768 , _lowerCamelCase : List[Any]=3072 , _lowerCamelCase : Optional[Any]=768 , _lowerCamelCase : Union[str, Any]=12 , _lowerCamelCase : Tuple=8 , _lowerCamelCase : Optional[Any]=512 , _lowerCamelCase : List[str]="gelu" , _lowerCamelCase : Optional[Any]=1E-12 , _lowerCamelCase : int=0.0 , _lowerCamelCase : str=0.0 , _lowerCamelCase : Dict=0.02 , _lowerCamelCase : Optional[Any]=30522 , _lowerCamelCase : Tuple=2 , _lowerCamelCase : int=0 , _lowerCamelCase : int=102 , _lowerCamelCase : List[Any]=True , _lowerCamelCase : List[str]=True , **_lowerCamelCase : Any , ): """simple docstring""" super().__init__( pad_token_id=_lowerCamelCase , bos_token_id=_lowerCamelCase , eos_token_id=_lowerCamelCase , sep_token_id=_lowerCamelCase , **_lowerCamelCase , ) A_ : List[Any] = vocab_size A_ : Tuple = hidden_size A_ : List[Any] = encoder_hidden_size A_ : Union[str, Any] = intermediate_size A_ : Tuple = projection_dim A_ : Union[str, Any] = hidden_dropout_prob A_ : str = num_hidden_layers A_ : List[Any] = num_attention_heads A_ : str = max_position_embeddings A_ : List[str] = layer_norm_eps A_ : str = hidden_act A_ : List[str] = initializer_range A_ : Union[str, Any] = attention_probs_dropout_prob A_ : List[Any] = is_decoder A_ : Union[str, Any] = use_cache @classmethod def _a ( cls : Optional[Any] , _lowerCamelCase : Union[str, os.PathLike] , **_lowerCamelCase : str ): """simple docstring""" cls._set_token_in_kwargs(_lowerCamelCase ) A_ : Dict = cls.get_config_dict(_lowerCamelCase , **_lowerCamelCase ) # get the text config dict if we are loading from BlipConfig if config_dict.get('''model_type''' ) == "blip": A_ : str = config_dict['''text_config'''] if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type: logger.warning( f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' ) return cls.from_dict(_lowerCamelCase , **_lowerCamelCase ) class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = 'blip_vision_model' def __init__( self : List[str] , _lowerCamelCase : str=768 , _lowerCamelCase : List[str]=3072 , _lowerCamelCase : Any=512 , _lowerCamelCase : Dict=12 , _lowerCamelCase : Any=12 , _lowerCamelCase : Optional[Any]=384 , _lowerCamelCase : List[Any]=16 , _lowerCamelCase : Dict="gelu" , _lowerCamelCase : int=1E-5 , _lowerCamelCase : Union[str, Any]=0.0 , _lowerCamelCase : Any=1E-10 , **_lowerCamelCase : str , ): """simple docstring""" super().__init__(**_lowerCamelCase ) A_ : Union[str, Any] = hidden_size A_ : int = intermediate_size A_ : List[Any] = projection_dim A_ : Dict = num_hidden_layers A_ : Union[str, Any] = num_attention_heads A_ : Dict = patch_size A_ : Dict = image_size A_ : List[Any] = initializer_range A_ : int = attention_dropout A_ : Optional[Any] = layer_norm_eps A_ : Optional[int] = hidden_act @classmethod def _a ( cls : Dict , _lowerCamelCase : Union[str, os.PathLike] , **_lowerCamelCase : Tuple ): """simple docstring""" cls._set_token_in_kwargs(_lowerCamelCase ) A_ : Dict = cls.get_config_dict(_lowerCamelCase , **_lowerCamelCase ) # get the vision config dict if we are loading from BlipConfig if config_dict.get('''model_type''' ) == "blip": A_ : Union[str, Any] = config_dict['''vision_config'''] if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type: logger.warning( f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' ) return cls.from_dict(_lowerCamelCase , **_lowerCamelCase ) class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = 'blip' _lowerCAmelCase = True def __init__( self : Union[str, Any] , _lowerCamelCase : Optional[int]=None , _lowerCamelCase : Tuple=None , _lowerCamelCase : Union[str, Any]=512 , _lowerCamelCase : Union[str, Any]=2.65_92 , _lowerCamelCase : int=256 , **_lowerCamelCase : int , ): """simple docstring""" super().__init__(**_lowerCamelCase ) if text_config is None: A_ : Union[str, Any] = {} logger.info('''`text_config` is `None`. Initializing the `BlipTextConfig` with default values.''' ) if vision_config is None: A_ : Optional[int] = {} logger.info('''`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.''' ) A_ : Union[str, Any] = BlipTextConfig(**_lowerCamelCase ) A_ : Optional[int] = BlipVisionConfig(**_lowerCamelCase ) A_ : List[str] = self.vision_config.hidden_size A_ : List[str] = projection_dim A_ : Union[str, Any] = logit_scale_init_value A_ : Tuple = 1.0 A_ : Optional[Any] = 0.02 A_ : Union[str, Any] = image_text_hidden_size @classmethod def _a ( cls : str , _lowerCamelCase : BlipTextConfig , _lowerCamelCase : BlipVisionConfig , **_lowerCamelCase : Tuple ): """simple docstring""" return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **_lowerCamelCase ) def _a ( self : str ): """simple docstring""" A_ : List[Any] = copy.deepcopy(self.__dict__ ) A_ : Optional[int] = self.text_config.to_dict() A_ : str = self.vision_config.to_dict() A_ : Optional[Any] = self.__class__.model_type return output
360
'''simple docstring''' import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoImageProcessor, ViTImageProcessor from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test sys.path.append(str(Path(__file__).parent.parent / """utils""")) from test_module.custom_image_processing import CustomImageProcessor # noqa E402 snake_case__ = get_tests_dir("""fixtures""") class UpperCamelCase_ (unittest.TestCase ): """simple docstring""" def _a ( self : List[str] ): """simple docstring""" A_ : List[Any] = mock.Mock() A_ : List[str] = 500 A_ : Tuple = {} A_ : int = HTTPError A_ : Optional[Any] = {} # Download this model to make sure it's in the cache. A_ : Tuple = ViTImageProcessor.from_pretrained('''hf-internal-testing/tiny-random-vit''' ) # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch('''requests.Session.request''' , return_value=_lowerCamelCase ) as mock_head: A_ : List[Any] = ViTImageProcessor.from_pretrained('''hf-internal-testing/tiny-random-vit''' ) # This check we did call the fake head request mock_head.assert_called() def _a ( self : Tuple ): """simple docstring""" A_ : Tuple = ViTImageProcessor.from_pretrained( '''https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json''' ) def _a ( self : Dict ): """simple docstring""" with self.assertRaises(_lowerCamelCase ): # config is in subfolder, the following should not work without specifying the subfolder A_ : Any = AutoImageProcessor.from_pretrained('''hf-internal-testing/stable-diffusion-all-variants''' ) A_ : Tuple = AutoImageProcessor.from_pretrained( '''hf-internal-testing/stable-diffusion-all-variants''' , subfolder='''feature_extractor''' ) self.assertIsNotNone(_lowerCamelCase ) @is_staging_test class UpperCamelCase_ (unittest.TestCase ): """simple docstring""" @classmethod def _a ( cls : Tuple ): """simple docstring""" A_ : int = TOKEN HfFolder.save_token(_lowerCamelCase ) @classmethod def _a ( cls : str ): """simple docstring""" try: delete_repo(token=cls._token , repo_id='''test-image-processor''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''valid_org/test-image-processor-org''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''test-dynamic-image-processor''' ) except HTTPError: pass def _a ( self : List[Any] ): """simple docstring""" A_ : Dict = ViTImageProcessor.from_pretrained(_lowerCamelCase ) image_processor.push_to_hub('''test-image-processor''' , use_auth_token=self._token ) A_ : Optional[int] = ViTImageProcessor.from_pretrained(f'{USER}/test-image-processor' ) for k, v in image_processor.__dict__.items(): self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) ) # Reset repo delete_repo(token=self._token , repo_id='''test-image-processor''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained( _lowerCamelCase , repo_id='''test-image-processor''' , push_to_hub=_lowerCamelCase , use_auth_token=self._token ) A_ : List[Any] = ViTImageProcessor.from_pretrained(f'{USER}/test-image-processor' ) for k, v in image_processor.__dict__.items(): self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) ) def _a ( self : Optional[Any] ): """simple docstring""" A_ : int = ViTImageProcessor.from_pretrained(_lowerCamelCase ) image_processor.push_to_hub('''valid_org/test-image-processor''' , use_auth_token=self._token ) A_ : List[str] = ViTImageProcessor.from_pretrained('''valid_org/test-image-processor''' ) for k, v in image_processor.__dict__.items(): self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) ) # Reset repo delete_repo(token=self._token , repo_id='''valid_org/test-image-processor''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained( _lowerCamelCase , repo_id='''valid_org/test-image-processor-org''' , push_to_hub=_lowerCamelCase , use_auth_token=self._token ) A_ : Any = ViTImageProcessor.from_pretrained('''valid_org/test-image-processor-org''' ) for k, v in image_processor.__dict__.items(): self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) ) def _a ( self : Optional[Any] ): """simple docstring""" CustomImageProcessor.register_for_auto_class() A_ : Any = CustomImageProcessor.from_pretrained(_lowerCamelCase ) image_processor.push_to_hub('''test-dynamic-image-processor''' , use_auth_token=self._token ) # This has added the proper auto_map field to the config self.assertDictEqual( image_processor.auto_map , {'''AutoImageProcessor''': '''custom_image_processing.CustomImageProcessor'''} , ) A_ : str = AutoImageProcessor.from_pretrained( f'{USER}/test-dynamic-image-processor' , trust_remote_code=_lowerCamelCase ) # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module self.assertEqual(new_image_processor.__class__.__name__ , '''CustomImageProcessor''' )
4
0
'''simple docstring''' import argparse import struct import unittest class UpperCamelCase_ : """simple docstring""" def __init__( self : Any , _lowerCamelCase : bytes ): """simple docstring""" A_ : Dict = data # Initialize hash values A_ : Optional[Any] = [ 0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a, 0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19, ] # Initialize round constants A_ : Tuple = [ 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5, 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174, 0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc, 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da, 0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967, 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13, 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85, 0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3, 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070, 0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3, 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208, 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2, ] A_ : Union[str, Any] = self.preprocessing(self.data ) self.final_hash() @staticmethod def _a ( _lowerCamelCase : bytes ): """simple docstring""" A_ : Union[str, Any] = B'''\x80''' + (B'''\x00''' * (63 - (len(_lowerCamelCase ) + 8) % 64)) A_ : Optional[int] = struct.pack('''>Q''' , (len(_lowerCamelCase ) * 8) ) return data + padding + big_endian_integer def _a ( self : List[Any] ): """simple docstring""" A_ : Optional[int] = [ self.preprocessed_data[x : x + 64] for x in range(0 , len(self.preprocessed_data ) , 64 ) ] for block in self.blocks: # Convert the given block into a list of 4 byte integers A_ : List[str] = list(struct.unpack('''>16L''' , _lowerCamelCase ) ) # add 48 0-ed integers words += [0] * 48 A_ : str = self.hashes for index in range(0 , 64 ): if index > 15: # modify the zero-ed indexes at the end of the array A_ : int = ( self.ror(words[index - 15] , 7 ) ^ self.ror(words[index - 15] , 18 ) ^ (words[index - 15] >> 3) ) A_ : Union[str, Any] = ( self.ror(words[index - 2] , 17 ) ^ self.ror(words[index - 2] , 19 ) ^ (words[index - 2] >> 10) ) A_ : List[Any] = ( words[index - 16] + sa + words[index - 7] + sa ) % 0x100000000 # Compression A_ : Any = self.ror(_lowerCamelCase , 6 ) ^ self.ror(_lowerCamelCase , 11 ) ^ self.ror(_lowerCamelCase , 25 ) A_ : List[Any] = (e & f) ^ ((~e & 0xffffffff) & g) A_ : List[Any] = ( h + sa + ch + self.round_constants[index] + words[index] ) % 0x100000000 A_ : str = self.ror(_lowerCamelCase , 2 ) ^ self.ror(_lowerCamelCase , 13 ) ^ self.ror(_lowerCamelCase , 22 ) A_ : Optional[Any] = (a & b) ^ (a & c) ^ (b & c) A_ : Dict = (sa + maj) % 0x100000000 A_ : str = ( g, f, e, ((d + tempa) % 0x100000000), c, b, a, ((tempa + tempa) % 0x100000000), ) A_ : Optional[Any] = [a, b, c, d, e, f, g, h] # Modify final values A_ : Union[str, Any] = [ ((element + mutated_hash_values[index]) % 0x100000000) for index, element in enumerate(self.hashes ) ] A_ : Optional[int] = ''''''.join([hex(_lowerCamelCase )[2:].zfill(8 ) for value in self.hashes] ) def _a ( self : int , _lowerCamelCase : int , _lowerCamelCase : int ): """simple docstring""" return 0xffffffff & (value << (32 - rotations)) | (value >> rotations) class UpperCamelCase_ (unittest.TestCase ): """simple docstring""" def _a ( self : Tuple ): """simple docstring""" import hashlib A_ : str = bytes('''Test String''' , '''utf-8''' ) self.assertEqual(SHAaaa(_lowerCamelCase ).hash , hashlib.shaaaa(_lowerCamelCase ).hexdigest() ) def snake_case__ ( ) -> None: import doctest doctest.testmod() A_ : str = argparse.ArgumentParser() parser.add_argument( '''-s''' , '''--string''' , dest='''input_string''' , default='''Hello World!! Welcome to Cryptography''' , help='''Hash the string''' , ) parser.add_argument( '''-f''' , '''--file''' , dest='''input_file''' , help='''Hash contents of a file''' ) A_ : Optional[int] = parser.parse_args() A_ : Optional[int] = args.input_string # hash input should be a bytestring if args.input_file: with open(args.input_file , '''rb''' ) as f: A_ : Optional[Any] = f.read() else: A_ : Optional[Any] = bytes(lowerCamelCase__ , '''utf-8''' ) print(SHAaaa(lowerCamelCase__ ).hash ) if __name__ == "__main__": main()
361
'''simple docstring''' # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import torch from ..models.clipseg import CLIPSegForImageSegmentation from ..utils import is_vision_available, requires_backends from .base import PipelineTool if is_vision_available(): from PIL import Image class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = ( 'This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image.' 'It takes two arguments named `image` which should be the original image, and `label` which should be a text ' 'describing the elements what should be identified in the segmentation mask. The tool returns the mask.' ) _lowerCAmelCase = 'CIDAS/clipseg-rd64-refined' _lowerCAmelCase = 'image_segmenter' _lowerCAmelCase = CLIPSegForImageSegmentation _lowerCAmelCase = ['image', 'text'] _lowerCAmelCase = ['image'] def __init__( self : Optional[int] , *_lowerCamelCase : Optional[int] , **_lowerCamelCase : Union[str, Any] ): """simple docstring""" requires_backends(self , ['''vision'''] ) super().__init__(*_lowerCamelCase , **_lowerCamelCase ) def _a ( self : List[str] , _lowerCamelCase : "Image" , _lowerCamelCase : str ): """simple docstring""" return self.pre_processor(text=[label] , images=[image] , padding=_lowerCamelCase , return_tensors='''pt''' ) def _a ( self : Union[str, Any] , _lowerCamelCase : Optional[int] ): """simple docstring""" with torch.no_grad(): A_ : Optional[int] = self.model(**_lowerCamelCase ).logits return logits def _a ( self : List[str] , _lowerCamelCase : Optional[int] ): """simple docstring""" A_ : int = outputs.cpu().detach().numpy() A_ : Tuple = 0 A_ : List[str] = 1 return Image.fromarray((array * 255).astype(np.uinta ) )
4
0
'''simple docstring''' from math import cos, sin, sqrt, tau from audio_filters.iir_filter import IIRFilter def snake_case__ ( lowerCamelCase__ : int , lowerCamelCase__ : int , lowerCamelCase__ : float = 1 / sqrt(2 ) ) -> IIRFilter: A_ : Dict = tau * frequency / samplerate A_ : Union[str, Any] = sin(lowerCamelCase__ ) A_ : str = cos(lowerCamelCase__ ) A_ : Optional[int] = _sin / (2 * q_factor) A_ : Dict = (1 - _cos) / 2 A_ : Optional[int] = 1 - _cos A_ : List[Any] = 1 + alpha A_ : str = -2 * _cos A_ : Optional[int] = 1 - alpha A_ : Union[str, Any] = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def snake_case__ ( lowerCamelCase__ : int , lowerCamelCase__ : int , lowerCamelCase__ : float = 1 / sqrt(2 ) ) -> IIRFilter: A_ : Union[str, Any] = tau * frequency / samplerate A_ : str = sin(lowerCamelCase__ ) A_ : str = cos(lowerCamelCase__ ) A_ : Union[str, Any] = _sin / (2 * q_factor) A_ : str = (1 + _cos) / 2 A_ : Optional[int] = -1 - _cos A_ : Optional[Any] = 1 + alpha A_ : int = -2 * _cos A_ : Tuple = 1 - alpha A_ : Optional[Any] = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def snake_case__ ( lowerCamelCase__ : int , lowerCamelCase__ : int , lowerCamelCase__ : float = 1 / sqrt(2 ) ) -> IIRFilter: A_ : Optional[Any] = tau * frequency / samplerate A_ : List[str] = sin(lowerCamelCase__ ) A_ : str = cos(lowerCamelCase__ ) A_ : Optional[Any] = _sin / (2 * q_factor) A_ : List[str] = _sin / 2 A_ : Optional[int] = 0 A_ : List[str] = -ba A_ : Optional[Any] = 1 + alpha A_ : Optional[int] = -2 * _cos A_ : Tuple = 1 - alpha A_ : Dict = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def snake_case__ ( lowerCamelCase__ : int , lowerCamelCase__ : int , lowerCamelCase__ : float = 1 / sqrt(2 ) ) -> IIRFilter: A_ : Dict = tau * frequency / samplerate A_ : List[Any] = sin(lowerCamelCase__ ) A_ : str = cos(lowerCamelCase__ ) A_ : Optional[int] = _sin / (2 * q_factor) A_ : Optional[Any] = 1 - alpha A_ : List[str] = -2 * _cos A_ : Tuple = 1 + alpha A_ : Any = IIRFilter(2 ) filt.set_coefficients([ba, ba, ba] , [ba, ba, ba] ) return filt def snake_case__ ( lowerCamelCase__ : int , lowerCamelCase__ : int , lowerCamelCase__ : float , lowerCamelCase__ : float = 1 / sqrt(2 ) , ) -> IIRFilter: A_ : str = tau * frequency / samplerate A_ : str = sin(lowerCamelCase__ ) A_ : Union[str, Any] = cos(lowerCamelCase__ ) A_ : Any = _sin / (2 * q_factor) A_ : Tuple = 1_0 ** (gain_db / 4_0) A_ : List[str] = 1 + alpha * big_a A_ : str = -2 * _cos A_ : Optional[int] = 1 - alpha * big_a A_ : List[Any] = 1 + alpha / big_a A_ : Dict = -2 * _cos A_ : Optional[Any] = 1 - alpha / big_a A_ : List[Any] = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def snake_case__ ( lowerCamelCase__ : int , lowerCamelCase__ : int , lowerCamelCase__ : float , lowerCamelCase__ : float = 1 / sqrt(2 ) , ) -> IIRFilter: A_ : Any = tau * frequency / samplerate A_ : Any = sin(lowerCamelCase__ ) A_ : Any = cos(lowerCamelCase__ ) A_ : str = _sin / (2 * q_factor) A_ : Tuple = 1_0 ** (gain_db / 4_0) A_ : List[Any] = (big_a + 1) - (big_a - 1) * _cos A_ : int = (big_a + 1) + (big_a - 1) * _cos A_ : Dict = (big_a - 1) - (big_a + 1) * _cos A_ : str = (big_a - 1) + (big_a + 1) * _cos A_ : List[str] = 2 * sqrt(lowerCamelCase__ ) * alpha A_ : Optional[Any] = big_a * (pmc + aaa) A_ : Any = 2 * big_a * mpc A_ : int = big_a * (pmc - aaa) A_ : Optional[int] = ppmc + aaa A_ : int = -2 * pmpc A_ : Tuple = ppmc - aaa A_ : List[Any] = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def snake_case__ ( lowerCamelCase__ : int , lowerCamelCase__ : int , lowerCamelCase__ : float , lowerCamelCase__ : float = 1 / sqrt(2 ) , ) -> IIRFilter: A_ : Any = tau * frequency / samplerate A_ : int = sin(lowerCamelCase__ ) A_ : str = cos(lowerCamelCase__ ) A_ : Tuple = _sin / (2 * q_factor) A_ : Any = 1_0 ** (gain_db / 4_0) A_ : List[str] = (big_a + 1) - (big_a - 1) * _cos A_ : Any = (big_a + 1) + (big_a - 1) * _cos A_ : Tuple = (big_a - 1) - (big_a + 1) * _cos A_ : Optional[int] = (big_a - 1) + (big_a + 1) * _cos A_ : Any = 2 * sqrt(lowerCamelCase__ ) * alpha A_ : Optional[int] = big_a * (ppmc + aaa) A_ : Optional[Any] = -2 * big_a * pmpc A_ : Union[str, Any] = big_a * (ppmc - aaa) A_ : Tuple = pmc + aaa A_ : List[str] = 2 * mpc A_ : Optional[Any] = pmc - aaa A_ : Optional[Any] = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt
362
'''simple docstring''' from collections.abc import Sequence def snake_case__ ( lowerCamelCase__ : Sequence[float] , lowerCamelCase__ : bool = False ) -> float: if not arr: return 0 A_ : Union[str, Any] = 0 if allow_empty_subarrays else float('''-inf''' ) A_ : str = 0.0 for num in arr: A_ : Any = max(0 if allow_empty_subarrays else num , curr_sum + num ) A_ : Tuple = max(lowerCamelCase__ , lowerCamelCase__ ) return max_sum if __name__ == "__main__": from doctest import testmod testmod() snake_case__ = [-2, 1, -3, 4, -1, 2, 1, -5, 4] print(F'{max_subarray_sum(nums) = }')
4
0
'''simple docstring''' import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DetrImageProcessor class UpperCamelCase_ (unittest.TestCase ): """simple docstring""" def __init__( self : int , _lowerCamelCase : Tuple , _lowerCamelCase : Any=7 , _lowerCamelCase : Optional[Any]=3 , _lowerCamelCase : Optional[Any]=30 , _lowerCamelCase : Dict=400 , _lowerCamelCase : Dict=True , _lowerCamelCase : str=None , _lowerCamelCase : List[Any]=True , _lowerCamelCase : List[str]=1 / 255 , _lowerCamelCase : Any=True , _lowerCamelCase : str=[0.5, 0.5, 0.5] , _lowerCamelCase : List[str]=[0.5, 0.5, 0.5] , _lowerCamelCase : Any=True , ): """simple docstring""" A_ : Any = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1333} A_ : Any = parent A_ : int = batch_size A_ : str = num_channels A_ : str = min_resolution A_ : Dict = max_resolution A_ : List[Any] = do_resize A_ : str = size A_ : str = do_rescale A_ : List[Any] = rescale_factor A_ : List[str] = do_normalize A_ : Dict = image_mean A_ : int = image_std A_ : int = do_pad def _a ( self : Optional[Any] ): """simple docstring""" return { "do_resize": self.do_resize, "size": self.size, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_pad": self.do_pad, } def _a ( self : Optional[int] , _lowerCamelCase : Any , _lowerCamelCase : Union[str, Any]=False ): """simple docstring""" if not batched: A_ : str = image_inputs[0] if isinstance(_lowerCamelCase , Image.Image ): A_ : Optional[int] = image.size else: A_ : int = image.shape[1], image.shape[2] if w < h: A_ : Optional[int] = int(self.size['''shortest_edge'''] * h / w ) A_ : Any = self.size['''shortest_edge'''] elif w > h: A_ : Dict = self.size['''shortest_edge'''] A_ : List[Any] = int(self.size['''shortest_edge'''] * w / h ) else: A_ : int = self.size['''shortest_edge'''] A_ : List[Any] = self.size['''shortest_edge'''] else: A_ : List[Any] = [] for image in image_inputs: A_ : List[Any] = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) A_ : Any = max(_lowerCamelCase , key=lambda _lowerCamelCase : item[0] )[0] A_ : Any = max(_lowerCamelCase , key=lambda _lowerCamelCase : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class UpperCamelCase_ (a__, unittest.TestCase ): """simple docstring""" _lowerCAmelCase = DetrImageProcessor if is_vision_available() else None def _a ( self : Optional[Any] ): """simple docstring""" A_ : Any = DetrImageProcessingTester(self ) @property def _a ( self : Tuple ): """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def _a ( self : Dict ): """simple docstring""" A_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_lowerCamelCase , '''image_mean''' ) ) self.assertTrue(hasattr(_lowerCamelCase , '''image_std''' ) ) self.assertTrue(hasattr(_lowerCamelCase , '''do_normalize''' ) ) self.assertTrue(hasattr(_lowerCamelCase , '''do_rescale''' ) ) self.assertTrue(hasattr(_lowerCamelCase , '''rescale_factor''' ) ) self.assertTrue(hasattr(_lowerCamelCase , '''do_resize''' ) ) self.assertTrue(hasattr(_lowerCamelCase , '''size''' ) ) self.assertTrue(hasattr(_lowerCamelCase , '''do_pad''' ) ) def _a ( self : int ): """simple docstring""" A_ : List[str] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1333} ) self.assertEqual(image_processor.do_pad , _lowerCamelCase ) A_ : Any = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=_lowerCamelCase ) self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} ) self.assertEqual(image_processor.do_pad , _lowerCamelCase ) def _a ( self : List[Any] ): """simple docstring""" pass def _a ( self : Dict ): """simple docstring""" A_ : Dict = self.image_processing_class(**self.image_processor_dict ) # create random PIL images A_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase ) for image in image_inputs: self.assertIsInstance(_lowerCamelCase , Image.Image ) # Test not batched input A_ : Any = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values A_ : Any = self.image_processor_tester.get_expected_values(_lowerCamelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched A_ : int = self.image_processor_tester.get_expected_values(_lowerCamelCase , batched=_lowerCamelCase ) A_ : Tuple = image_processing(_lowerCamelCase , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def _a ( self : Tuple ): """simple docstring""" A_ : List[Any] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors A_ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , numpify=_lowerCamelCase ) for image in image_inputs: self.assertIsInstance(_lowerCamelCase , np.ndarray ) # Test not batched input A_ : int = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values A_ : Dict = self.image_processor_tester.get_expected_values(_lowerCamelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched A_ : Optional[Any] = image_processing(_lowerCamelCase , return_tensors='''pt''' ).pixel_values A_ : Optional[Any] = self.image_processor_tester.get_expected_values(_lowerCamelCase , batched=_lowerCamelCase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def _a ( self : Optional[Any] ): """simple docstring""" A_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors A_ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , torchify=_lowerCamelCase ) for image in image_inputs: self.assertIsInstance(_lowerCamelCase , torch.Tensor ) # Test not batched input A_ : Optional[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values A_ : Dict = self.image_processor_tester.get_expected_values(_lowerCamelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched A_ : Dict = image_processing(_lowerCamelCase , return_tensors='''pt''' ).pixel_values A_ : List[str] = self.image_processor_tester.get_expected_values(_lowerCamelCase , batched=_lowerCamelCase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def _a ( self : List[Any] ): """simple docstring""" A_ : List[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f: A_ : Union[str, Any] = json.loads(f.read() ) A_ : Any = {'''image_id''': 39769, '''annotations''': target} # encode them A_ : Union[str, Any] = DetrImageProcessor.from_pretrained('''facebook/detr-resnet-50''' ) A_ : List[str] = image_processing(images=_lowerCamelCase , annotations=_lowerCamelCase , return_tensors='''pt''' ) # verify pixel values A_ : Optional[Any] = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding['''pixel_values'''].shape , _lowerCamelCase ) A_ : int = torch.tensor([0.27_96, 0.31_38, 0.34_81] ) self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , _lowerCamelCase , atol=1E-4 ) ) # verify area A_ : Dict = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , _lowerCamelCase ) ) # verify boxes A_ : int = torch.Size([6, 4] ) self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , _lowerCamelCase ) A_ : Dict = torch.tensor([0.55_03, 0.27_65, 0.06_04, 0.22_15] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , _lowerCamelCase , atol=1E-3 ) ) # verify image_id A_ : Union[str, Any] = torch.tensor([39769] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , _lowerCamelCase ) ) # verify is_crowd A_ : List[Any] = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , _lowerCamelCase ) ) # verify class_labels A_ : Tuple = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , _lowerCamelCase ) ) # verify orig_size A_ : List[Any] = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , _lowerCamelCase ) ) # verify size A_ : Optional[int] = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , _lowerCamelCase ) ) @slow def _a ( self : Any ): """simple docstring""" A_ : Tuple = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f: A_ : Tuple = json.loads(f.read() ) A_ : Union[str, Any] = {'''file_name''': '''000000039769.png''', '''image_id''': 39769, '''segments_info''': target} A_ : Optional[Any] = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' ) # encode them A_ : str = DetrImageProcessor.from_pretrained('''facebook/detr-resnet-50-panoptic''' ) A_ : Tuple = image_processing(images=_lowerCamelCase , annotations=_lowerCamelCase , masks_path=_lowerCamelCase , return_tensors='''pt''' ) # verify pixel values A_ : Dict = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding['''pixel_values'''].shape , _lowerCamelCase ) A_ : Optional[Any] = torch.tensor([0.27_96, 0.31_38, 0.34_81] ) self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , _lowerCamelCase , atol=1E-4 ) ) # verify area A_ : Dict = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , _lowerCamelCase ) ) # verify boxes A_ : Optional[Any] = torch.Size([6, 4] ) self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , _lowerCamelCase ) A_ : Any = torch.tensor([0.26_25, 0.54_37, 0.46_88, 0.86_25] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , _lowerCamelCase , atol=1E-3 ) ) # verify image_id A_ : Tuple = torch.tensor([39769] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , _lowerCamelCase ) ) # verify is_crowd A_ : Dict = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , _lowerCamelCase ) ) # verify class_labels A_ : Union[str, Any] = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , _lowerCamelCase ) ) # verify masks A_ : int = 822873 self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , _lowerCamelCase ) # verify orig_size A_ : Dict = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , _lowerCamelCase ) ) # verify size A_ : Any = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , _lowerCamelCase ) )
363
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging snake_case__ = logging.get_logger(__name__) snake_case__ = { """facebook/s2t-wav2vec2-large-en-de""": ( """https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json""" ), # See all Speech2Text models at https://huggingface.co/models?filter=speech2text2 } class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = 'speech_to_text_2' _lowerCAmelCase = ['past_key_values'] _lowerCAmelCase = {'num_attention_heads': 'decoder_attention_heads', 'hidden_size': 'd_model'} def __init__( self : Optional[Any] , _lowerCamelCase : Optional[Any]=10000 , _lowerCamelCase : List[Any]=6 , _lowerCamelCase : int=2048 , _lowerCamelCase : Dict=4 , _lowerCamelCase : str=0.0 , _lowerCamelCase : int=True , _lowerCamelCase : int="relu" , _lowerCamelCase : Any=256 , _lowerCamelCase : List[Any]=0.1 , _lowerCamelCase : Tuple=0.0 , _lowerCamelCase : Union[str, Any]=0.0 , _lowerCamelCase : Optional[Any]=0.02 , _lowerCamelCase : int=2 , _lowerCamelCase : List[str]=True , _lowerCamelCase : str=1 , _lowerCamelCase : List[Any]=0 , _lowerCamelCase : Optional[int]=2 , _lowerCamelCase : Tuple=1024 , **_lowerCamelCase : int , ): """simple docstring""" A_ : Optional[int] = vocab_size A_ : Tuple = d_model A_ : List[str] = decoder_ffn_dim A_ : str = decoder_layers A_ : Any = decoder_attention_heads A_ : int = dropout A_ : str = attention_dropout A_ : Optional[int] = activation_dropout A_ : str = activation_function A_ : List[Any] = init_std A_ : Union[str, Any] = decoder_layerdrop A_ : Any = use_cache A_ : Optional[Any] = decoder_layers A_ : Optional[int] = scale_embedding # scale factor will be sqrt(d_model) if True A_ : Optional[Any] = max_target_positions super().__init__( pad_token_id=_lowerCamelCase , bos_token_id=_lowerCamelCase , eos_token_id=_lowerCamelCase , decoder_start_token_id=_lowerCamelCase , **_lowerCamelCase , )
4
0
import argparse from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt if __name__ == "__main__": snake_case__ = argparse.ArgumentParser() parser.add_argument( """--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert.""" ) parser.add_argument( """--original_config_file""", type=str, required=True, help="""The YAML config file corresponding to the original architecture.""", ) parser.add_argument( """--num_in_channels""", default=None, type=int, help="""The number of input channels. If `None` number of input channels will be automatically inferred.""", ) parser.add_argument( """--image_size""", default=5_12, type=int, help=( """The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2""" """ Base. Use 768 for Stable Diffusion v2.""" ), ) parser.add_argument( """--extract_ema""", action="""store_true""", help=( """Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights""" """ or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield""" """ higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.""" ), ) parser.add_argument( """--upcast_attention""", action="""store_true""", help=( """Whether the attention computation should always be upcasted. This is necessary when running stable""" """ diffusion 2.1.""" ), ) parser.add_argument( """--from_safetensors""", action="""store_true""", help="""If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.""", ) parser.add_argument( """--to_safetensors""", action="""store_true""", help="""Whether to store pipeline in safetensors format or not.""", ) parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""") parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""") def snake_case__ ( lowerCamelCase__ : Optional[int] ) -> List[Any]: if string == "True": return True elif string == "False": return False else: raise ValueError(f'could not parse string as bool {string}' ) parser.add_argument( """--use_linear_projection""", help="""Override for use linear projection""", required=False, type=parse_bool ) parser.add_argument("""--cross_attention_dim""", help="""Override for cross attention_dim""", required=False, type=int) snake_case__ = parser.parse_args() snake_case__ = download_controlnet_from_original_ckpt( checkpoint_path=args.checkpoint_path, original_config_file=args.original_config_file, image_size=args.image_size, extract_ema=args.extract_ema, num_in_channels=args.num_in_channels, upcast_attention=args.upcast_attention, from_safetensors=args.from_safetensors, device=args.device, use_linear_projection=args.use_linear_projection, cross_attention_dim=args.cross_attention_dim, ) controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
364
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING snake_case__ = logging.get_logger(__name__) snake_case__ = { """microsoft/table-transformer-detection""": ( """https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json""" ), } class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = 'table-transformer' _lowerCAmelCase = ['past_key_values'] _lowerCAmelCase = { 'hidden_size': 'd_model', 'num_attention_heads': 'encoder_attention_heads', } def __init__( self : Any , _lowerCamelCase : Union[str, Any]=True , _lowerCamelCase : Dict=None , _lowerCamelCase : int=3 , _lowerCamelCase : Any=100 , _lowerCamelCase : List[Any]=6 , _lowerCamelCase : Tuple=2048 , _lowerCamelCase : Any=8 , _lowerCamelCase : Dict=6 , _lowerCamelCase : Tuple=2048 , _lowerCamelCase : int=8 , _lowerCamelCase : Optional[int]=0.0 , _lowerCamelCase : List[Any]=0.0 , _lowerCamelCase : List[Any]=True , _lowerCamelCase : Optional[int]="relu" , _lowerCamelCase : Union[str, Any]=256 , _lowerCamelCase : Any=0.1 , _lowerCamelCase : Tuple=0.0 , _lowerCamelCase : Optional[int]=0.0 , _lowerCamelCase : str=0.02 , _lowerCamelCase : Tuple=1.0 , _lowerCamelCase : Dict=False , _lowerCamelCase : str="sine" , _lowerCamelCase : str="resnet50" , _lowerCamelCase : Any=True , _lowerCamelCase : List[str]=False , _lowerCamelCase : Any=1 , _lowerCamelCase : int=5 , _lowerCamelCase : Tuple=2 , _lowerCamelCase : Optional[int]=1 , _lowerCamelCase : Any=1 , _lowerCamelCase : Dict=5 , _lowerCamelCase : str=2 , _lowerCamelCase : Union[str, Any]=0.1 , **_lowerCamelCase : int , ): """simple docstring""" if backbone_config is not None and use_timm_backbone: raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' ) if not use_timm_backbone: if backbone_config is None: logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' ) A_ : int = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] ) elif isinstance(_lowerCamelCase , _lowerCamelCase ): A_ : str = backbone_config.get('''model_type''' ) A_ : Optional[int] = CONFIG_MAPPING[backbone_model_type] A_ : List[str] = config_class.from_dict(_lowerCamelCase ) # set timm attributes to None A_ ,A_ ,A_ : Union[str, Any] = None, None, None A_ : Optional[Any] = use_timm_backbone A_ : Optional[int] = backbone_config A_ : Optional[Any] = num_channels A_ : Dict = num_queries A_ : str = d_model A_ : List[str] = encoder_ffn_dim A_ : int = encoder_layers A_ : Optional[Any] = encoder_attention_heads A_ : List[str] = decoder_ffn_dim A_ : Any = decoder_layers A_ : List[str] = decoder_attention_heads A_ : Tuple = dropout A_ : Optional[Any] = attention_dropout A_ : Any = activation_dropout A_ : List[Any] = activation_function A_ : Dict = init_std A_ : Any = init_xavier_std A_ : List[Any] = encoder_layerdrop A_ : int = decoder_layerdrop A_ : Any = encoder_layers A_ : List[str] = auxiliary_loss A_ : List[Any] = position_embedding_type A_ : Optional[Any] = backbone A_ : Tuple = use_pretrained_backbone A_ : List[Any] = dilation # Hungarian matcher A_ : List[str] = class_cost A_ : str = bbox_cost A_ : Union[str, Any] = giou_cost # Loss coefficients A_ : Any = mask_loss_coefficient A_ : Optional[int] = dice_loss_coefficient A_ : Dict = bbox_loss_coefficient A_ : int = giou_loss_coefficient A_ : int = eos_coefficient super().__init__(is_encoder_decoder=_lowerCamelCase , **_lowerCamelCase ) @property def _a ( self : List[Any] ): """simple docstring""" return self.encoder_attention_heads @property def _a ( self : Any ): """simple docstring""" return self.d_model class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = version.parse('1.11' ) @property def _a ( self : Tuple ): """simple docstring""" return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ('''pixel_mask''', {0: '''batch'''}), ] ) @property def _a ( self : Optional[int] ): """simple docstring""" return 1E-5 @property def _a ( self : str ): """simple docstring""" return 12
4
0
'''simple docstring''' import argparse import torch from huggingface_hub import hf_hub_download from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM from transformers.utils import logging logging.set_verbosity_info() snake_case__ = logging.get_logger(__name__) def snake_case__ ( lowerCamelCase__ : str , lowerCamelCase__ : str ) -> str: A_ : List[str] = RobertaPreLayerNormConfig.from_pretrained( lowerCamelCase__ , architectures=['''RobertaPreLayerNormForMaskedLM'''] ) # convert state_dict A_ : Union[str, Any] = torch.load(hf_hub_download(repo_id=lowerCamelCase__ , filename='''pytorch_model.bin''' ) ) A_ : Dict = {} for tensor_key, tensor_value in original_state_dict.items(): # The transformer implementation gives the model a unique name, rather than overwiriting 'roberta' if tensor_key.startswith('''roberta.''' ): A_ : Tuple = '''roberta_prelayernorm.''' + tensor_key[len('''roberta.''' ) :] # The original implementation contains weights which are not used, remove them from the state_dict if tensor_key.endswith('''.self.LayerNorm.weight''' ) or tensor_key.endswith('''.self.LayerNorm.bias''' ): continue A_ : str = tensor_value A_ : str = RobertaPreLayerNormForMaskedLM.from_pretrained( pretrained_model_name_or_path=lowerCamelCase__ , config=lowerCamelCase__ , state_dict=lowerCamelCase__ ) model.save_pretrained(lowerCamelCase__ ) # convert tokenizer A_ : Optional[Any] = AutoTokenizer.from_pretrained(lowerCamelCase__ ) tokenizer.save_pretrained(lowerCamelCase__ ) if __name__ == "__main__": snake_case__ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--checkpoint-repo""", default=None, type=str, required=True, help="""Path the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) snake_case__ = parser.parse_args() convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
365
'''simple docstring''' import inspect import unittest from transformers import BitConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class UpperCamelCase_ : """simple docstring""" def __init__( self : Optional[Any] , _lowerCamelCase : int , _lowerCamelCase : List[str]=3 , _lowerCamelCase : Any=32 , _lowerCamelCase : Union[str, Any]=3 , _lowerCamelCase : int=10 , _lowerCamelCase : Union[str, Any]=[8, 16, 32, 64] , _lowerCamelCase : Dict=[1, 1, 2, 1] , _lowerCamelCase : Union[str, Any]=True , _lowerCamelCase : Optional[int]=True , _lowerCamelCase : Any="relu" , _lowerCamelCase : Optional[Any]=3 , _lowerCamelCase : Optional[Any]=None , _lowerCamelCase : Dict=["stage2", "stage3", "stage4"] , _lowerCamelCase : Union[str, Any]=[2, 3, 4] , _lowerCamelCase : Tuple=1 , ): """simple docstring""" A_ : List[str] = parent A_ : List[str] = batch_size A_ : Union[str, Any] = image_size A_ : Tuple = num_channels A_ : Any = embeddings_size A_ : int = hidden_sizes A_ : Optional[Any] = depths A_ : List[Any] = is_training A_ : Optional[int] = use_labels A_ : int = hidden_act A_ : Tuple = num_labels A_ : Union[str, Any] = scope A_ : List[Any] = len(_lowerCamelCase ) A_ : Union[str, Any] = out_features A_ : List[Any] = out_indices A_ : Dict = num_groups def _a ( self : Optional[int] ): """simple docstring""" A_ : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A_ : Union[str, Any] = None if self.use_labels: A_ : Any = ids_tensor([self.batch_size] , self.num_labels ) A_ : Any = self.get_config() return config, pixel_values, labels def _a ( self : Union[str, Any] ): """simple docstring""" return BitConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , ) def _a ( self : List[Any] , _lowerCamelCase : List[str] , _lowerCamelCase : List[str] , _lowerCamelCase : Optional[Any] ): """simple docstring""" A_ : Any = BitModel(config=_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() A_ : int = model(_lowerCamelCase ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def _a ( self : Optional[int] , _lowerCamelCase : List[Any] , _lowerCamelCase : str , _lowerCamelCase : Optional[int] ): """simple docstring""" A_ : Dict = self.num_labels A_ : Optional[Any] = BitForImageClassification(_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() A_ : List[Any] = model(_lowerCamelCase , labels=_lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _a ( self : Any , _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : List[Any] ): """simple docstring""" A_ : List[Any] = BitBackbone(config=_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() A_ : int = model(_lowerCamelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None A_ : Optional[Any] = None A_ : int = BitBackbone(config=_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() A_ : Optional[int] = model(_lowerCamelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def _a ( self : List[Any] ): """simple docstring""" A_ : Union[str, Any] = self.prepare_config_and_inputs() A_ ,A_ ,A_ : Union[str, Any] = config_and_inputs A_ : str = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class UpperCamelCase_ (a__, a__, unittest.TestCase ): """simple docstring""" _lowerCAmelCase = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else () _lowerCAmelCase = ( {'feature-extraction': BitModel, 'image-classification': BitForImageClassification} if is_torch_available() else {} ) _lowerCAmelCase = False _lowerCAmelCase = False _lowerCAmelCase = False _lowerCAmelCase = False _lowerCAmelCase = False def _a ( self : Optional[Any] ): """simple docstring""" A_ : List[str] = BitModelTester(self ) A_ : Optional[Any] = ConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase ) def _a ( self : Optional[Any] ): """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _a ( self : List[Any] ): """simple docstring""" return @unittest.skip(reason='''Bit does not output attentions''' ) def _a ( self : str ): """simple docstring""" pass @unittest.skip(reason='''Bit does not use inputs_embeds''' ) def _a ( self : Union[str, Any] ): """simple docstring""" pass @unittest.skip(reason='''Bit does not support input and output embeddings''' ) def _a ( self : Any ): """simple docstring""" pass def _a ( self : List[Any] ): """simple docstring""" A_ ,A_ : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A_ : Dict = model_class(_lowerCamelCase ) A_ : Dict = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A_ : int = [*signature.parameters.keys()] A_ : Union[str, Any] = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , _lowerCamelCase ) def _a ( self : Optional[Any] ): """simple docstring""" A_ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCamelCase ) def _a ( self : Optional[Any] ): """simple docstring""" A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*_lowerCamelCase ) def _a ( self : Tuple ): """simple docstring""" A_ ,A_ : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A_ : str = model_class(config=_lowerCamelCase ) for name, module in model.named_modules(): if isinstance(_lowerCamelCase , (nn.BatchNormad, nn.GroupNorm) ): self.assertTrue( torch.all(module.weight == 1 ) , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , ) self.assertTrue( torch.all(module.bias == 0 ) , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , ) def _a ( self : int ): """simple docstring""" def check_hidden_states_output(_lowerCamelCase : Union[str, Any] , _lowerCamelCase : Dict , _lowerCamelCase : int ): A_ : Union[str, Any] = model_class(_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() with torch.no_grad(): A_ : Union[str, Any] = model(**self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) ) A_ : int = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states A_ : List[Any] = self.model_tester.num_stages self.assertEqual(len(_lowerCamelCase ) , expected_num_stages + 1 ) # Bit's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) A_ ,A_ : str = self.model_tester.prepare_config_and_inputs_for_common() A_ : Tuple = ['''preactivation''', '''bottleneck'''] for model_class in self.all_model_classes: for layer_type in layers_type: A_ : Tuple = layer_type A_ : Optional[Any] = True check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] A_ : List[str] = True check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) @unittest.skip(reason='''Bit does not use feedforward chunking''' ) def _a ( self : Tuple ): """simple docstring""" pass def _a ( self : str ): """simple docstring""" A_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_lowerCamelCase ) @slow def _a ( self : Union[str, Any] ): """simple docstring""" for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A_ : List[Any] = BitModel.from_pretrained(_lowerCamelCase ) self.assertIsNotNone(_lowerCamelCase ) def snake_case__ ( ) -> Optional[int]: A_ : Optional[int] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class UpperCamelCase_ (unittest.TestCase ): """simple docstring""" @cached_property def _a ( self : List[Any] ): """simple docstring""" return ( BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def _a ( self : Dict ): """simple docstring""" A_ : Optional[int] = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(_lowerCamelCase ) A_ : Union[str, Any] = self.default_image_processor A_ : Optional[int] = prepare_img() A_ : int = image_processor(images=_lowerCamelCase , return_tensors='''pt''' ).to(_lowerCamelCase ) # forward pass with torch.no_grad(): A_ : Union[str, Any] = model(**_lowerCamelCase ) # verify the logits A_ : Dict = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , _lowerCamelCase ) A_ : Tuple = torch.tensor([[-0.65_26, -0.52_63, -1.43_98]] ).to(_lowerCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCamelCase , atol=1E-4 ) ) @require_torch class UpperCamelCase_ (a__, unittest.TestCase ): """simple docstring""" _lowerCAmelCase = (BitBackbone,) if is_torch_available() else () _lowerCAmelCase = BitConfig _lowerCAmelCase = False def _a ( self : List[str] ): """simple docstring""" A_ : Union[str, Any] = BitModelTester(self )
4
0
'''simple docstring''' import enum import warnings from ..tokenization_utils import TruncationStrategy from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING snake_case__ = logging.get_logger(__name__) class UpperCamelCase_ (enum.Enum ): """simple docstring""" _lowerCAmelCase = 0 _lowerCAmelCase = 1 @add_end_docstrings(a__ ) class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = 'generated' def __init__( self : Optional[Any] , *_lowerCamelCase : Dict , **_lowerCamelCase : List[str] ): """simple docstring""" super().__init__(*_lowerCamelCase , **_lowerCamelCase ) self.check_model_type( TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING if self.framework == '''tf''' else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING ) def _a ( self : Dict , _lowerCamelCase : Optional[int]=None , _lowerCamelCase : Union[str, Any]=None , _lowerCamelCase : List[Any]=None , _lowerCamelCase : Dict=None , _lowerCamelCase : Optional[int]=None , _lowerCamelCase : Optional[int]=None , **_lowerCamelCase : int , ): """simple docstring""" A_ : Union[str, Any] = {} if truncation is not None: A_ : str = truncation A_ : Dict = generate_kwargs A_ : Any = {} if return_tensors is not None and return_type is None: A_ : str = ReturnType.TENSORS if return_tensors else ReturnType.TEXT if return_type is not None: A_ : str = return_type if clean_up_tokenization_spaces is not None: A_ : Tuple = clean_up_tokenization_spaces if stop_sequence is not None: A_ : Optional[int] = self.tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase ) if len(_lowerCamelCase ) > 1: warnings.warn( '''Stopping on a multiple token sequence is not yet supported on transformers. The first token of''' ''' the stop sequence will be used as the stop sequence string in the interim.''' ) A_ : Any = stop_sequence_ids[0] return preprocess_params, forward_params, postprocess_params def _a ( self : str , _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : int ): """simple docstring""" return True def _a ( self : int , *_lowerCamelCase : int , _lowerCamelCase : int ): """simple docstring""" A_ : Union[str, Any] = self.model.config.prefix if self.model.config.prefix is not None else '''''' if isinstance(args[0] , _lowerCamelCase ): if self.tokenizer.pad_token_id is None: raise ValueError('''Please make sure that the tokenizer has a pad_token_id when using a batch input''' ) A_ : Optional[int] = ([prefix + arg for arg in args[0]],) A_ : List[str] = True elif isinstance(args[0] , _lowerCamelCase ): A_ : str = (prefix + args[0],) A_ : str = False else: raise ValueError( f' `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`' ) A_ : Dict = self.tokenizer(*_lowerCamelCase , padding=_lowerCamelCase , truncation=_lowerCamelCase , return_tensors=self.framework ) # This is produced by tokenizers but is an invalid generate kwargs if "token_type_ids" in inputs: del inputs["token_type_ids"] return inputs def __call__( self : str , *_lowerCamelCase : int , **_lowerCamelCase : List[Any] ): """simple docstring""" A_ : str = super().__call__(*_lowerCamelCase , **_lowerCamelCase ) if ( isinstance(args[0] , _lowerCamelCase ) and all(isinstance(_lowerCamelCase , _lowerCamelCase ) for el in args[0] ) and all(len(_lowerCamelCase ) == 1 for res in result ) ): return [res[0] for res in result] return result def _a ( self : str , _lowerCamelCase : Optional[int] , _lowerCamelCase : List[Any]=TruncationStrategy.DO_NOT_TRUNCATE , **_lowerCamelCase : List[str] ): """simple docstring""" A_ : str = self._parse_and_tokenize(_lowerCamelCase , truncation=_lowerCamelCase , **_lowerCamelCase ) return inputs def _a ( self : Any , _lowerCamelCase : Tuple , **_lowerCamelCase : Optional[Any] ): """simple docstring""" if self.framework == "pt": A_ : Dict = model_inputs['''input_ids'''].shape elif self.framework == "tf": A_ : Any = tf.shape(model_inputs['''input_ids'''] ).numpy() A_ : Optional[int] = generate_kwargs.get('''min_length''' , self.model.config.min_length ) A_ : int = generate_kwargs.get('''max_length''' , self.model.config.max_length ) self.check_inputs(_lowerCamelCase , generate_kwargs['''min_length'''] , generate_kwargs['''max_length'''] ) A_ : int = self.model.generate(**_lowerCamelCase , **_lowerCamelCase ) A_ : Optional[int] = output_ids.shape[0] if self.framework == "pt": A_ : Optional[Any] = output_ids.reshape(_lowerCamelCase , out_b // in_b , *output_ids.shape[1:] ) elif self.framework == "tf": A_ : Any = tf.reshape(_lowerCamelCase , (in_b, out_b // in_b, *output_ids.shape[1:]) ) return {"output_ids": output_ids} def _a ( self : Optional[Any] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : int=ReturnType.TEXT , _lowerCamelCase : Any=False ): """simple docstring""" A_ : List[str] = [] for output_ids in model_outputs["output_ids"][0]: if return_type == ReturnType.TENSORS: A_ : Union[str, Any] = {f'{self.return_name}_token_ids': output_ids} elif return_type == ReturnType.TEXT: A_ : Optional[int] = { f'{self.return_name}_text': self.tokenizer.decode( _lowerCamelCase , skip_special_tokens=_lowerCamelCase , clean_up_tokenization_spaces=_lowerCamelCase , ) } records.append(_lowerCamelCase ) return records @add_end_docstrings(a__ ) class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = 'summary' def __call__( self : List[str] , *_lowerCamelCase : Tuple , **_lowerCamelCase : str ): """simple docstring""" return super().__call__(*_lowerCamelCase , **_lowerCamelCase ) def _a ( self : Optional[Any] , _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : int ): """simple docstring""" if max_length < min_length: logger.warning(f'Your min_length={min_length} must be inferior than your max_length={max_length}.' ) if input_length < max_length: logger.warning( f'Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is ' '''a summarization task, where outputs shorter than the input are typically wanted, you might ''' f'consider decreasing max_length manually, e.g. summarizer(\'...\', max_length={input_length//2})' ) @add_end_docstrings(a__ ) class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = 'translation' def _a ( self : Optional[Any] , _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : int ): """simple docstring""" if input_length > 0.9 * max_length: logger.warning( f'Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider ' '''increasing your max_length manually, e.g. translator(\'...\', max_length=400)''' ) return True def _a ( self : int , *_lowerCamelCase : List[Any] , _lowerCamelCase : Tuple=TruncationStrategy.DO_NOT_TRUNCATE , _lowerCamelCase : Optional[Any]=None , _lowerCamelCase : Union[str, Any]=None ): """simple docstring""" if getattr(self.tokenizer , '''_build_translation_inputs''' , _lowerCamelCase ): return self.tokenizer._build_translation_inputs( *_lowerCamelCase , return_tensors=self.framework , truncation=_lowerCamelCase , src_lang=_lowerCamelCase , tgt_lang=_lowerCamelCase ) else: return super()._parse_and_tokenize(*_lowerCamelCase , truncation=_lowerCamelCase ) def _a ( self : int , _lowerCamelCase : Tuple=None , _lowerCamelCase : Optional[int]=None , **_lowerCamelCase : Any ): """simple docstring""" A_ : Optional[Any] = super()._sanitize_parameters(**_lowerCamelCase ) if src_lang is not None: A_ : Dict = src_lang if tgt_lang is not None: A_ : Dict = tgt_lang if src_lang is None and tgt_lang is None: # Backward compatibility, direct arguments use is preferred. A_ : str = kwargs.get('''task''' , self.task ) A_ : Optional[int] = task.split('''_''' ) if task and len(_lowerCamelCase ) == 4: # translation, XX, to YY A_ : List[Any] = items[1] A_ : Any = items[3] return preprocess_params, forward_params, postprocess_params def __call__( self : Tuple , *_lowerCamelCase : Dict , **_lowerCamelCase : Optional[int] ): """simple docstring""" return super().__call__(*_lowerCamelCase , **_lowerCamelCase )
366
'''simple docstring''' import pprint import requests snake_case__ = """https://zenquotes.io/api""" def snake_case__ ( ) -> list: return requests.get(API_ENDPOINT_URL + '''/today''' ).json() def snake_case__ ( ) -> list: return requests.get(API_ENDPOINT_URL + '''/random''' ).json() if __name__ == "__main__": snake_case__ = random_quotes() pprint.pprint(response)
4
0
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging snake_case__ = logging.get_logger(__name__) snake_case__ = { """roberta-base""": """https://huggingface.co/roberta-base/resolve/main/config.json""", """roberta-large""": """https://huggingface.co/roberta-large/resolve/main/config.json""", """roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/config.json""", """distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/config.json""", """roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json""", """roberta-large-openai-detector""": """https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json""", } class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = 'roberta' def __init__( self : Any , _lowerCamelCase : Tuple=50265 , _lowerCamelCase : int=768 , _lowerCamelCase : Optional[Any]=12 , _lowerCamelCase : Tuple=12 , _lowerCamelCase : List[str]=3072 , _lowerCamelCase : Union[str, Any]="gelu" , _lowerCamelCase : Dict=0.1 , _lowerCamelCase : Tuple=0.1 , _lowerCamelCase : Tuple=512 , _lowerCamelCase : Optional[int]=2 , _lowerCamelCase : List[Any]=0.02 , _lowerCamelCase : Any=1E-12 , _lowerCamelCase : Optional[Any]=1 , _lowerCamelCase : Union[str, Any]=0 , _lowerCamelCase : Optional[Any]=2 , _lowerCamelCase : Optional[Any]="absolute" , _lowerCamelCase : str=True , _lowerCamelCase : Optional[Any]=None , **_lowerCamelCase : Any , ): """simple docstring""" super().__init__(pad_token_id=_lowerCamelCase , bos_token_id=_lowerCamelCase , eos_token_id=_lowerCamelCase , **_lowerCamelCase ) A_ : Dict = vocab_size A_ : List[Any] = hidden_size A_ : str = num_hidden_layers A_ : int = num_attention_heads A_ : Dict = hidden_act A_ : List[str] = intermediate_size A_ : Optional[int] = hidden_dropout_prob A_ : Union[str, Any] = attention_probs_dropout_prob A_ : Union[str, Any] = max_position_embeddings A_ : Dict = type_vocab_size A_ : int = initializer_range A_ : int = layer_norm_eps A_ : Union[str, Any] = position_embedding_type A_ : Any = use_cache A_ : int = classifier_dropout class UpperCamelCase_ (a__ ): """simple docstring""" @property def _a ( self : Tuple ): """simple docstring""" if self.task == "multiple-choice": A_ : List[str] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: A_ : Optional[Any] = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ] )
367
'''simple docstring''' from __future__ import annotations class UpperCamelCase_ : """simple docstring""" def __init__( self : Optional[int] , _lowerCamelCase : int ): """simple docstring""" A_ : Union[str, Any] = order # a_{0} ... a_{k} A_ : Union[str, Any] = [1.0] + [0.0] * order # b_{0} ... b_{k} A_ : int = [1.0] + [0.0] * order # x[n-1] ... x[n-k] A_ : str = [0.0] * self.order # y[n-1] ... y[n-k] A_ : Optional[Any] = [0.0] * self.order def _a ( self : Dict , _lowerCamelCase : list[float] , _lowerCamelCase : list[float] ): """simple docstring""" if len(_lowerCamelCase ) < self.order: A_ : Any = [1.0, *a_coeffs] if len(_lowerCamelCase ) != self.order + 1: A_ : List[Any] = ( f'Expected a_coeffs to have {self.order + 1} elements ' f'for {self.order}-order filter, got {len(_lowerCamelCase )}' ) raise ValueError(_lowerCamelCase ) if len(_lowerCamelCase ) != self.order + 1: A_ : Union[str, Any] = ( f'Expected b_coeffs to have {self.order + 1} elements ' f'for {self.order}-order filter, got {len(_lowerCamelCase )}' ) raise ValueError(_lowerCamelCase ) A_ : Tuple = a_coeffs A_ : str = b_coeffs def _a ( self : Tuple , _lowerCamelCase : float ): """simple docstring""" A_ : Any = 0.0 # Start at index 1 and do index 0 at the end. for i in range(1 , self.order + 1 ): result += ( self.b_coeffs[i] * self.input_history[i - 1] - self.a_coeffs[i] * self.output_history[i - 1] ) A_ : str = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0] A_ : Optional[Any] = self.input_history[:-1] A_ : List[str] = self.output_history[:-1] A_ : Tuple = sample A_ : Tuple = result return result
4
0
'''simple docstring''' from ..utils import DummyObject, requires_backends class UpperCamelCase_ (metaclass=a__ ): """simple docstring""" _lowerCAmelCase = ['onnx'] def __init__( self : List[Any] , *_lowerCamelCase : List[Any] , **_lowerCamelCase : List[str] ): """simple docstring""" requires_backends(self , ['''onnx'''] ) @classmethod def _a ( cls : str , *_lowerCamelCase : Tuple , **_lowerCamelCase : Tuple ): """simple docstring""" requires_backends(cls , ['''onnx'''] ) @classmethod def _a ( cls : Optional[Any] , *_lowerCamelCase : str , **_lowerCamelCase : Optional[int] ): """simple docstring""" requires_backends(cls , ['''onnx'''] )
368
'''simple docstring''' class UpperCamelCase_ : """simple docstring""" def __init__( self : Optional[Any] , _lowerCamelCase : Union[str, Any] ): """simple docstring""" A_ : Union[str, Any] = val A_ : Tuple = None A_ : Any = None def _a ( self : Tuple , _lowerCamelCase : List[Any] ): """simple docstring""" if self.val: if val < self.val: if self.left is None: A_ : int = Node(_lowerCamelCase ) else: self.left.insert(_lowerCamelCase ) elif val > self.val: if self.right is None: A_ : List[str] = Node(_lowerCamelCase ) else: self.right.insert(_lowerCamelCase ) else: A_ : Any = val def snake_case__ ( lowerCamelCase__ : Any , lowerCamelCase__ : Optional[int] ) -> str: # Recursive traversal if root: inorder(root.left , lowerCamelCase__ ) res.append(root.val ) inorder(root.right , lowerCamelCase__ ) def snake_case__ ( lowerCamelCase__ : Optional[int] ) -> Tuple: # Build BST if len(lowerCamelCase__ ) == 0: return arr A_ : Dict = Node(arr[0] ) for i in range(1 , len(lowerCamelCase__ ) ): root.insert(arr[i] ) # Traverse BST in order. A_ : Tuple = [] inorder(lowerCamelCase__ , lowerCamelCase__ ) return res if __name__ == "__main__": print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
4
0
'''simple docstring''' def snake_case__ ( lowerCamelCase__ : int , lowerCamelCase__ : int , lowerCamelCase__ : int ) -> float: A_ : int = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff) # formula for sum of series return total def snake_case__ ( ) -> Optional[Any]: print(sum_of_series(1 , 1 , 1_0 ) ) if __name__ == "__main__": import doctest doctest.testmod()
369
'''simple docstring''' def snake_case__ ( lowerCamelCase__ : list ) -> list: if len(lowerCamelCase__ ) <= 1: return [tuple(lowerCamelCase__ )] A_ : List[str] = [] def generate(lowerCamelCase__ : int , lowerCamelCase__ : list ): if k == 1: res.append(tuple(arr[:] ) ) return generate(k - 1 , lowerCamelCase__ ) for i in range(k - 1 ): if k % 2 == 0: # k is even A_ ,A_ : Optional[int] = arr[k - 1], arr[i] else: # k is odd A_ ,A_ : Union[str, Any] = arr[k - 1], arr[0] generate(k - 1 , lowerCamelCase__ ) generate(len(lowerCamelCase__ ) , lowerCamelCase__ ) return res if __name__ == "__main__": snake_case__ = input("""Enter numbers separated by a comma:\n""").strip() snake_case__ = [int(item) for item in user_input.split(""",""")] print(heaps(arr))
4
0
'''simple docstring''' def snake_case__ ( lowerCamelCase__ : list ) -> list: if len(lowerCamelCase__ ) <= 1: return lst A_ : Any = 1 while i < len(lowerCamelCase__ ): if lst[i - 1] <= lst[i]: i += 1 else: A_ : Optional[int] = lst[i], lst[i - 1] i -= 1 if i == 0: A_ : List[Any] = 1 return lst if __name__ == "__main__": snake_case__ = input("""Enter numbers separated by a comma:\n""").strip() snake_case__ = [int(item) for item in user_input.split(""",""")] print(gnome_sort(unsorted))
370
'''simple docstring''' import unittest from queue import Empty from threading import Thread from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available from transformers.testing_utils import CaptureStdout, require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers import AutoModelForCausalLM @require_torch class UpperCamelCase_ (unittest.TestCase ): """simple docstring""" def _a ( self : Dict ): """simple docstring""" A_ : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) A_ : Tuple = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowerCamelCase ) A_ : Dict = -1 A_ : List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCamelCase ) A_ : Any = model.generate(_lowerCamelCase , max_new_tokens=10 , do_sample=_lowerCamelCase ) A_ : List[str] = tokenizer.decode(greedy_ids[0] ) with CaptureStdout() as cs: A_ : List[str] = TextStreamer(_lowerCamelCase ) model.generate(_lowerCamelCase , max_new_tokens=10 , do_sample=_lowerCamelCase , streamer=_lowerCamelCase ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer A_ : Dict = cs.out[:-1] self.assertEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : Tuple ): """simple docstring""" A_ : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) A_ : List[str] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowerCamelCase ) A_ : Dict = -1 A_ : List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCamelCase ) A_ : Optional[int] = model.generate(_lowerCamelCase , max_new_tokens=10 , do_sample=_lowerCamelCase ) A_ : str = tokenizer.decode(greedy_ids[0] ) A_ : int = TextIteratorStreamer(_lowerCamelCase ) A_ : List[Any] = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer} A_ : List[Any] = Thread(target=model.generate , kwargs=_lowerCamelCase ) thread.start() A_ : List[Any] = '''''' for new_text in streamer: streamer_text += new_text self.assertEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : int ): """simple docstring""" A_ : List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) A_ : List[Any] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowerCamelCase ) A_ : List[str] = -1 A_ : Any = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCamelCase ) A_ : Tuple = model.generate(_lowerCamelCase , max_new_tokens=10 , do_sample=_lowerCamelCase ) A_ : Tuple = greedy_ids[:, input_ids.shape[1] :] A_ : Tuple = tokenizer.decode(new_greedy_ids[0] ) with CaptureStdout() as cs: A_ : Any = TextStreamer(_lowerCamelCase , skip_prompt=_lowerCamelCase ) model.generate(_lowerCamelCase , max_new_tokens=10 , do_sample=_lowerCamelCase , streamer=_lowerCamelCase ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer A_ : Any = cs.out[:-1] self.assertEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : List[Any] ): """simple docstring""" A_ : List[Any] = AutoTokenizer.from_pretrained('''distilgpt2''' ) A_ : Tuple = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(_lowerCamelCase ) A_ : List[Any] = -1 A_ : Union[str, Any] = torch.ones((1, 5) , device=_lowerCamelCase ).long() * model.config.bos_token_id with CaptureStdout() as cs: A_ : List[Any] = TextStreamer(_lowerCamelCase , skip_special_tokens=_lowerCamelCase ) model.generate(_lowerCamelCase , max_new_tokens=1 , do_sample=_lowerCamelCase , streamer=_lowerCamelCase ) # The prompt contains a special token, so the streamer should not print it. As such, the output text, when # re-tokenized, must only contain one token A_ : List[str] = cs.out[:-1] # Remove the final "\n" A_ : List[Any] = tokenizer(_lowerCamelCase , return_tensors='''pt''' ) self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) ) def _a ( self : Union[str, Any] ): """simple docstring""" A_ : str = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) A_ : str = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowerCamelCase ) A_ : Union[str, Any] = -1 A_ : Union[str, Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCamelCase ) A_ : List[str] = TextIteratorStreamer(_lowerCamelCase , timeout=0.0_01 ) A_ : str = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer} A_ : List[str] = Thread(target=model.generate , kwargs=_lowerCamelCase ) thread.start() # The streamer will timeout after 0.001 seconds, so an exception will be raised with self.assertRaises(_lowerCamelCase ): A_ : str = '''''' for new_text in streamer: streamer_text += new_text
4
0
'''simple docstring''' import unittest from transformers import is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow if is_torch_available(): import torch from transformers import XLMRobertaModel @require_sentencepiece @require_tokenizers @require_torch class UpperCamelCase_ (unittest.TestCase ): """simple docstring""" @slow def _a ( self : int ): """simple docstring""" A_ : Optional[Any] = XLMRobertaModel.from_pretrained('''xlm-roberta-base''' ) A_ : Any = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]] ) # The dog is cute and lives in the garden house A_ : List[str] = torch.Size((1, 12, 768) ) # batch_size, sequence_length, embedding_vector_dim A_ : str = torch.tensor( [[-0.01_01, 0.12_18, -0.08_03, 0.08_01, 0.13_27, 0.07_76, -0.12_15, 0.23_83, 0.33_38, 0.31_06, 0.03_00, 0.02_52]] ) # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.eval() # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1] with torch.no_grad(): A_ : Union[str, Any] = model(_lowerCamelCase )['''last_hidden_state'''].detach() self.assertEqual(output.shape , _lowerCamelCase ) # compare the actual values for a slice of last dim self.assertTrue(torch.allclose(output[:, :, -1] , _lowerCamelCase , atol=1E-3 ) ) @slow def _a ( self : str ): """simple docstring""" A_ : Optional[Any] = XLMRobertaModel.from_pretrained('''xlm-roberta-large''' ) A_ : Union[str, Any] = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]] ) # The dog is cute and lives in the garden house A_ : Tuple = torch.Size((1, 12, 1024) ) # batch_size, sequence_length, embedding_vector_dim A_ : Union[str, Any] = torch.tensor( [[-0.06_99, -0.03_18, 0.07_05, -0.12_41, 0.09_99, -0.05_20, 0.10_04, -0.18_38, -0.47_04, 0.14_37, 0.08_21, 0.01_26]] ) # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large') # xlmr.eval() # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1] with torch.no_grad(): A_ : Tuple = model(_lowerCamelCase )['''last_hidden_state'''].detach() self.assertEqual(output.shape , _lowerCamelCase ) # compare the actual values for a slice of last dim self.assertTrue(torch.allclose(output[:, :, -1] , _lowerCamelCase , atol=1E-3 ) )
371
'''simple docstring''' import heapq def snake_case__ ( lowerCamelCase__ : dict ) -> set[int]: A_ : list[list] = [] # for each node and his adjacency list add them and the rank of the node to queue # using heapq module the queue will be filled like a Priority Queue # heapq works with a min priority queue, so I used -1*len(v) to build it for key, value in graph.items(): # O(log(n)) heapq.heappush(lowerCamelCase__ , [-1 * len(lowerCamelCase__ ), (key, value)] ) # chosen_vertices = set of chosen vertices A_ : str = set() # while queue isn't empty and there are still edges # (queue[0][0] is the rank of the node with max rank) while queue and queue[0][0] != 0: # extract vertex with max rank from queue and add it to chosen_vertices A_ : Tuple = heapq.heappop(lowerCamelCase__ )[1][0] chosen_vertices.add(lowerCamelCase__ ) # Remove all arcs adjacent to argmax for elem in queue: # if v haven't adjacent node, skip if elem[0] == 0: continue # if argmax is reachable from elem # remove argmax from elem's adjacent list and update his rank if argmax in elem[1][1]: A_ : List[str] = elem[1][1].index(lowerCamelCase__ ) del elem[1][1][index] elem[0] += 1 # re-order the queue heapq.heapify(lowerCamelCase__ ) return chosen_vertices if __name__ == "__main__": import doctest doctest.testmod() snake_case__ = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]} print(F'Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}')
4
0
'''simple docstring''' import numpy as np from transformers import BatchFeature from transformers.testing_utils import require_tf, require_torch from .test_feature_extraction_common import FeatureExtractionSavingTestMixin class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = None _lowerCAmelCase = None @property def _a ( self : int ): """simple docstring""" return self.feat_extract_tester.prepare_feat_extract_dict() def _a ( self : Any ): """simple docstring""" A_ : str = self.feature_extraction_class(**self.feat_extract_dict ) self.assertTrue(hasattr(_lowerCamelCase , '''feature_size''' ) ) self.assertTrue(hasattr(_lowerCamelCase , '''sampling_rate''' ) ) self.assertTrue(hasattr(_lowerCamelCase , '''padding_value''' ) ) def _a ( self : Dict ): """simple docstring""" A_ : str = self.feat_extract_tester.prepare_inputs_for_common() A_ : Dict = self.feature_extraction_class(**self.feat_extract_dict ) A_ : Optional[int] = feat_extract.model_input_names[0] A_ : List[str] = BatchFeature({input_name: speech_inputs} ) self.assertTrue(all(len(_lowerCamelCase ) == len(_lowerCamelCase ) for x, y in zip(_lowerCamelCase , processed_features[input_name] ) ) ) A_ : Dict = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_lowerCamelCase ) A_ : Union[str, Any] = BatchFeature({input_name: speech_inputs} , tensor_type='''np''' ) A_ : int = processed_features[input_name] if len(batch_features_input.shape ) < 3: A_ : str = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) ) @require_torch def _a ( self : Any ): """simple docstring""" A_ : List[str] = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_lowerCamelCase ) A_ : Any = self.feature_extraction_class(**self.feat_extract_dict ) A_ : int = feat_extract.model_input_names[0] A_ : List[Any] = BatchFeature({input_name: speech_inputs} , tensor_type='''pt''' ) A_ : List[Any] = processed_features[input_name] if len(batch_features_input.shape ) < 3: A_ : str = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) ) @require_tf def _a ( self : List[Any] ): """simple docstring""" A_ : Any = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_lowerCamelCase ) A_ : int = self.feature_extraction_class(**self.feat_extract_dict ) A_ : List[Any] = feat_extract.model_input_names[0] A_ : Dict = BatchFeature({input_name: speech_inputs} , tensor_type='''tf''' ) A_ : str = processed_features[input_name] if len(batch_features_input.shape ) < 3: A_ : Union[str, Any] = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) ) def _a ( self : Union[str, Any] , _lowerCamelCase : Dict=False ): """simple docstring""" def _inputs_have_equal_length(_lowerCamelCase : str ): A_ : Tuple = len(input[0] ) for input_slice in input[1:]: if len(_lowerCamelCase ) != length: return False return True def _inputs_are_equal(_lowerCamelCase : str , _lowerCamelCase : Tuple ): if len(_lowerCamelCase ) != len(_lowerCamelCase ): return False for input_slice_a, input_slice_a in zip(_lowerCamelCase , _lowerCamelCase ): if not np.allclose(np.asarray(_lowerCamelCase ) , np.asarray(_lowerCamelCase ) , atol=1E-3 ): return False return True A_ : Tuple = self.feature_extraction_class(**self.feat_extract_dict ) A_ : Any = self.feat_extract_tester.prepare_inputs_for_common(numpify=_lowerCamelCase ) A_ : Optional[Any] = feat_extract.model_input_names[0] A_ : Dict = BatchFeature({input_name: speech_inputs} ) A_ : Optional[int] = self.feat_extract_tester.seq_length_diff A_ : Optional[int] = self.feat_extract_tester.max_seq_length + pad_diff A_ : List[str] = self.feat_extract_tester.min_seq_length A_ : Tuple = self.feat_extract_tester.batch_size A_ : Any = self.feat_extract_tester.feature_size # test padding for List[int] + numpy A_ : List[str] = feat_extract.pad(_lowerCamelCase , padding=_lowerCamelCase ) A_ : str = input_a[input_name] A_ : Union[str, Any] = feat_extract.pad(_lowerCamelCase , padding='''longest''' ) A_ : Optional[Any] = input_a[input_name] A_ : Optional[int] = feat_extract.pad(_lowerCamelCase , padding='''max_length''' , max_length=len(speech_inputs[-1] ) ) A_ : Union[str, Any] = input_a[input_name] A_ : Optional[int] = feat_extract.pad(_lowerCamelCase , padding='''longest''' , return_tensors='''np''' ) A_ : Union[str, Any] = input_a[input_name] # max_length parameter has to be provided when setting `padding="max_length"` with self.assertRaises(_lowerCamelCase ): feat_extract.pad(_lowerCamelCase , padding='''max_length''' )[input_name] A_ : List[str] = feat_extract.pad( _lowerCamelCase , padding='''max_length''' , max_length=_lowerCamelCase , return_tensors='''np''' ) A_ : List[Any] = input_a[input_name] self.assertFalse(_inputs_have_equal_length(_lowerCamelCase ) ) self.assertTrue(_inputs_have_equal_length(_lowerCamelCase ) ) self.assertTrue(_inputs_have_equal_length(_lowerCamelCase ) ) self.assertTrue(_inputs_are_equal(_lowerCamelCase , _lowerCamelCase ) ) self.assertTrue(len(input_a[0] ) == pad_min_length ) self.assertTrue(len(input_a[1] ) == pad_min_length + pad_diff ) self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0] )) ) self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length) ) if feature_size > 1: self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size ) # test padding for `pad_to_multiple_of` for List[int] + numpy A_ : List[str] = feat_extract.pad(_lowerCamelCase , pad_to_multiple_of=10 ) A_ : int = input_a[input_name] A_ : Optional[Any] = feat_extract.pad(_lowerCamelCase , padding='''longest''' , pad_to_multiple_of=10 ) A_ : Optional[Any] = input_a[input_name] A_ : Any = feat_extract.pad( _lowerCamelCase , padding='''max_length''' , pad_to_multiple_of=10 , max_length=_lowerCamelCase ) A_ : Tuple = input_a[input_name] A_ : Any = feat_extract.pad( _lowerCamelCase , padding='''max_length''' , pad_to_multiple_of=10 , max_length=_lowerCamelCase , return_tensors='''np''' , ) A_ : List[str] = input_a[input_name] self.assertTrue(all(len(_lowerCamelCase ) % 10 == 0 for x in input_a ) ) self.assertTrue(_inputs_are_equal(_lowerCamelCase , _lowerCamelCase ) ) A_ : Union[str, Any] = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10 self.assertTrue(all(len(_lowerCamelCase ) == expected_mult_pad_length for x in input_a ) ) self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length) ) if feature_size > 1: self.assertTrue(input_a.shape[2] == feature_size ) # Check padding value is correct A_ : Tuple = (np.ones(self.feat_extract_tester.feature_size ) * feat_extract.padding_value).sum() self.assertTrue( abs(np.asarray(input_a[0] )[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1E-3 ) self.assertTrue( abs( np.asarray(input_a[1] )[pad_min_length + pad_diff :].sum() - padding_vector_sum * (pad_max_length - pad_min_length - pad_diff) ) < 1E-3 ) self.assertTrue( abs( np.asarray(input_a[2] )[pad_min_length + 2 * pad_diff :].sum() - padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff) ) < 1E-3 ) self.assertTrue( abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1E-3 ) self.assertTrue( abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length) ) < 1E-3 ) def _a ( self : Any , _lowerCamelCase : str=False ): """simple docstring""" def _inputs_have_equal_length(_lowerCamelCase : str ): A_ : Dict = len(input[0] ) for input_slice in input[1:]: if len(_lowerCamelCase ) != length: return False return True def _inputs_are_equal(_lowerCamelCase : Tuple , _lowerCamelCase : int ): if len(_lowerCamelCase ) != len(_lowerCamelCase ): return False for input_slice_a, input_slice_a in zip(_lowerCamelCase , _lowerCamelCase ): if not np.allclose(np.asarray(_lowerCamelCase ) , np.asarray(_lowerCamelCase ) , atol=1E-3 ): return False return True A_ : int = self.feature_extraction_class(**self.feat_extract_dict ) A_ : int = self.feat_extract_tester.prepare_inputs_for_common(numpify=_lowerCamelCase ) A_ : List[str] = feat_extract.model_input_names[0] A_ : Union[str, Any] = BatchFeature({input_name: speech_inputs} ) # truncate to smallest A_ : List[str] = feat_extract.pad( _lowerCamelCase , padding='''max_length''' , max_length=len(speech_inputs[0] ) , truncation=_lowerCamelCase ) A_ : Any = input_a[input_name] A_ : Optional[Any] = feat_extract.pad(_lowerCamelCase , padding='''max_length''' , max_length=len(speech_inputs[0] ) ) A_ : List[str] = input_a[input_name] self.assertTrue(_inputs_have_equal_length(_lowerCamelCase ) ) self.assertFalse(_inputs_have_equal_length(_lowerCamelCase ) ) # truncate to smallest with np A_ : Optional[Any] = feat_extract.pad( _lowerCamelCase , padding='''max_length''' , max_length=len(speech_inputs[0] ) , return_tensors='''np''' , truncation=_lowerCamelCase , ) A_ : str = input_a[input_name] A_ : str = feat_extract.pad( _lowerCamelCase , padding='''max_length''' , max_length=len(speech_inputs[0] ) , return_tensors='''np''' ) A_ : Optional[Any] = input_a[input_name] self.assertTrue(_inputs_have_equal_length(_lowerCamelCase ) ) self.assertTrue(input_a.shape[1] == len(speech_inputs[0] ) ) # since truncation forces padding to be smaller than longest input # function can't return `np.ndarray`, but has to return list self.assertFalse(_inputs_have_equal_length(_lowerCamelCase ) ) # truncate to middle A_ : Optional[Any] = feat_extract.pad( _lowerCamelCase , padding='''max_length''' , max_length=len(speech_inputs[1] ) , truncation=_lowerCamelCase , return_tensors='''np''' , ) A_ : str = input_a[input_name] A_ : Optional[int] = feat_extract.pad( _lowerCamelCase , padding='''max_length''' , max_length=len(speech_inputs[1] ) , truncation=_lowerCamelCase ) A_ : Union[str, Any] = input_a[input_name] A_ : Optional[int] = feat_extract.pad( _lowerCamelCase , padding='''max_length''' , max_length=len(speech_inputs[1] ) , return_tensors='''np''' ) A_ : Union[str, Any] = input_a[input_name] self.assertTrue(input_a.shape[1] == len(speech_inputs[1] ) ) self.assertTrue(_inputs_have_equal_length(_lowerCamelCase ) ) self.assertTrue(_inputs_have_equal_length(_lowerCamelCase ) ) self.assertTrue(_inputs_are_equal(_lowerCamelCase , _lowerCamelCase ) ) # since truncation forces padding to be smaller than longest input # function can't return `np.ndarray`, but has to return list self.assertFalse(_inputs_have_equal_length(_lowerCamelCase ) ) self.assertTrue(len(input_a[-1] ) == len(speech_inputs[-1] ) ) # padding has to be max_length when setting `truncation=True` with self.assertRaises(_lowerCamelCase ): feat_extract.pad(_lowerCamelCase , truncation=_lowerCamelCase )[input_name] # padding has to be max_length when setting `truncation=True` with self.assertRaises(_lowerCamelCase ): feat_extract.pad(_lowerCamelCase , padding='''longest''' , truncation=_lowerCamelCase )[input_name] # padding has to be max_length when setting `truncation=True` with self.assertRaises(_lowerCamelCase ): feat_extract.pad(_lowerCamelCase , padding='''longest''' , truncation=_lowerCamelCase )[input_name] # max_length parameter has to be provided when setting `truncation=True` and padding="max_length" with self.assertRaises(_lowerCamelCase ): feat_extract.pad(_lowerCamelCase , padding='''max_length''' , truncation=_lowerCamelCase )[input_name] # test truncation for `pad_to_multiple_of` for List[int] + numpy A_ : str = 12 A_ : str = feat_extract.pad( _lowerCamelCase , padding='''max_length''' , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=_lowerCamelCase , truncation=_lowerCamelCase , ) A_ : Tuple = input_a[input_name] A_ : str = feat_extract.pad( _lowerCamelCase , padding='''max_length''' , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=_lowerCamelCase , ) A_ : Any = input_a[input_name] # retrieve expected_length as multiple of pad_to_multiple_of A_ : Tuple = len(speech_inputs[0] ) if expected_length % pad_to_multiple_of != 0: A_ : Optional[int] = ((len(speech_inputs[0] ) // pad_to_multiple_of) + 1) * pad_to_multiple_of self.assertTrue(len(input_a[0] ) == expected_length ) self.assertTrue(_inputs_have_equal_length(_lowerCamelCase ) ) self.assertFalse(_inputs_have_equal_length(_lowerCamelCase ) ) def _a ( self : Optional[int] ): """simple docstring""" self._check_padding(numpify=_lowerCamelCase ) def _a ( self : str ): """simple docstring""" self._check_padding(numpify=_lowerCamelCase ) def _a ( self : str ): """simple docstring""" self._check_truncation(numpify=_lowerCamelCase ) def _a ( self : Tuple ): """simple docstring""" self._check_truncation(numpify=_lowerCamelCase ) @require_torch def _a ( self : List[Any] ): """simple docstring""" A_ : Tuple = self.feature_extraction_class(**self.feat_extract_dict ) A_ : Optional[int] = self.feat_extract_tester.prepare_inputs_for_common() A_ : Union[str, Any] = feat_extract.model_input_names[0] A_ : Optional[int] = BatchFeature({input_name: speech_inputs} ) A_ : str = feat_extract.pad(_lowerCamelCase , padding='''longest''' , return_tensors='''np''' )[input_name] A_ : Any = feat_extract.pad(_lowerCamelCase , padding='''longest''' , return_tensors='''pt''' )[input_name] self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1E-2 ) @require_tf def _a ( self : Optional[int] ): """simple docstring""" A_ : int = self.feature_extraction_class(**self.feat_extract_dict ) A_ : Optional[int] = self.feat_extract_tester.prepare_inputs_for_common() A_ : Union[str, Any] = feat_extract.model_input_names[0] A_ : Optional[Any] = BatchFeature({input_name: speech_inputs} ) A_ : Tuple = feat_extract.pad(_lowerCamelCase , padding='''longest''' , return_tensors='''np''' )[input_name] A_ : Optional[int] = feat_extract.pad(_lowerCamelCase , padding='''longest''' , return_tensors='''tf''' )[input_name] self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_tf.numpy().astype(np.floataa ).sum() ) < 1E-2 ) def _a ( self : int ): """simple docstring""" A_ : Optional[Any] = self.feat_extract_dict A_ : Tuple = True A_ : Optional[Any] = self.feature_extraction_class(**_lowerCamelCase ) A_ : Optional[Any] = self.feat_extract_tester.prepare_inputs_for_common() A_ : List[str] = [len(_lowerCamelCase ) for x in speech_inputs] A_ : int = feat_extract.model_input_names[0] A_ : Tuple = BatchFeature({input_name: speech_inputs} ) A_ : Any = feat_extract.pad(_lowerCamelCase , padding='''longest''' , return_tensors='''np''' ) self.assertIn('''attention_mask''' , _lowerCamelCase ) self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) ) self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , _lowerCamelCase ) def _a ( self : Dict ): """simple docstring""" A_ : Any = self.feat_extract_dict A_ : Dict = True A_ : List[str] = self.feature_extraction_class(**_lowerCamelCase ) A_ : str = self.feat_extract_tester.prepare_inputs_for_common() A_ : Dict = [len(_lowerCamelCase ) for x in speech_inputs] A_ : Union[str, Any] = feat_extract.model_input_names[0] A_ : str = BatchFeature({input_name: speech_inputs} ) A_ : Optional[Any] = min(_lowerCamelCase ) A_ : int = feat_extract.pad( _lowerCamelCase , padding='''max_length''' , max_length=_lowerCamelCase , truncation=_lowerCamelCase , return_tensors='''np''' ) self.assertIn('''attention_mask''' , _lowerCamelCase ) self.assertListEqual( list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] ) self.assertListEqual( processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
350
'''simple docstring''' import argparse from collections import OrderedDict from pathlib import Path import torch from huggingface_hub import hf_hub_download from PIL import Image from torchvision.transforms import functional as F from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection from transformers.utils import logging logging.set_verbosity_info() snake_case__ = logging.get_logger(__name__) # here we list all keys to be renamed (original name on the left, our name on the right) snake_case__ = [] for i in range(6): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (F'transformer.encoder.layers.{i}.self_attn.out_proj.weight', F'encoder.layers.{i}.self_attn.out_proj.weight') ) rename_keys.append( (F'transformer.encoder.layers.{i}.self_attn.out_proj.bias', F'encoder.layers.{i}.self_attn.out_proj.bias') ) rename_keys.append((F'transformer.encoder.layers.{i}.linear1.weight', F'encoder.layers.{i}.fc1.weight')) rename_keys.append((F'transformer.encoder.layers.{i}.linear1.bias', F'encoder.layers.{i}.fc1.bias')) rename_keys.append((F'transformer.encoder.layers.{i}.linear2.weight', F'encoder.layers.{i}.fc2.weight')) rename_keys.append((F'transformer.encoder.layers.{i}.linear2.bias', F'encoder.layers.{i}.fc2.bias')) rename_keys.append( (F'transformer.encoder.layers.{i}.norm1.weight', F'encoder.layers.{i}.self_attn_layer_norm.weight') ) rename_keys.append((F'transformer.encoder.layers.{i}.norm1.bias', F'encoder.layers.{i}.self_attn_layer_norm.bias')) rename_keys.append((F'transformer.encoder.layers.{i}.norm2.weight', F'encoder.layers.{i}.final_layer_norm.weight')) rename_keys.append((F'transformer.encoder.layers.{i}.norm2.bias', F'encoder.layers.{i}.final_layer_norm.bias')) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( (F'transformer.decoder.layers.{i}.self_attn.out_proj.weight', F'decoder.layers.{i}.self_attn.out_proj.weight') ) rename_keys.append( (F'transformer.decoder.layers.{i}.self_attn.out_proj.bias', F'decoder.layers.{i}.self_attn.out_proj.bias') ) rename_keys.append( ( F'transformer.decoder.layers.{i}.multihead_attn.out_proj.weight', F'decoder.layers.{i}.encoder_attn.out_proj.weight', ) ) rename_keys.append( ( F'transformer.decoder.layers.{i}.multihead_attn.out_proj.bias', F'decoder.layers.{i}.encoder_attn.out_proj.bias', ) ) rename_keys.append((F'transformer.decoder.layers.{i}.linear1.weight', F'decoder.layers.{i}.fc1.weight')) rename_keys.append((F'transformer.decoder.layers.{i}.linear1.bias', F'decoder.layers.{i}.fc1.bias')) rename_keys.append((F'transformer.decoder.layers.{i}.linear2.weight', F'decoder.layers.{i}.fc2.weight')) rename_keys.append((F'transformer.decoder.layers.{i}.linear2.bias', F'decoder.layers.{i}.fc2.bias')) rename_keys.append( (F'transformer.decoder.layers.{i}.norm1.weight', F'decoder.layers.{i}.self_attn_layer_norm.weight') ) rename_keys.append((F'transformer.decoder.layers.{i}.norm1.bias', F'decoder.layers.{i}.self_attn_layer_norm.bias')) rename_keys.append( (F'transformer.decoder.layers.{i}.norm2.weight', F'decoder.layers.{i}.encoder_attn_layer_norm.weight') ) rename_keys.append( (F'transformer.decoder.layers.{i}.norm2.bias', F'decoder.layers.{i}.encoder_attn_layer_norm.bias') ) rename_keys.append((F'transformer.decoder.layers.{i}.norm3.weight', F'decoder.layers.{i}.final_layer_norm.weight')) rename_keys.append((F'transformer.decoder.layers.{i}.norm3.bias', F'decoder.layers.{i}.final_layer_norm.bias')) # convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads rename_keys.extend( [ ("""input_proj.weight""", """input_projection.weight"""), ("""input_proj.bias""", """input_projection.bias"""), ("""query_embed.weight""", """query_position_embeddings.weight"""), ("""transformer.encoder.norm.weight""", """encoder.layernorm.weight"""), ("""transformer.encoder.norm.bias""", """encoder.layernorm.bias"""), ("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""), ("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""), ("""class_embed.weight""", """class_labels_classifier.weight"""), ("""class_embed.bias""", """class_labels_classifier.bias"""), ("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""), ("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""), ("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""), ("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""), ("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""), ("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""), ] ) def snake_case__ ( lowerCamelCase__ : List[Any] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : List[Any] ) -> Optional[Any]: A_ : Tuple = state_dict.pop(lowerCamelCase__ ) A_ : Optional[Any] = val def snake_case__ ( lowerCamelCase__ : Dict ) -> Any: A_ : int = OrderedDict() for key, value in state_dict.items(): if "backbone.0.body" in key: A_ : int = key.replace('''backbone.0.body''' , '''backbone.conv_encoder.model''' ) A_ : List[str] = value else: A_ : Optional[int] = value return new_state_dict def snake_case__ ( lowerCamelCase__ : Union[str, Any] ) -> Optional[Any]: A_ : Any = '''''' # first: transformer encoder for i in range(6 ): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) A_ : Tuple = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight' ) A_ : Dict = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias' ) # next, add query, keys and values (in that order) to the state dict A_ : str = in_proj_weight[:2_5_6, :] A_ : Optional[Any] = in_proj_bias[:2_5_6] A_ : Dict = in_proj_weight[2_5_6:5_1_2, :] A_ : Tuple = in_proj_bias[2_5_6:5_1_2] A_ : Tuple = in_proj_weight[-2_5_6:, :] A_ : Optional[int] = in_proj_bias[-2_5_6:] # next: transformer decoder (which is a bit more complex because it also includes cross-attention) for i in range(6 ): # read in weights + bias of input projection layer of self-attention A_ : Union[str, Any] = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight' ) A_ : Dict = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias' ) # next, add query, keys and values (in that order) to the state dict A_ : List[str] = in_proj_weight[:2_5_6, :] A_ : int = in_proj_bias[:2_5_6] A_ : Any = in_proj_weight[2_5_6:5_1_2, :] A_ : List[str] = in_proj_bias[2_5_6:5_1_2] A_ : Union[str, Any] = in_proj_weight[-2_5_6:, :] A_ : Optional[Any] = in_proj_bias[-2_5_6:] # read in weights + bias of input projection layer of cross-attention A_ : Tuple = state_dict.pop( f'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight' ) A_ : Optional[Any] = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias' ) # next, add query, keys and values (in that order) of cross-attention to the state dict A_ : Dict = in_proj_weight_cross_attn[:2_5_6, :] A_ : Tuple = in_proj_bias_cross_attn[:2_5_6] A_ : int = in_proj_weight_cross_attn[2_5_6:5_1_2, :] A_ : List[str] = in_proj_bias_cross_attn[2_5_6:5_1_2] A_ : Any = in_proj_weight_cross_attn[-2_5_6:, :] A_ : Any = in_proj_bias_cross_attn[-2_5_6:] def snake_case__ ( lowerCamelCase__ : List[str] , lowerCamelCase__ : Tuple ) -> Dict: A_ ,A_ : int = image.size A_ : Tuple = max(lowerCamelCase__ , lowerCamelCase__ ) A_ : Optional[Any] = 8_0_0 if '''detection''' in checkpoint_url else 1_0_0_0 A_ : Union[str, Any] = target_max_size / current_max_size A_ : Any = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) ) return resized_image def snake_case__ ( lowerCamelCase__ : Tuple ) -> str: A_ : Any = F.to_tensor(lowerCamelCase__ ) A_ : Optional[Any] = F.normalize(lowerCamelCase__ , mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ) return image @torch.no_grad() def snake_case__ ( lowerCamelCase__ : List[Any] , lowerCamelCase__ : int , lowerCamelCase__ : int ) -> str: logger.info('''Converting model...''' ) # load original state dict A_ : Tuple = torch.hub.load_state_dict_from_url(lowerCamelCase__ , map_location='''cpu''' ) # rename keys for src, dest in rename_keys: rename_key(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) A_ : str = rename_backbone_keys(lowerCamelCase__ ) # query, key and value matrices need special treatment read_in_q_k_v(lowerCamelCase__ ) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them A_ : List[Any] = '''model.''' for key in state_dict.copy().keys(): if not key.startswith('''class_labels_classifier''' ) and not key.startswith('''bbox_predictor''' ): A_ : List[Any] = state_dict.pop(lowerCamelCase__ ) A_ : str = val # create HuggingFace model and load state dict A_ : Union[str, Any] = TableTransformerConfig( backbone='''resnet18''' , mask_loss_coefficient=1 , dice_loss_coefficient=1 , ce_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.4 , class_cost=1 , bbox_cost=5 , giou_cost=2 , ) if "detection" in checkpoint_url: A_ : Dict = 1_5 A_ : Dict = 2 A_ : int = {0: '''table''', 1: '''table rotated'''} A_ : List[str] = idalabel A_ : Optional[int] = {v: k for k, v in idalabel.items()} else: A_ : Union[str, Any] = 1_2_5 A_ : Optional[Any] = 6 A_ : Optional[Any] = { 0: '''table''', 1: '''table column''', 2: '''table row''', 3: '''table column header''', 4: '''table projected row header''', 5: '''table spanning cell''', } A_ : int = idalabel A_ : Tuple = {v: k for k, v in idalabel.items()} A_ : Optional[Any] = DetrImageProcessor( format='''coco_detection''' , max_size=8_0_0 if '''detection''' in checkpoint_url else 1_0_0_0 ) A_ : int = TableTransformerForObjectDetection(lowerCamelCase__ ) model.load_state_dict(lowerCamelCase__ ) model.eval() # verify our conversion A_ : Optional[int] = '''example_pdf.png''' if '''detection''' in checkpoint_url else '''example_table.png''' A_ : Union[str, Any] = hf_hub_download(repo_id='''nielsr/example-pdf''' , repo_type='''dataset''' , filename=lowerCamelCase__ ) A_ : Tuple = Image.open(lowerCamelCase__ ).convert('''RGB''' ) A_ : int = normalize(resize(lowerCamelCase__ , lowerCamelCase__ ) ).unsqueeze(0 ) A_ : str = model(lowerCamelCase__ ) if "detection" in checkpoint_url: A_ : str = (1, 1_5, 3) A_ : int = torch.tensor( [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]] ) A_ : Tuple = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]] ) else: A_ : Optional[int] = (1, 1_2_5, 7) A_ : Dict = torch.tensor( [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]] ) A_ : Any = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]] ) assert outputs.logits.shape == expected_shape assert torch.allclose(outputs.logits[0, :3, :3] , lowerCamelCase__ , atol=1e-4 ) assert torch.allclose(outputs.pred_boxes[0, :3, :3] , lowerCamelCase__ , atol=1e-4 ) print('''Looks ok!''' ) if pytorch_dump_folder_path is not None: # Save model and image processor logger.info(f'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' ) Path(lowerCamelCase__ ).mkdir(exist_ok=lowerCamelCase__ ) model.save_pretrained(lowerCamelCase__ ) image_processor.save_pretrained(lowerCamelCase__ ) if push_to_hub: # Push model to HF hub logger.info('''Pushing model to the hub...''' ) A_ : List[Any] = ( '''microsoft/table-transformer-detection''' if '''detection''' in checkpoint_url else '''microsoft/table-transformer-structure-recognition''' ) model.push_to_hub(lowerCamelCase__ ) image_processor.push_to_hub(lowerCamelCase__ ) if __name__ == "__main__": snake_case__ = argparse.ArgumentParser() parser.add_argument( """--checkpoint_url""", default="""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""", type=str, choices=[ """https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""", """https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth""", ], help="""URL of the Table Transformer checkpoint you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) snake_case__ = parser.parse_args() convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
4
0
'''simple docstring''' import logging import sys from dataclasses import dataclass, field from typing import Any, Dict, List, Optional, Union import librosa import torch from datasets import DatasetDict, load_dataset from packaging import version from torch import nn from transformers import ( HfArgumentParser, Trainer, TrainingArguments, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaForPreTraining, is_apex_available, trainer_utils, ) from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices if is_apex_available(): from apex import amp if version.parse(version.parse(torch.__version__).base_version) >= version.parse("""1.6"""): snake_case__ = True from torch.cuda.amp import autocast snake_case__ = logging.getLogger(__name__) @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = field( metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} ) _lowerCAmelCase = field( default=a__, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'}, ) _lowerCAmelCase = field( default=a__, metadata={'help': 'Whether to freeze the feature extractor layers of the model.'} ) _lowerCAmelCase = field( default=a__, metadata={'help': 'Whether to log verbose messages or not.'}, ) _lowerCAmelCase = field( default=2.0, metadata={'help': 'Maximum temperature for gumbel softmax.'} ) _lowerCAmelCase = field( default=0.5, metadata={'help': 'Minimum temperature for gumbel softmax.'} ) _lowerCAmelCase = field( default=0.99_99_95, metadata={'help': 'Decay of gumbel temperature during training.'} ) def snake_case__ ( lowerCamelCase__ : ModelArguments , lowerCamelCase__ : TrainingArguments ) -> str: logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , ) A_ : Any = logging.WARNING if model_args.verbose_logging: A_ : Optional[int] = logging.DEBUG elif trainer_utils.is_main_process(training_args.local_rank ): A_ : List[Any] = logging.INFO logger.setLevel(lowerCamelCase__ ) @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = field( default=a__, metadata={'help': 'The name of the dataset to use (via the datasets library).'} ) _lowerCAmelCase = field( default=a__, metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} ) _lowerCAmelCase = field( default='train', metadata={ 'help': 'The name of the training data set split to use (via the datasets library). Defaults to \'train\'' }, ) _lowerCAmelCase = field( default='validation', metadata={ 'help': ( 'The name of the validation data set split to use (via the datasets library). Defaults to \'validation\'' ) }, ) _lowerCAmelCase = field( default='file', metadata={'help': 'Column in the dataset that contains speech file path. Defaults to \'file\''}, ) _lowerCAmelCase = field( default=a__, metadata={'help': 'Overwrite the cached preprocessed datasets or not.'} ) _lowerCAmelCase = field( default=1, metadata={ 'help': 'The percentage of the train set used as validation set in case there\'s no validation split' }, ) _lowerCAmelCase = field( default=a__, metadata={'help': 'The number of processes to use for the preprocessing.'}, ) _lowerCAmelCase = field( default=20.0, metadata={'help': 'Filter audio files that are longer than `max_duration_in_seconds` seconds'} ) @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = 4_2 _lowerCAmelCase = 4_2 _lowerCAmelCase = 'longest' _lowerCAmelCase = None _lowerCAmelCase = None def __call__( self : Tuple , _lowerCamelCase : List[Dict[str, Union[List[int], torch.Tensor]]] ): """simple docstring""" A_ : str = self.feature_extractor.pad( _lowerCamelCase , max_length=self.max_length , padding=self.padding , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' , ) A_ : Dict = self.model._get_feat_extract_output_lengths(batch['''input_values'''].shape[-1] ) A_ : Optional[Any] = batch['''input_values'''].shape[0] # make sure that no loss is computed on padded inputs if batch["attention_mask"] is not None: # compute real output lengths according to convolution formula A_ : int = self.model._get_feat_extract_output_lengths(batch['''attention_mask'''].sum(-1 ) ).to( torch.long ) A_ : int = torch.zeros( (batch_size, mask_indices_seq_length) , dtype=torch.long , device=batch['''input_values'''].device ) # these two operations makes sure that all values # before the output lengths indices are attended to A_ : Dict = 1 A_ : int = attention_mask.flip([-1] ).cumsum(-1 ).flip([-1] ).bool() # sample randomly masked indices A_ : List[str] = _compute_mask_indices( (batch_size, mask_indices_seq_length) , self.model.config.mask_time_prob , self.model.config.mask_time_length , attention_mask=_lowerCamelCase , min_masks=2 , ) return batch class UpperCamelCase_ (a__ ): """simple docstring""" def __init__( self : Tuple , *_lowerCamelCase : Any , _lowerCamelCase : List[Any]=1 , _lowerCamelCase : Tuple=0 , _lowerCamelCase : Optional[int]=1.0 , **_lowerCamelCase : Any ): """simple docstring""" super().__init__(*_lowerCamelCase , **_lowerCamelCase ) A_ : Optional[Any] = 0 A_ : Tuple = max_gumbel_temp A_ : Union[str, Any] = min_gumbel_temp A_ : Any = gumbel_temp_decay def _a ( self : Tuple , _lowerCamelCase : nn.Module , _lowerCamelCase : Dict[str, Union[torch.Tensor, Any]] ): """simple docstring""" model.train() A_ : int = self._prepare_inputs(_lowerCamelCase ) if self.use_amp: with autocast(): A_ : Optional[int] = self.compute_loss(_lowerCamelCase , _lowerCamelCase ) else: A_ : Tuple = self.compute_loss(_lowerCamelCase , _lowerCamelCase ) if self.args.n_gpu > 1 or self.deepspeed: if model.module.config.ctc_loss_reduction == "mean": A_ : Optional[int] = loss.mean() elif model.module.config.ctc_loss_reduction == "sum": A_ : Union[str, Any] = loss.sum() / (inputs['''mask_time_indices''']).sum() else: raise ValueError(f'{model.config.ctc_loss_reduction} is not valid. Choose one of [\'mean\', \'sum\']' ) if self.args.gradient_accumulation_steps > 1: A_ : Optional[int] = loss / self.args.gradient_accumulation_steps if self.use_amp: self.scaler.scale(_lowerCamelCase ).backward() elif self.use_apex: with amp.scale_loss(_lowerCamelCase , self.optimizer ) as scaled_loss: scaled_loss.backward() elif self.deepspeed: self.deepspeed.backward(_lowerCamelCase ) else: loss.backward() self.num_update_step += 1 # make sure gumbel softmax temperature is decayed if self.args.n_gpu > 1 or self.deepspeed: model.module.set_gumbel_temperature( max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) ) else: model.set_gumbel_temperature( max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) ) return loss.detach() def snake_case__ ( ) -> Dict: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. A_ : List[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) A_ : Optional[Any] = parser.parse_args_into_dataclasses() configure_logger(lowerCamelCase__ , lowerCamelCase__ ) # Downloading and loading a dataset from the hub. A_ : int = load_dataset(data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir ) if "validation" not in datasets.keys(): # make sure only "validation" and "train" keys remain" A_ : Dict = DatasetDict() A_ : Tuple = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=f'{data_args.train_split_name}[:{data_args.validation_split_percentage}%]' , cache_dir=model_args.cache_dir , ) A_ : str = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=f'{data_args.train_split_name}[{data_args.validation_split_percentage}%:]' , cache_dir=model_args.cache_dir , ) else: # make sure only "validation" and "train" keys remain" A_ : List[Any] = DatasetDict() A_ : Dict = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split='''validation''' , cache_dir=model_args.cache_dir , ) A_ : Optional[Any] = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=f'{data_args.train_split_name}' , cache_dir=model_args.cache_dir , ) # only normalized-inputs-training is supported A_ : Union[str, Any] = WavaVecaFeatureExtractor.from_pretrained( model_args.model_name_or_path , cache_dir=model_args.cache_dir , do_normalize=lowerCamelCase__ ) def prepare_dataset(lowerCamelCase__ : List[str] ): # check that all files have the correct sampling rate A_ : str = librosa.load(batch[data_args.speech_file_column] , sr=feature_extractor.sampling_rate ) return batch # load audio files into numpy arrays A_ : Union[str, Any] = datasets.map( lowerCamelCase__ , num_proc=data_args.preprocessing_num_workers , remove_columns=datasets['''train'''].column_names ) # filter audio files that are too long A_ : List[str] = vectorized_datasets.filter( lambda lowerCamelCase__ : len(data['''speech'''] ) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate ) ) def normalize(lowerCamelCase__ : Optional[int] ): return feature_extractor(batch['''speech'''] , sampling_rate=feature_extractor.sampling_rate ) # normalize and transform to `BatchFeatures` A_ : Optional[int] = vectorized_datasets.map( lowerCamelCase__ , batched=lowerCamelCase__ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , remove_columns=vectorized_datasets['''train'''].column_names , ) # pretraining is only supported for "newer" stable layer norm architecture # apply_spec_augment has to be True, mask_feature_prob has to be 0.0 A_ : int = WavaVecaConfig.from_pretrained( model_args.model_name_or_path , cache_dir=model_args.cache_dir , gradient_checkpointing=training_args.gradient_checkpointing , ) if not config.do_stable_layer_norm or config.feat_extract_norm != "layer": raise ValueError( '''PreTraining is only supported for ``config.do_stable_layer_norm=True`` and''' ''' ``config.feat_extract_norm=\'layer\'''' ) A_ : Optional[int] = WavaVecaForPreTraining(lowerCamelCase__ ) A_ : str = DataCollatorForWavaVecaPretraining(model=lowerCamelCase__ , feature_extractor=lowerCamelCase__ ) A_ : Optional[int] = WavaVecaPreTrainer( model=lowerCamelCase__ , data_collator=lowerCamelCase__ , args=lowerCamelCase__ , train_dataset=vectorized_datasets['''train'''] , eval_dataset=vectorized_datasets['''validation'''] , tokenizer=lowerCamelCase__ , max_gumbel_temp=model_args.max_gumbel_temperature , min_gumbel_temp=model_args.min_gumbel_temperature , gumbel_temp_decay=model_args.gumbel_temperature_decay , ) trainer.train() if __name__ == "__main__": main()
351
'''simple docstring''' import logging import os from dataclasses import dataclass from typing import List, Optional, Union import tqdm from filelock import FileLock from transformers import ( BartTokenizer, BartTokenizerFast, DataProcessor, PreTrainedTokenizer, RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, is_tf_available, is_torch_available, ) snake_case__ = logging.getLogger(__name__) @dataclass(frozen=a__ ) class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = 42 _lowerCAmelCase = 42 _lowerCAmelCase = None _lowerCAmelCase = None _lowerCAmelCase = None @dataclass(frozen=a__ ) class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = 42 _lowerCAmelCase = None _lowerCAmelCase = None _lowerCAmelCase = None _lowerCAmelCase = None if is_torch_available(): import torch from torch.utils.data import Dataset class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = 42 def __init__( self : Optional[int] , _lowerCamelCase : str , _lowerCamelCase : PreTrainedTokenizer , _lowerCamelCase : str , _lowerCamelCase : Optional[int] = None , _lowerCamelCase : List[Any]=False , _lowerCamelCase : bool = False , ): """simple docstring""" A_ : Optional[int] = hans_processors[task]() A_ : int = os.path.join( _lowerCamelCase , '''cached_{}_{}_{}_{}'''.format( '''dev''' if evaluate else '''train''' , tokenizer.__class__.__name__ , str(_lowerCamelCase ) , _lowerCamelCase , ) , ) A_ : Dict = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) A_ ,A_ : List[str] = label_list[2], label_list[1] A_ : Optional[int] = label_list # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. A_ : str = cached_features_file + '''.lock''' with FileLock(_lowerCamelCase ): if os.path.exists(_lowerCamelCase ) and not overwrite_cache: logger.info(f'Loading features from cached file {cached_features_file}' ) A_ : List[str] = torch.load(_lowerCamelCase ) else: logger.info(f'Creating features from dataset file at {data_dir}' ) A_ : Optional[int] = ( processor.get_dev_examples(_lowerCamelCase ) if evaluate else processor.get_train_examples(_lowerCamelCase ) ) logger.info('''Training examples: %s''' , len(_lowerCamelCase ) ) A_ : Optional[int] = hans_convert_examples_to_features(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) logger.info('''Saving features into cached file %s''' , _lowerCamelCase ) torch.save(self.features , _lowerCamelCase ) def __len__( self : List[str] ): """simple docstring""" return len(self.features ) def __getitem__( self : List[str] , _lowerCamelCase : Optional[int] ): """simple docstring""" return self.features[i] def _a ( self : str ): """simple docstring""" return self.label_list if is_tf_available(): import tensorflow as tf class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = 42 def __init__( self : Optional[int] , _lowerCamelCase : str , _lowerCamelCase : PreTrainedTokenizer , _lowerCamelCase : str , _lowerCamelCase : Optional[int] = 128 , _lowerCamelCase : Dict=False , _lowerCamelCase : bool = False , ): """simple docstring""" A_ : Optional[int] = hans_processors[task]() A_ : Optional[int] = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) A_ ,A_ : Union[str, Any] = label_list[2], label_list[1] A_ : Tuple = label_list A_ : Optional[int] = processor.get_dev_examples(_lowerCamelCase ) if evaluate else processor.get_train_examples(_lowerCamelCase ) A_ : Tuple = hans_convert_examples_to_features(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) def gen(): for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='''convert examples to features''' ): if ex_index % 10000 == 0: logger.info('''Writing example %d of %d''' % (ex_index, len(_lowerCamelCase )) ) yield ( { "example_id": 0, "input_ids": ex.input_ids, "attention_mask": ex.attention_mask, "token_type_ids": ex.token_type_ids, }, ex.label, ) A_ : List[Any] = tf.data.Dataset.from_generator( _lowerCamelCase , ( { '''example_id''': tf.intaa, '''input_ids''': tf.intaa, '''attention_mask''': tf.intaa, '''token_type_ids''': tf.intaa, }, tf.intaa, ) , ( { '''example_id''': tf.TensorShape([] ), '''input_ids''': tf.TensorShape([None, None] ), '''attention_mask''': tf.TensorShape([None, None] ), '''token_type_ids''': tf.TensorShape([None, None] ), }, tf.TensorShape([] ), ) , ) def _a ( self : Any ): """simple docstring""" return self.dataset def __len__( self : Dict ): """simple docstring""" return len(self.features ) def __getitem__( self : Optional[int] , _lowerCamelCase : List[str] ): """simple docstring""" return self.features[i] def _a ( self : Tuple ): """simple docstring""" return self.label_list class UpperCamelCase_ (a__ ): """simple docstring""" def _a ( self : List[str] , _lowerCamelCase : Union[str, Any] ): """simple docstring""" return self._create_examples(self._read_tsv(os.path.join(_lowerCamelCase , '''heuristics_train_set.txt''' ) ) , '''train''' ) def _a ( self : List[str] , _lowerCamelCase : Tuple ): """simple docstring""" return self._create_examples(self._read_tsv(os.path.join(_lowerCamelCase , '''heuristics_evaluation_set.txt''' ) ) , '''dev''' ) def _a ( self : Any ): """simple docstring""" return ["contradiction", "entailment", "neutral"] def _a ( self : Optional[Any] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Any ): """simple docstring""" A_ : Tuple = [] for i, line in enumerate(_lowerCamelCase ): if i == 0: continue A_ : str = '''%s-%s''' % (set_type, line[0]) A_ : Optional[Any] = line[5] A_ : Union[str, Any] = line[6] A_ : List[str] = line[7][2:] if line[7].startswith('''ex''' ) else line[7] A_ : str = line[0] examples.append(InputExample(guid=_lowerCamelCase , text_a=_lowerCamelCase , text_b=_lowerCamelCase , label=_lowerCamelCase , pairID=_lowerCamelCase ) ) return examples def snake_case__ ( lowerCamelCase__ : List[InputExample] , lowerCamelCase__ : List[str] , lowerCamelCase__ : int , lowerCamelCase__ : PreTrainedTokenizer , ) -> int: A_ : Union[str, Any] = {label: i for i, label in enumerate(lowerCamelCase__ )} A_ : Optional[Any] = [] for ex_index, example in tqdm.tqdm(enumerate(lowerCamelCase__ ) , desc='''convert examples to features''' ): if ex_index % 1_0_0_0_0 == 0: logger.info('''Writing example %d''' % (ex_index) ) A_ : Optional[int] = tokenizer( example.text_a , example.text_b , add_special_tokens=lowerCamelCase__ , max_length=lowerCamelCase__ , padding='''max_length''' , truncation=lowerCamelCase__ , return_overflowing_tokens=lowerCamelCase__ , ) A_ : List[str] = label_map[example.label] if example.label in label_map else 0 A_ : Tuple = int(example.pairID ) features.append(InputFeatures(**lowerCamelCase__ , label=lowerCamelCase__ , pairID=lowerCamelCase__ ) ) for i, example in enumerate(examples[:5] ): logger.info('''*** Example ***''' ) logger.info(f'guid: {example}' ) logger.info(f'features: {features[i]}' ) return features snake_case__ = { """hans""": 3, } snake_case__ = { """hans""": HansProcessor, }
4
0
'''simple docstring''' from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxSeqaSeqConfigWithPast from ...utils import logging snake_case__ = logging.get_logger(__name__) snake_case__ = { """t5-small""": """https://huggingface.co/t5-small/resolve/main/config.json""", """t5-base""": """https://huggingface.co/t5-base/resolve/main/config.json""", """t5-large""": """https://huggingface.co/t5-large/resolve/main/config.json""", """t5-3b""": """https://huggingface.co/t5-3b/resolve/main/config.json""", """t5-11b""": """https://huggingface.co/t5-11b/resolve/main/config.json""", } class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = 't5' _lowerCAmelCase = ['past_key_values'] _lowerCAmelCase = {'hidden_size': 'd_model', 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'} def __init__( self : List[str] , _lowerCamelCase : Optional[int]=32128 , _lowerCamelCase : Union[str, Any]=512 , _lowerCamelCase : Optional[int]=64 , _lowerCamelCase : List[str]=2048 , _lowerCamelCase : Optional[int]=6 , _lowerCamelCase : List[Any]=None , _lowerCamelCase : List[Any]=8 , _lowerCamelCase : Dict=32 , _lowerCamelCase : int=128 , _lowerCamelCase : Optional[int]=0.1 , _lowerCamelCase : List[Any]=1E-6 , _lowerCamelCase : int=1.0 , _lowerCamelCase : int="relu" , _lowerCamelCase : Tuple=True , _lowerCamelCase : Optional[Any]=True , _lowerCamelCase : str=0 , _lowerCamelCase : str=1 , **_lowerCamelCase : List[Any] , ): """simple docstring""" A_ : Dict = vocab_size A_ : Any = d_model A_ : List[Any] = d_kv A_ : str = d_ff A_ : Tuple = num_layers A_ : int = ( num_decoder_layers if num_decoder_layers is not None else self.num_layers ) # default = symmetry A_ : Optional[Any] = num_heads A_ : List[str] = relative_attention_num_buckets A_ : int = relative_attention_max_distance A_ : str = dropout_rate A_ : Optional[Any] = layer_norm_epsilon A_ : str = initializer_factor A_ : Tuple = feed_forward_proj A_ : int = use_cache A_ : List[Any] = self.feed_forward_proj.split('''-''' ) A_ : int = act_info[-1] A_ : Tuple = act_info[0] == '''gated''' if len(_lowerCamelCase ) > 1 and act_info[0] != "gated" or len(_lowerCamelCase ) > 2: raise ValueError( f'`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.' '''Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. ''' '''\'gated-gelu\' or \'relu\'''' ) # for backwards compatibility if feed_forward_proj == "gated-gelu": A_ : Any = '''gelu_new''' super().__init__( pad_token_id=_lowerCamelCase , eos_token_id=_lowerCamelCase , is_encoder_decoder=_lowerCamelCase , **_lowerCamelCase , ) class UpperCamelCase_ (a__ ): """simple docstring""" @property def _a ( self : str ): """simple docstring""" A_ : int = { '''input_ids''': {0: '''batch''', 1: '''encoder_sequence'''}, '''attention_mask''': {0: '''batch''', 1: '''encoder_sequence'''}, } if self.use_past: A_ : Optional[int] = '''past_encoder_sequence + sequence''' A_ : Optional[Any] = {0: '''batch'''} A_ : Any = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''} else: A_ : Dict = {0: '''batch''', 1: '''decoder_sequence'''} A_ : List[Any] = {0: '''batch''', 1: '''decoder_sequence'''} if self.use_past: self.fill_with_past_key_values_(_lowerCamelCase , direction='''inputs''' ) return common_inputs @property def _a ( self : Optional[Any] ): """simple docstring""" return 13
352
'''simple docstring''' import io import itertools import json from dataclasses import dataclass from typing import Optional import pyarrow as pa import pyarrow.json as paj import datasets from datasets.table import table_cast from datasets.utils.file_utils import readline snake_case__ = datasets.utils.logging.get_logger(__name__) @dataclass class UpperCamelCase_ (datasets.BuilderConfig ): """simple docstring""" _lowerCAmelCase = None _lowerCAmelCase = "utf-8" _lowerCAmelCase = None _lowerCAmelCase = None _lowerCAmelCase = True # deprecated _lowerCAmelCase = None # deprecated _lowerCAmelCase = 1_0 << 2_0 # 10MB _lowerCAmelCase = None class UpperCamelCase_ (datasets.ArrowBasedBuilder ): """simple docstring""" _lowerCAmelCase = JsonConfig def _a ( self : int ): """simple docstring""" if self.config.block_size is not None: logger.warning('''The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead''' ) A_ : List[Any] = self.config.block_size if self.config.use_threads is not True: logger.warning( '''The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.''' ) if self.config.newlines_in_values is not None: raise ValueError('''The JSON loader parameter `newlines_in_values` is no longer supported''' ) return datasets.DatasetInfo(features=self.config.features ) def _a ( self : Any , _lowerCamelCase : List[str] ): """simple docstring""" if not self.config.data_files: raise ValueError(f'At least one data file must be specified, but got data_files={self.config.data_files}' ) A_ : int = dl_manager.download_and_extract(self.config.data_files ) if isinstance(_lowerCamelCase , (str, list, tuple) ): A_ : Union[str, Any] = data_files if isinstance(_lowerCamelCase , _lowerCamelCase ): A_ : List[str] = [files] A_ : List[Any] = [dl_manager.iter_files(_lowerCamelCase ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )] A_ : Tuple = [] for split_name, files in data_files.items(): if isinstance(_lowerCamelCase , _lowerCamelCase ): A_ : int = [files] A_ : Union[str, Any] = [dl_manager.iter_files(_lowerCamelCase ) for file in files] splits.append(datasets.SplitGenerator(name=_lowerCamelCase , gen_kwargs={'''files''': files} ) ) return splits def _a ( self : int , _lowerCamelCase : pa.Table ): """simple docstring""" if self.config.features is not None: # adding missing columns for column_name in set(self.config.features ) - set(pa_table.column_names ): A_ : Optional[int] = self.config.features.arrow_schema.field(_lowerCamelCase ).type A_ : Optional[int] = pa_table.append_column(_lowerCamelCase , pa.array([None] * len(_lowerCamelCase ) , type=_lowerCamelCase ) ) # more expensive cast to support nested structures with keys in a different order # allows str <-> int/float or str to Audio for example A_ : str = table_cast(_lowerCamelCase , self.config.features.arrow_schema ) return pa_table def _a ( self : List[str] , _lowerCamelCase : int ): """simple docstring""" for file_idx, file in enumerate(itertools.chain.from_iterable(_lowerCamelCase ) ): # If the file is one json object and if we need to look at the list of items in one specific field if self.config.field is not None: with open(_lowerCamelCase , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f: A_ : int = json.load(_lowerCamelCase ) # We keep only the field we are interested in A_ : List[str] = dataset[self.config.field] # We accept two format: a list of dicts or a dict of lists if isinstance(_lowerCamelCase , (list, tuple) ): A_ : int = set().union(*[row.keys() for row in dataset] ) A_ : List[str] = {col: [row.get(_lowerCamelCase ) for row in dataset] for col in keys} else: A_ : Tuple = dataset A_ : Dict = pa.Table.from_pydict(_lowerCamelCase ) yield file_idx, self._cast_table(_lowerCamelCase ) # If the file has one json object per line else: with open(_lowerCamelCase , '''rb''' ) as f: A_ : int = 0 # Use block_size equal to the chunk size divided by 32 to leverage multithreading # Set a default minimum value of 16kB if the chunk size is really small A_ : int = max(self.config.chunksize // 32 , 16 << 10 ) A_ : int = ( self.config.encoding_errors if self.config.encoding_errors is not None else '''strict''' ) while True: A_ : Any = f.read(self.config.chunksize ) if not batch: break # Finish current line try: batch += f.readline() except (AttributeError, io.UnsupportedOperation): batch += readline(_lowerCamelCase ) # PyArrow only accepts utf-8 encoded bytes if self.config.encoding != "utf-8": A_ : Optional[Any] = batch.decode(self.config.encoding , errors=_lowerCamelCase ).encode('''utf-8''' ) try: while True: try: A_ : List[Any] = paj.read_json( io.BytesIO(_lowerCamelCase ) , read_options=paj.ReadOptions(block_size=_lowerCamelCase ) ) break except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e: if ( isinstance(_lowerCamelCase , pa.ArrowInvalid ) and "straddling" not in str(_lowerCamelCase ) or block_size > len(_lowerCamelCase ) ): raise else: # Increase the block size in case it was too small. # The block size will be reset for the next file. logger.debug( f'Batch of {len(_lowerCamelCase )} bytes couldn\'t be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.' ) block_size *= 2 except pa.ArrowInvalid as e: try: with open( _lowerCamelCase , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f: A_ : Optional[Any] = json.load(_lowerCamelCase ) except json.JSONDecodeError: logger.error(f'Failed to read file \'{file}\' with error {type(_lowerCamelCase )}: {e}' ) raise e # If possible, parse the file as a list of json objects and exit the loop if isinstance(_lowerCamelCase , _lowerCamelCase ): # list is the only sequence type supported in JSON try: A_ : Optional[int] = set().union(*[row.keys() for row in dataset] ) A_ : Tuple = {col: [row.get(_lowerCamelCase ) for row in dataset] for col in keys} A_ : int = pa.Table.from_pydict(_lowerCamelCase ) except (pa.ArrowInvalid, AttributeError) as e: logger.error(f'Failed to read file \'{file}\' with error {type(_lowerCamelCase )}: {e}' ) raise ValueError(f'Not able to read records in the JSON file at {file}.' ) from None yield file_idx, self._cast_table(_lowerCamelCase ) break else: logger.error(f'Failed to read file \'{file}\' with error {type(_lowerCamelCase )}: {e}' ) raise ValueError( f'Not able to read records in the JSON file at {file}. ' f'You should probably indicate the field of the JSON file containing your records. ' f'This JSON file contain the following fields: {str(list(dataset.keys() ) )}. ' f'Select the correct one and provide it as `field=\'XXX\'` to the dataset loading method. ' ) from None # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(_lowerCamelCase ) batch_idx += 1
4
0
import secrets from random import shuffle from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation def snake_case__ ( lowerCamelCase__ : int = 8 ) -> str: A_ : List[Any] = ascii_letters + digits + punctuation return "".join(secrets.choice(lowerCamelCase__ ) for _ in range(lowerCamelCase__ ) ) def snake_case__ ( lowerCamelCase__ : str , lowerCamelCase__ : int ) -> str: # Password Generator = full boot with random_number, random_letters, and # random_character FUNCTIONS # Put your code here... i -= len(lowerCamelCase__ ) A_ : Tuple = i // 3 A_ : Tuple = i % 3 # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) + # random_number(digits, i / 3) + random_characters(punctuation, i / 3) A_ : List[str] = ( chars_incl + random(lowerCamelCase__ , quotient + remainder ) + random(lowerCamelCase__ , lowerCamelCase__ ) + random(lowerCamelCase__ , lowerCamelCase__ ) ) A_ : int = list(lowerCamelCase__ ) shuffle(lowerCamelCase__ ) return "".join(lowerCamelCase__ ) # random is a generalised function for letters, characters and numbers def snake_case__ ( lowerCamelCase__ : str , lowerCamelCase__ : int ) -> str: return "".join(secrets.choice(lowerCamelCase__ ) for _ in range(lowerCamelCase__ ) ) def snake_case__ ( lowerCamelCase__ : Tuple , lowerCamelCase__ : List[str] ) -> List[Any]: pass # Put your code here... def snake_case__ ( lowerCamelCase__ : List[str] , lowerCamelCase__ : Optional[int] ) -> int: pass # Put your code here... def snake_case__ ( lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Optional[int] ) -> str: pass # Put your code here... def snake_case__ ( lowerCamelCase__ : str , lowerCamelCase__ : int = 8 ) -> bool: if len(lowerCamelCase__ ) < min_length: # Your Password must be at least 8 characters long return False A_ : List[Any] = any(char in ascii_uppercase for char in password ) A_ : Tuple = any(char in ascii_lowercase for char in password ) A_ : Union[str, Any] = any(char in digits for char in password ) A_ : Dict = any(char in punctuation for char in password ) return upper and lower and num and spec_char # Passwords should contain UPPERCASE, lowerase # numbers, and special characters def snake_case__ ( ) -> Optional[Any]: A_ : Optional[int] = int(input('''Please indicate the max length of your password: ''' ).strip() ) A_ : List[str] = input( '''Please indicate the characters that must be in your password: ''' ).strip() print('''Password generated:''' , password_generator(lowerCamelCase__ ) ) print( '''Alternative Password generated:''' , alternative_password_generator(lowerCamelCase__ , lowerCamelCase__ ) , ) print('''[If you are thinking of using this passsword, You better save it.]''' ) if __name__ == "__main__": main()
353
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices snake_case__ = logging.get_logger(__name__) snake_case__ = { """microsoft/swin-tiny-patch4-window7-224""": ( """https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json""" ), # See all Swin models at https://huggingface.co/models?filter=swin } class UpperCamelCase_ (a__, a__ ): """simple docstring""" _lowerCAmelCase = 'swin' _lowerCAmelCase = { 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers', } def __init__( self : Any , _lowerCamelCase : Optional[Any]=224 , _lowerCamelCase : List[str]=4 , _lowerCamelCase : Optional[Any]=3 , _lowerCamelCase : Tuple=96 , _lowerCamelCase : List[Any]=[2, 2, 6, 2] , _lowerCamelCase : List[str]=[3, 6, 12, 24] , _lowerCamelCase : List[Any]=7 , _lowerCamelCase : Optional[int]=4.0 , _lowerCamelCase : List[str]=True , _lowerCamelCase : List[str]=0.0 , _lowerCamelCase : Any=0.0 , _lowerCamelCase : Dict=0.1 , _lowerCamelCase : List[str]="gelu" , _lowerCamelCase : Tuple=False , _lowerCamelCase : Dict=0.02 , _lowerCamelCase : Optional[Any]=1E-5 , _lowerCamelCase : Any=32 , _lowerCamelCase : Tuple=None , _lowerCamelCase : Any=None , **_lowerCamelCase : str , ): """simple docstring""" super().__init__(**_lowerCamelCase ) A_ : Optional[int] = image_size A_ : Optional[int] = patch_size A_ : Optional[int] = num_channels A_ : Any = embed_dim A_ : List[Any] = depths A_ : Any = len(_lowerCamelCase ) A_ : List[Any] = num_heads A_ : Tuple = window_size A_ : Tuple = mlp_ratio A_ : Dict = qkv_bias A_ : List[str] = hidden_dropout_prob A_ : List[str] = attention_probs_dropout_prob A_ : Any = drop_path_rate A_ : List[Any] = hidden_act A_ : Tuple = use_absolute_embeddings A_ : int = layer_norm_eps A_ : Optional[Any] = initializer_range A_ : Union[str, Any] = encoder_stride # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model A_ : str = int(embed_dim * 2 ** (len(_lowerCamelCase ) - 1) ) A_ : str = ['''stem'''] + [f'stage{idx}' for idx in range(1 , len(_lowerCamelCase ) + 1 )] A_ ,A_ : Optional[Any] = get_aligned_output_features_output_indices( out_features=_lowerCamelCase , out_indices=_lowerCamelCase , stage_names=self.stage_names ) class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = version.parse('1.11' ) @property def _a ( self : str ): """simple docstring""" return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def _a ( self : Union[str, Any] ): """simple docstring""" return 1E-4
4
0
'''simple docstring''' from collections.abc import Callable import numpy as np def snake_case__ ( lowerCamelCase__ : Callable , lowerCamelCase__ : float , lowerCamelCase__ : float , lowerCamelCase__ : float , lowerCamelCase__ : float ) -> np.array: A_ : str = int(np.ceil((x_end - xa) / step_size ) ) A_ : List[str] = np.zeros((n + 1,) ) A_ : List[str] = ya A_ : Union[str, Any] = xa for k in range(lowerCamelCase__ ): A_ : Union[str, Any] = y[k] + step_size * ode_func(lowerCamelCase__ , y[k] ) A_ : Optional[Any] = y[k] + ( (step_size / 2) * (ode_func(lowerCamelCase__ , y[k] ) + ode_func(x + step_size , lowerCamelCase__ )) ) x += step_size return y if __name__ == "__main__": import doctest doctest.testmod()
354
'''simple docstring''' from __future__ import annotations def snake_case__ ( lowerCamelCase__ : list[int] , lowerCamelCase__ : int ) -> list[int]: A_ : int = 0 A_ : str = len(lowerCamelCase__ ) - 1 while i < j: if nums[i] + nums[j] == target: return [i, j] elif nums[i] + nums[j] < target: A_ : Tuple = i + 1 else: A_ : List[str] = j - 1 return [] if __name__ == "__main__": import doctest doctest.testmod() print(F'{two_pointer([2, 7, 11, 15], 9) = }')
4
0
'''simple docstring''' import asyncio import os import shutil import subprocess import sys import tempfile import unittest from distutils.util import strtobool from functools import partial from pathlib import Path from typing import List, Union from unittest import mock import torch from ..state import AcceleratorState, PartialState from ..utils import ( gather, is_bnb_available, is_comet_ml_available, is_datasets_available, is_deepspeed_available, is_mps_available, is_safetensors_available, is_tensorboard_available, is_torch_version, is_tpu_available, is_transformers_available, is_wandb_available, is_xpu_available, ) def snake_case__ ( lowerCamelCase__ : List[str] , lowerCamelCase__ : Dict=False ) -> List[str]: try: A_ : Union[str, Any] = os.environ[key] except KeyError: # KEY isn't set, default to `default`. A_ : Optional[int] = default else: # KEY is set, convert it to True or False. try: A_ : List[str] = strtobool(lowerCamelCase__ ) except ValueError: # More values are supported, but let's keep the message simple. raise ValueError(f'If set, {key} must be yes or no.' ) return _value snake_case__ = parse_flag_from_env("""RUN_SLOW""", default=False) def snake_case__ ( lowerCamelCase__ : Optional[int] ) -> Dict: return unittest.skip('''Test was skipped''' )(lowerCamelCase__ ) def snake_case__ ( lowerCamelCase__ : Dict ) -> str: return unittest.skipUnless(_run_slow_tests , '''test is slow''' )(lowerCamelCase__ ) def snake_case__ ( lowerCamelCase__ : Any ) -> Optional[int]: return unittest.skipUnless(not torch.cuda.is_available() , '''test requires only a CPU''' )(lowerCamelCase__ ) def snake_case__ ( lowerCamelCase__ : int ) -> Optional[int]: return unittest.skipUnless(torch.cuda.is_available() , '''test requires a GPU''' )(lowerCamelCase__ ) def snake_case__ ( lowerCamelCase__ : Optional[Any] ) -> int: return unittest.skipUnless(is_xpu_available() , '''test requires a XPU''' )(lowerCamelCase__ ) def snake_case__ ( lowerCamelCase__ : int ) -> Dict: return unittest.skipUnless(is_mps_available() , '''test requires a `mps` backend support in `torch`''' )(lowerCamelCase__ ) def snake_case__ ( lowerCamelCase__ : Union[str, Any] ) -> str: return unittest.skipUnless( is_transformers_available() and is_datasets_available() , '''test requires the Hugging Face suite''' )(lowerCamelCase__ ) def snake_case__ ( lowerCamelCase__ : Dict ) -> Optional[int]: return unittest.skipUnless(is_bnb_available() , '''test requires the bitsandbytes library''' )(lowerCamelCase__ ) def snake_case__ ( lowerCamelCase__ : Any ) -> List[Any]: return unittest.skipUnless(is_tpu_available() , '''test requires TPU''' )(lowerCamelCase__ ) def snake_case__ ( lowerCamelCase__ : Optional[Any] ) -> Optional[int]: return unittest.skipUnless(torch.cuda.device_count() == 1 , '''test requires a GPU''' )(lowerCamelCase__ ) def snake_case__ ( lowerCamelCase__ : int ) -> Optional[int]: return unittest.skipUnless(torch.xpu.device_count() == 1 , '''test requires a XPU''' )(lowerCamelCase__ ) def snake_case__ ( lowerCamelCase__ : Any ) -> str: return unittest.skipUnless(torch.cuda.device_count() > 1 , '''test requires multiple GPUs''' )(lowerCamelCase__ ) def snake_case__ ( lowerCamelCase__ : Optional[int] ) -> Optional[int]: return unittest.skipUnless(torch.xpu.device_count() > 1 , '''test requires multiple XPUs''' )(lowerCamelCase__ ) def snake_case__ ( lowerCamelCase__ : Dict ) -> Tuple: return unittest.skipUnless(is_safetensors_available() , '''test requires safetensors''' )(lowerCamelCase__ ) def snake_case__ ( lowerCamelCase__ : List[str] ) -> List[str]: return unittest.skipUnless(is_deepspeed_available() , '''test requires DeepSpeed''' )(lowerCamelCase__ ) def snake_case__ ( lowerCamelCase__ : Union[str, Any] ) -> int: return unittest.skipUnless(is_torch_version('''>=''' , '''1.12.0''' ) , '''test requires torch version >= 1.12.0''' )(lowerCamelCase__ ) def snake_case__ ( lowerCamelCase__ : List[Any]=None , lowerCamelCase__ : int=None ) -> Optional[Any]: if test_case is None: return partial(lowerCamelCase__ , version=lowerCamelCase__ ) return unittest.skipUnless(is_torch_version('''>=''' , lowerCamelCase__ ) , f'test requires torch version >= {version}' )(lowerCamelCase__ ) def snake_case__ ( lowerCamelCase__ : List[str] ) -> Optional[int]: return unittest.skipUnless(is_tensorboard_available() , '''test requires Tensorboard''' )(lowerCamelCase__ ) def snake_case__ ( lowerCamelCase__ : Optional[Any] ) -> Tuple: return unittest.skipUnless(is_wandb_available() , '''test requires wandb''' )(lowerCamelCase__ ) def snake_case__ ( lowerCamelCase__ : Optional[int] ) -> List[str]: return unittest.skipUnless(is_comet_ml_available() , '''test requires comet_ml''' )(lowerCamelCase__ ) snake_case__ = ( any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available() ) def snake_case__ ( lowerCamelCase__ : List[Any] ) -> Union[str, Any]: return unittest.skipUnless( _atleast_one_tracker_available , '''test requires at least one tracker to be available and for `comet_ml` to not be installed''' , )(lowerCamelCase__ ) class UpperCamelCase_ (unittest.TestCase ): """simple docstring""" _lowerCAmelCase = True @classmethod def _a ( cls : Dict ): """simple docstring""" A_ : int = tempfile.mkdtemp() @classmethod def _a ( cls : Dict ): """simple docstring""" if os.path.exists(cls.tmpdir ): shutil.rmtree(cls.tmpdir ) def _a ( self : str ): """simple docstring""" if self.clear_on_setup: for path in Path(self.tmpdir ).glob('''**/*''' ): if path.is_file(): path.unlink() elif path.is_dir(): shutil.rmtree(_lowerCamelCase ) class UpperCamelCase_ (unittest.TestCase ): """simple docstring""" def _a ( self : Dict ): """simple docstring""" super().tearDown() # Reset the state of the AcceleratorState singleton. AcceleratorState._reset_state() PartialState._reset_state() class UpperCamelCase_ (unittest.TestCase ): """simple docstring""" def _a ( self : Any , _lowerCamelCase : Union[mock.Mock, List[mock.Mock]] ): """simple docstring""" A_ : List[Any] = mocks if isinstance(_lowerCamelCase , (tuple, list) ) else [mocks] for m in self.mocks: m.start() self.addCleanup(m.stop ) def snake_case__ ( lowerCamelCase__ : Optional[Any] ) -> Dict: A_ : Optional[int] = AcceleratorState() A_ : Tuple = tensor[None].clone().to(state.device ) A_ : Tuple = gather(lowerCamelCase__ ).cpu() A_ : int = tensor[0].cpu() for i in range(tensors.shape[0] ): if not torch.equal(tensors[i] , lowerCamelCase__ ): return False return True class UpperCamelCase_ : """simple docstring""" def __init__( self : Dict , _lowerCamelCase : List[Any] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : int ): """simple docstring""" A_ : Any = returncode A_ : str = stdout A_ : Union[str, Any] = stderr async def snake_case__ ( lowerCamelCase__ : Dict , lowerCamelCase__ : List[Any] ) -> Optional[Any]: while True: A_ : List[Any] = await stream.readline() if line: callback(lowerCamelCase__ ) else: break async def snake_case__ ( lowerCamelCase__ : str , lowerCamelCase__ : Union[str, Any]=None , lowerCamelCase__ : Optional[int]=None , lowerCamelCase__ : Optional[Any]=None , lowerCamelCase__ : int=False , lowerCamelCase__ : str=False ) -> _RunOutput: if echo: print('''\nRunning: ''' , ''' '''.join(lowerCamelCase__ ) ) A_ : Union[str, Any] = await asyncio.create_subprocess_exec( cmd[0] , *cmd[1:] , stdin=lowerCamelCase__ , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=lowerCamelCase__ , ) # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait # # If it starts hanging, will need to switch to the following code. The problem is that no data # will be seen until it's done and if it hangs for example there will be no debug info. # out, err = await p.communicate() # return _RunOutput(p.returncode, out, err) A_ : Union[str, Any] = [] A_ : str = [] def tee(lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : int , lowerCamelCase__ : Union[str, Any]="" ): A_ : int = line.decode('''utf-8''' ).rstrip() sink.append(lowerCamelCase__ ) if not quiet: print(lowerCamelCase__ , lowerCamelCase__ , file=lowerCamelCase__ ) # XXX: the timeout doesn't seem to make any difference here await asyncio.wait( [ asyncio.create_task(_read_stream(p.stdout , lambda lowerCamelCase__ : tee(lowerCamelCase__ , lowerCamelCase__ , sys.stdout , label='''stdout:''' ) ) ), asyncio.create_task(_read_stream(p.stderr , lambda lowerCamelCase__ : tee(lowerCamelCase__ , lowerCamelCase__ , sys.stderr , label='''stderr:''' ) ) ), ] , timeout=lowerCamelCase__ , ) return _RunOutput(await p.wait() , lowerCamelCase__ , lowerCamelCase__ ) def snake_case__ ( lowerCamelCase__ : Dict , lowerCamelCase__ : List[str]=None , lowerCamelCase__ : Optional[int]=None , lowerCamelCase__ : str=1_8_0 , lowerCamelCase__ : Union[str, Any]=False , lowerCamelCase__ : List[str]=True ) -> _RunOutput: A_ : str = asyncio.get_event_loop() A_ : Dict = loop.run_until_complete( _stream_subprocess(lowerCamelCase__ , env=lowerCamelCase__ , stdin=lowerCamelCase__ , timeout=lowerCamelCase__ , quiet=lowerCamelCase__ , echo=lowerCamelCase__ ) ) A_ : Optional[int] = ''' '''.join(lowerCamelCase__ ) if result.returncode > 0: A_ : Tuple = '''\n'''.join(result.stderr ) raise RuntimeError( f'\'{cmd_str}\' failed with returncode {result.returncode}\n\n' f'The combined stderr from workers follows:\n{stderr}' ) return result class UpperCamelCase_ (a__ ): """simple docstring""" pass def snake_case__ ( lowerCamelCase__ : List[str] , lowerCamelCase__ : Optional[Any]=False ) -> int: try: A_ : List[Any] = subprocess.check_output(lowerCamelCase__ , stderr=subprocess.STDOUT ) if return_stdout: if hasattr(lowerCamelCase__ , '''decode''' ): A_ : Any = output.decode('''utf-8''' ) return output except subprocess.CalledProcessError as e: raise SubprocessCallException( f'Command `{" ".join(lowerCamelCase__ )}` failed with the following error:\n\n{e.output.decode()}' ) from e
355
'''simple docstring''' def snake_case__ ( lowerCamelCase__ : list[int] , lowerCamelCase__ : list[int] , lowerCamelCase__ : int ) -> bool: return not any( neighbour == 1 and colored_vertices[i] == color for i, neighbour in enumerate(lowerCamelCase__ ) ) def snake_case__ ( lowerCamelCase__ : list[list[int]] , lowerCamelCase__ : int , lowerCamelCase__ : list[int] , lowerCamelCase__ : int ) -> bool: # Base Case if index == len(lowerCamelCase__ ): return True # Recursive Step for i in range(lowerCamelCase__ ): if valid_coloring(graph[index] , lowerCamelCase__ , lowerCamelCase__ ): # Color current vertex A_ : int = i # Validate coloring if util_color(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , index + 1 ): return True # Backtrack A_ : str = -1 return False def snake_case__ ( lowerCamelCase__ : list[list[int]] , lowerCamelCase__ : int ) -> list[int]: A_ : List[str] = [-1] * len(lowerCamelCase__ ) if util_color(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , 0 ): return colored_vertices return []
4
0
'''simple docstring''' def snake_case__ ( lowerCamelCase__ : list[int] , lowerCamelCase__ : list[int] , lowerCamelCase__ : int ) -> bool: return not any( neighbour == 1 and colored_vertices[i] == color for i, neighbour in enumerate(lowerCamelCase__ ) ) def snake_case__ ( lowerCamelCase__ : list[list[int]] , lowerCamelCase__ : int , lowerCamelCase__ : list[int] , lowerCamelCase__ : int ) -> bool: # Base Case if index == len(lowerCamelCase__ ): return True # Recursive Step for i in range(lowerCamelCase__ ): if valid_coloring(graph[index] , lowerCamelCase__ , lowerCamelCase__ ): # Color current vertex A_ : int = i # Validate coloring if util_color(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , index + 1 ): return True # Backtrack A_ : str = -1 return False def snake_case__ ( lowerCamelCase__ : list[list[int]] , lowerCamelCase__ : int ) -> list[int]: A_ : List[str] = [-1] * len(lowerCamelCase__ ) if util_color(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , 0 ): return colored_vertices return []
356
'''simple docstring''' from __future__ import annotations from PIL import Image # Define glider example snake_case__ = [ [0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], ] # Define blinker example snake_case__ = [[0, 1, 0], [0, 1, 0], [0, 1, 0]] def snake_case__ ( lowerCamelCase__ : list[list[int]] ) -> list[list[int]]: A_ : str = [] for i in range(len(lowerCamelCase__ ) ): A_ : Optional[Any] = [] for j in range(len(cells[i] ) ): # Get the number of live neighbours A_ : Optional[int] = 0 if i > 0 and j > 0: neighbour_count += cells[i - 1][j - 1] if i > 0: neighbour_count += cells[i - 1][j] if i > 0 and j < len(cells[i] ) - 1: neighbour_count += cells[i - 1][j + 1] if j > 0: neighbour_count += cells[i][j - 1] if j < len(cells[i] ) - 1: neighbour_count += cells[i][j + 1] if i < len(lowerCamelCase__ ) - 1 and j > 0: neighbour_count += cells[i + 1][j - 1] if i < len(lowerCamelCase__ ) - 1: neighbour_count += cells[i + 1][j] if i < len(lowerCamelCase__ ) - 1 and j < len(cells[i] ) - 1: neighbour_count += cells[i + 1][j + 1] # Rules of the game of life (excerpt from Wikipedia): # 1. Any live cell with two or three live neighbours survives. # 2. Any dead cell with three live neighbours becomes a live cell. # 3. All other live cells die in the next generation. # Similarly, all other dead cells stay dead. A_ : List[str] = cells[i][j] == 1 if ( (alive and 2 <= neighbour_count <= 3) or not alive and neighbour_count == 3 ): next_generation_row.append(1 ) else: next_generation_row.append(0 ) next_generation.append(lowerCamelCase__ ) return next_generation def snake_case__ ( lowerCamelCase__ : list[list[int]] , lowerCamelCase__ : int ) -> list[Image.Image]: A_ : List[Any] = [] for _ in range(lowerCamelCase__ ): # Create output image A_ : Optional[int] = Image.new('''RGB''' , (len(cells[0] ), len(lowerCamelCase__ )) ) A_ : int = img.load() # Save cells to image for x in range(len(lowerCamelCase__ ) ): for y in range(len(cells[0] ) ): A_ : Optional[Any] = 2_5_5 - cells[y][x] * 2_5_5 A_ : str = (colour, colour, colour) # Save image images.append(lowerCamelCase__ ) A_ : Optional[int] = new_generation(lowerCamelCase__ ) return images if __name__ == "__main__": snake_case__ = generate_images(GLIDER, 16) images[0].save("""out.gif""", save_all=True, append_images=images[1:])
4
0
'''simple docstring''' import argparse import json import os import torch from transformers.file_utils import has_file from diffusers import UNetaDConditionModel, UNetaDModel snake_case__ = False snake_case__ = True snake_case__ = False if __name__ == "__main__": snake_case__ = argparse.ArgumentParser() parser.add_argument( """--repo_path""", default=None, type=str, required=True, help="""The config json file corresponding to the architecture.""", ) parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""") snake_case__ = parser.parse_args() snake_case__ = { """image_size""": """sample_size""", """num_res_blocks""": """layers_per_block""", """block_channels""": """block_out_channels""", """down_blocks""": """down_block_types""", """up_blocks""": """up_block_types""", """downscale_freq_shift""": """freq_shift""", """resnet_num_groups""": """norm_num_groups""", """resnet_act_fn""": """act_fn""", """resnet_eps""": """norm_eps""", """num_head_channels""": """attention_head_dim""", } snake_case__ = { """time_steps""": """time_proj""", """mid""": """mid_block""", """downsample_blocks""": """down_blocks""", """upsample_blocks""": """up_blocks""", } snake_case__ = """""" if has_file(args.repo_path, """config.json""") else """unet""" with open(os.path.join(args.repo_path, subfolder, """config.json"""), """r""", encoding="""utf-8""") as reader: snake_case__ = reader.read() snake_case__ = json.loads(text) if do_only_config: for key in config_parameters_to_change.keys(): config.pop(key, None) if has_file(args.repo_path, """config.json"""): snake_case__ = UNetaDModel(**config) else: snake_case__ = UNetaDConditionModel if """ldm-text2im-large-256""" in args.repo_path else UNetaDModel snake_case__ = class_name(**config) if do_only_config: model.save_config(os.path.join(args.repo_path, subfolder)) snake_case__ = dict(model.config) if do_only_renaming: for key, value in config_parameters_to_change.items(): if key in config: snake_case__ = config[key] del config[key] snake_case__ = [k.replace("""UNetRes""", """""") for k in config["""down_block_types"""]] snake_case__ = [k.replace("""UNetRes""", """""") for k in config["""up_block_types"""]] if do_only_weights: snake_case__ = torch.load(os.path.join(args.repo_path, subfolder, """diffusion_pytorch_model.bin""")) snake_case__ = {} for param_key, param_value in state_dict.items(): if param_key.endswith(""".op.bias""") or param_key.endswith(""".op.weight"""): continue snake_case__ = False for key, new_key in key_parameters_to_change.items(): if not has_changed and param_key.split(""".""")[0] == key: snake_case__ = param_value snake_case__ = True if not has_changed: snake_case__ = param_value model.load_state_dict(new_state_dict) model.save_pretrained(os.path.join(args.repo_path, subfolder))
357
'''simple docstring''' import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import BertTokenizer, BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import AlignProcessor, EfficientNetImageProcessor @require_vision class UpperCamelCase_ (unittest.TestCase ): """simple docstring""" def _a ( self : Union[str, Any] ): """simple docstring""" A_ : Any = tempfile.mkdtemp() A_ : List[Any] = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] A_ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) A_ : Tuple = { '''do_resize''': True, '''size''': 20, '''do_center_crop''': True, '''crop_size''': 18, '''do_normalize''': True, '''image_mean''': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73], '''image_std''': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11], } A_ : List[Any] = os.path.join(self.tmpdirname , _lowerCamelCase ) with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp: json.dump(_lowerCamelCase , _lowerCamelCase ) def _a ( self : Dict , **_lowerCamelCase : Tuple ): """simple docstring""" return BertTokenizer.from_pretrained(self.tmpdirname , **_lowerCamelCase ) def _a ( self : Optional[int] , **_lowerCamelCase : Optional[int] ): """simple docstring""" return BertTokenizerFast.from_pretrained(self.tmpdirname , **_lowerCamelCase ) def _a ( self : Optional[Any] , **_lowerCamelCase : Tuple ): """simple docstring""" return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **_lowerCamelCase ) def _a ( self : Tuple ): """simple docstring""" shutil.rmtree(self.tmpdirname ) def _a ( self : int ): """simple docstring""" A_ : Union[str, Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] A_ : Any = [Image.fromarray(np.moveaxis(_lowerCamelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs def _a ( self : int ): """simple docstring""" A_ : Tuple = self.get_tokenizer() A_ : Tuple = self.get_rust_tokenizer() A_ : Dict = self.get_image_processor() A_ : List[Any] = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase ) processor_slow.save_pretrained(self.tmpdirname ) A_ : str = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=_lowerCamelCase ) A_ : Any = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase ) processor_fast.save_pretrained(self.tmpdirname ) A_ : List[Any] = AlignProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , _lowerCamelCase ) self.assertIsInstance(processor_fast.tokenizer , _lowerCamelCase ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , _lowerCamelCase ) self.assertIsInstance(processor_fast.image_processor , _lowerCamelCase ) def _a ( self : List[Any] ): """simple docstring""" A_ : List[str] = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) A_ : Optional[int] = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) A_ : Tuple = self.get_image_processor(do_normalize=_lowerCamelCase , padding_value=1.0 ) A_ : List[str] = AlignProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_lowerCamelCase , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , _lowerCamelCase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , _lowerCamelCase ) def _a ( self : Union[str, Any] ): """simple docstring""" A_ : Dict = self.get_image_processor() A_ : Any = self.get_tokenizer() A_ : List[str] = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase ) A_ : Any = self.prepare_image_inputs() A_ : List[Any] = image_processor(_lowerCamelCase , return_tensors='''np''' ) A_ : str = processor(images=_lowerCamelCase , return_tensors='''np''' ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 ) def _a ( self : Dict ): """simple docstring""" A_ : str = self.get_image_processor() A_ : List[str] = self.get_tokenizer() A_ : Optional[int] = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase ) A_ : int = '''lower newer''' A_ : str = processor(text=_lowerCamelCase ) A_ : Dict = tokenizer(_lowerCamelCase , padding='''max_length''' , max_length=64 ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def _a ( self : str ): """simple docstring""" A_ : Optional[int] = self.get_image_processor() A_ : Optional[Any] = self.get_tokenizer() A_ : List[str] = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase ) A_ : List[Any] = '''lower newer''' A_ : Optional[int] = self.prepare_image_inputs() A_ : List[Any] = processor(text=_lowerCamelCase , images=_lowerCamelCase ) self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] ) # test if it raises when no input is passed with pytest.raises(_lowerCamelCase ): processor() def _a ( self : List[str] ): """simple docstring""" A_ : Optional[Any] = self.get_image_processor() A_ : Optional[int] = self.get_tokenizer() A_ : List[Any] = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase ) A_ : str = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] A_ : str = processor.batch_decode(_lowerCamelCase ) A_ : Union[str, Any] = tokenizer.batch_decode(_lowerCamelCase ) self.assertListEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : Tuple ): """simple docstring""" A_ : str = self.get_image_processor() A_ : Tuple = self.get_tokenizer() A_ : Any = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase ) A_ : str = '''lower newer''' A_ : List[str] = self.prepare_image_inputs() A_ : Tuple = processor(text=_lowerCamelCase , images=_lowerCamelCase ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
4
0
'''simple docstring''' import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..bit import BitConfig snake_case__ = logging.get_logger(__name__) snake_case__ = { """Intel/dpt-large""": """https://huggingface.co/Intel/dpt-large/resolve/main/config.json""", # See all DPT models at https://huggingface.co/models?filter=dpt } class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = 'dpt' def __init__( self : int , _lowerCamelCase : Union[str, Any]=768 , _lowerCamelCase : Optional[Any]=12 , _lowerCamelCase : Optional[int]=12 , _lowerCamelCase : Tuple=3072 , _lowerCamelCase : Optional[Any]="gelu" , _lowerCamelCase : Tuple=0.0 , _lowerCamelCase : Union[str, Any]=0.0 , _lowerCamelCase : Any=0.02 , _lowerCamelCase : str=1E-12 , _lowerCamelCase : Dict=384 , _lowerCamelCase : int=16 , _lowerCamelCase : str=3 , _lowerCamelCase : Optional[int]=False , _lowerCamelCase : List[Any]=True , _lowerCamelCase : Any=[2, 5, 8, 11] , _lowerCamelCase : Dict="project" , _lowerCamelCase : Dict=[4, 2, 1, 0.5] , _lowerCamelCase : Tuple=[96, 192, 384, 768] , _lowerCamelCase : Dict=256 , _lowerCamelCase : Tuple=-1 , _lowerCamelCase : Union[str, Any]=False , _lowerCamelCase : Optional[int]=True , _lowerCamelCase : Tuple=0.4 , _lowerCamelCase : str=255 , _lowerCamelCase : int=0.1 , _lowerCamelCase : Dict=[1, 1024, 24, 24] , _lowerCamelCase : List[Any]=[0, 1] , _lowerCamelCase : str=None , **_lowerCamelCase : Optional[int] , ): """simple docstring""" super().__init__(**_lowerCamelCase ) A_ : Union[str, Any] = hidden_size A_ : Tuple = is_hybrid if self.is_hybrid: if backbone_config is None: logger.info('''Initializing the config with a `BiT` backbone.''' ) A_ : Optional[Any] = { '''global_padding''': '''same''', '''layer_type''': '''bottleneck''', '''depths''': [3, 4, 9], '''out_features''': ['''stage1''', '''stage2''', '''stage3'''], '''embedding_dynamic_padding''': True, } A_ : List[str] = BitConfig(**_lowerCamelCase ) elif isinstance(_lowerCamelCase , _lowerCamelCase ): logger.info('''Initializing the config with a `BiT` backbone.''' ) A_ : Optional[Any] = BitConfig(**_lowerCamelCase ) elif isinstance(_lowerCamelCase , _lowerCamelCase ): A_ : List[str] = backbone_config else: raise ValueError( f'backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.' ) A_ : Any = backbone_featmap_shape A_ : int = neck_ignore_stages if readout_type != "project": raise ValueError('''Readout type must be \'project\' when using `DPT-hybrid` mode.''' ) else: A_ : Tuple = None A_ : Dict = None A_ : Optional[Any] = [] A_ : Optional[int] = num_hidden_layers A_ : Optional[int] = num_attention_heads A_ : Any = intermediate_size A_ : Tuple = hidden_act A_ : Union[str, Any] = hidden_dropout_prob A_ : str = attention_probs_dropout_prob A_ : Union[str, Any] = initializer_range A_ : int = layer_norm_eps A_ : Tuple = image_size A_ : Optional[Any] = patch_size A_ : Dict = num_channels A_ : str = qkv_bias A_ : Tuple = backbone_out_indices if readout_type not in ["ignore", "add", "project"]: raise ValueError('''Readout_type must be one of [\'ignore\', \'add\', \'project\']''' ) A_ : Any = readout_type A_ : Any = reassemble_factors A_ : Any = neck_hidden_sizes A_ : str = fusion_hidden_size A_ : List[str] = head_in_index A_ : Optional[int] = use_batch_norm_in_fusion_residual # auxiliary head attributes (semantic segmentation) A_ : int = use_auxiliary_head A_ : Optional[int] = auxiliary_loss_weight A_ : Tuple = semantic_loss_ignore_index A_ : List[Any] = semantic_classifier_dropout def _a ( self : str ): """simple docstring""" A_ : int = copy.deepcopy(self.__dict__ ) if output["backbone_config"] is not None: A_ : Any = self.backbone_config.to_dict() A_ : Any = self.__class__.model_type return output
358
'''simple docstring''' import json import os from pathlib import Path from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple, Union import sentencepiece from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging snake_case__ = logging.get_logger(__name__) snake_case__ = """▁""" snake_case__ = { """vocab_file""": """vocab.json""", """spm_file""": """sentencepiece.bpe.model""", } snake_case__ = { """vocab_file""": { """facebook/s2t-small-librispeech-asr""": ( """https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json""" ), }, """spm_file""": { """facebook/s2t-small-librispeech-asr""": ( """https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model""" ) }, } snake_case__ = { """facebook/s2t-small-librispeech-asr""": 10_24, } snake_case__ = ["""pt""", """fr""", """ru""", """nl""", """ro""", """it""", """es""", """de"""] snake_case__ = {"""mustc""": MUSTC_LANGS} class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = VOCAB_FILES_NAMES _lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP _lowerCAmelCase = MAX_MODEL_INPUT_SIZES _lowerCAmelCase = ['input_ids', 'attention_mask'] _lowerCAmelCase = [] def __init__( self : Optional[int] , _lowerCamelCase : List[str] , _lowerCamelCase : List[str] , _lowerCamelCase : str="<s>" , _lowerCamelCase : Union[str, Any]="</s>" , _lowerCamelCase : Dict="<pad>" , _lowerCamelCase : str="<unk>" , _lowerCamelCase : Union[str, Any]=False , _lowerCamelCase : int=False , _lowerCamelCase : Any=None , _lowerCamelCase : Any=None , _lowerCamelCase : Optional[Dict[str, Any]] = None , **_lowerCamelCase : Optional[int] , ): """simple docstring""" A_ : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=_lowerCamelCase , eos_token=_lowerCamelCase , unk_token=_lowerCamelCase , pad_token=_lowerCamelCase , do_upper_case=_lowerCamelCase , do_lower_case=_lowerCamelCase , tgt_lang=_lowerCamelCase , lang_codes=_lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCamelCase , ) A_ : Optional[int] = do_upper_case A_ : Tuple = do_lower_case A_ : Tuple = load_json(_lowerCamelCase ) A_ : Tuple = {v: k for k, v in self.encoder.items()} A_ : List[Any] = spm_file A_ : List[str] = load_spm(_lowerCamelCase , self.sp_model_kwargs ) if lang_codes is not None: A_ : Any = lang_codes A_ : Optional[Any] = LANGUAGES[lang_codes] A_ : Optional[Any] = [f'<lang:{lang}>' for lang in self.langs] A_ : Union[str, Any] = {lang: self.sp_model.PieceToId(f'<lang:{lang}>' ) for lang in self.langs} A_ : Optional[int] = self.lang_tokens A_ : int = tgt_lang if tgt_lang is not None else self.langs[0] self.set_tgt_lang_special_tokens(self._tgt_lang ) else: A_ : Dict = {} @property def _a ( self : Tuple ): """simple docstring""" return len(self.encoder ) @property def _a ( self : int ): """simple docstring""" return self._tgt_lang @tgt_lang.setter def _a ( self : List[str] , _lowerCamelCase : Any ): """simple docstring""" A_ : int = new_tgt_lang self.set_tgt_lang_special_tokens(_lowerCamelCase ) def _a ( self : Tuple , _lowerCamelCase : str ): """simple docstring""" A_ : List[str] = self.lang_code_to_id[tgt_lang] A_ : Optional[Any] = [lang_code_id] def _a ( self : Optional[Any] , _lowerCamelCase : str ): """simple docstring""" return self.sp_model.encode(_lowerCamelCase , out_type=_lowerCamelCase ) def _a ( self : List[Any] , _lowerCamelCase : int ): """simple docstring""" return self.encoder.get(_lowerCamelCase , self.encoder[self.unk_token] ) def _a ( self : int , _lowerCamelCase : int ): """simple docstring""" return self.decoder.get(_lowerCamelCase , self.unk_token ) def _a ( self : int , _lowerCamelCase : List[str] ): """simple docstring""" A_ : List[Any] = [] A_ : Any = '''''' for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: A_ : Union[str, Any] = self.sp_model.decode(_lowerCamelCase ) out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " " A_ : Optional[Any] = [] else: current_sub_tokens.append(_lowerCamelCase ) A_ : Tuple = self.sp_model.decode(_lowerCamelCase ) out_string += decoded.upper() if self.do_upper_case else decoded return out_string.strip() def _a ( self : int , _lowerCamelCase : Dict , _lowerCamelCase : Any=None ): """simple docstring""" if token_ids_a is None: return self.prefix_tokens + token_ids_a + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id] def _a ( self : List[Any] , _lowerCamelCase : List[int] , _lowerCamelCase : Optional[List[int]] = None , _lowerCamelCase : bool = False ): """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_lowerCamelCase , token_ids_a=_lowerCamelCase , already_has_special_tokens=_lowerCamelCase ) A_ : Tuple = [1] * len(self.prefix_tokens ) A_ : Tuple = [1] if token_ids_a is None: return prefix_ones + ([0] * len(_lowerCamelCase )) + suffix_ones return prefix_ones + ([0] * len(_lowerCamelCase )) + ([0] * len(_lowerCamelCase )) + suffix_ones def _a ( self : Dict ): """simple docstring""" A_ : Union[str, Any] = self.encoder.copy() vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : Union[str, Any] ): """simple docstring""" A_ : Dict = self.__dict__.copy() A_ : List[Any] = None return state def __setstate__( self : List[str] , _lowerCamelCase : Dict ): """simple docstring""" A_ : Dict = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): A_ : Optional[int] = {} A_ : int = load_spm(self.spm_file , self.sp_model_kwargs ) def _a ( self : Optional[Any] , _lowerCamelCase : str , _lowerCamelCase : Optional[str] = None ): """simple docstring""" A_ : Dict = Path(_lowerCamelCase ) assert save_dir.is_dir(), f'{save_directory} should be a directory' A_ : Optional[int] = save_dir / ( (filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''vocab_file'''] ) A_ : Optional[int] = save_dir / ( (filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''spm_file'''] ) save_json(self.encoder , _lowerCamelCase ) if os.path.abspath(self.spm_file ) != os.path.abspath(_lowerCamelCase ) and os.path.isfile(self.spm_file ): copyfile(self.spm_file , _lowerCamelCase ) elif not os.path.isfile(self.spm_file ): with open(_lowerCamelCase , '''wb''' ) as fi: A_ : List[str] = self.sp_model.serialized_model_proto() fi.write(_lowerCamelCase ) return (str(_lowerCamelCase ), str(_lowerCamelCase )) def snake_case__ ( lowerCamelCase__ : str , lowerCamelCase__ : Dict[str, Any] ) -> sentencepiece.SentencePieceProcessor: A_ : Tuple = sentencepiece.SentencePieceProcessor(**lowerCamelCase__ ) spm.Load(str(lowerCamelCase__ ) ) return spm def snake_case__ ( lowerCamelCase__ : str ) -> Union[Dict, List]: with open(lowerCamelCase__ , '''r''' ) as f: return json.load(lowerCamelCase__ ) def snake_case__ ( lowerCamelCase__ : Any , lowerCamelCase__ : str ) -> None: with open(lowerCamelCase__ , '''w''' ) as f: json.dump(lowerCamelCase__ , lowerCamelCase__ , indent=2 )
4
0
'''simple docstring''' import os import random import sys from . import cryptomath_module as cryptomath from . import rabin_miller snake_case__ = 3 def snake_case__ ( lowerCamelCase__ : int ) -> int: print('''Generating primitive root of p''' ) while True: A_ : List[Any] = random.randrange(3 , lowerCamelCase__ ) if pow(lowerCamelCase__ , 2 , lowerCamelCase__ ) == 1: continue if pow(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) == 1: continue return g def snake_case__ ( lowerCamelCase__ : int ) -> tuple[tuple[int, int, int, int], tuple[int, int]]: print('''Generating prime p...''' ) A_ : List[str] = rabin_miller.generate_large_prime(lowerCamelCase__ ) # select large prime number. A_ : str = primitive_root(lowerCamelCase__ ) # one primitive root on modulo p. A_ : List[str] = random.randrange(3 , lowerCamelCase__ ) # private_key -> have to be greater than 2 for safety. A_ : Tuple = cryptomath.find_mod_inverse(pow(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) , lowerCamelCase__ ) A_ : str = (key_size, e_a, e_a, p) A_ : Dict = (key_size, d) return public_key, private_key def snake_case__ ( lowerCamelCase__ : str , lowerCamelCase__ : int ) -> None: if os.path.exists(f'{name}_pubkey.txt' ) or os.path.exists(f'{name}_privkey.txt' ): print('''\nWARNING:''' ) print( f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n' '''Use a different name or delete these files and re-run this program.''' ) sys.exit() A_ : Optional[int] = generate_key(lowerCamelCase__ ) print(f'\nWriting public key to file {name}_pubkey.txt...' ) with open(f'{name}_pubkey.txt' , '''w''' ) as fo: fo.write(f'{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}' ) print(f'Writing private key to file {name}_privkey.txt...' ) with open(f'{name}_privkey.txt' , '''w''' ) as fo: fo.write(f'{private_key[0]},{private_key[1]}' ) def snake_case__ ( ) -> None: print('''Making key files...''' ) make_key_files('''elgamal''' , 2_0_4_8 ) print('''Key files generation successful''' ) if __name__ == "__main__": main()
359
'''simple docstring''' import argparse import json import os import sys import tempfile import unittest from argparse import Namespace from dataclasses import dataclass, field from enum import Enum from pathlib import Path from typing import List, Literal, Optional import yaml from transformers import HfArgumentParser, TrainingArguments from transformers.hf_argparser import make_choice_type_function, string_to_bool # Since Python 3.10, we can use the builtin `|` operator for Union types # See PEP 604: https://peps.python.org/pep-0604 snake_case__ = sys.version_info >= (3, 10) def snake_case__ ( lowerCamelCase__ : Union[str, Any]=None , lowerCamelCase__ : str=None ) -> List[Any]: return field(default_factory=lambda: default , metadata=lowerCamelCase__ ) @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = 42 _lowerCAmelCase = 42 _lowerCAmelCase = 42 _lowerCAmelCase = 42 @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = 4_2 _lowerCAmelCase = field(default='toto', metadata={'help': 'help message'} ) @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = False _lowerCAmelCase = True _lowerCAmelCase = None class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = 'titi' _lowerCAmelCase = 'toto' class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = 'titi' _lowerCAmelCase = 'toto' _lowerCAmelCase = 4_2 @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = "toto" def _a ( self : Optional[Any] ): """simple docstring""" A_ : Optional[int] = BasicEnum(self.foo ) @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = "toto" def _a ( self : Tuple ): """simple docstring""" A_ : Optional[Any] = MixedTypeEnum(self.foo ) @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = None _lowerCAmelCase = field(default=a__, metadata={'help': 'help message'} ) _lowerCAmelCase = None _lowerCAmelCase = list_field(default=[] ) _lowerCAmelCase = list_field(default=[] ) @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = list_field(default=[] ) _lowerCAmelCase = list_field(default=[1, 2, 3] ) _lowerCAmelCase = list_field(default=['Hallo', 'Bonjour', 'Hello'] ) _lowerCAmelCase = list_field(default=[0.1, 0.2, 0.3] ) @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = field() _lowerCAmelCase = field() _lowerCAmelCase = field() def _a ( self : Tuple ): """simple docstring""" A_ : Tuple = BasicEnum(self.required_enum ) @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = 42 _lowerCAmelCase = field() _lowerCAmelCase = None _lowerCAmelCase = field(default='toto', metadata={'help': 'help message'} ) _lowerCAmelCase = list_field(default=['Hallo', 'Bonjour', 'Hello'] ) if is_python_no_less_than_3_10: @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = False _lowerCAmelCase = True _lowerCAmelCase = None @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = None _lowerCAmelCase = field(default=a__, metadata={'help': 'help message'} ) _lowerCAmelCase = None _lowerCAmelCase = list_field(default=[] ) _lowerCAmelCase = list_field(default=[] ) class UpperCamelCase_ (unittest.TestCase ): """simple docstring""" def _a ( self : List[str] , _lowerCamelCase : argparse.ArgumentParser , _lowerCamelCase : argparse.ArgumentParser ): """simple docstring""" self.assertEqual(len(a._actions ) , len(b._actions ) ) for x, y in zip(a._actions , b._actions ): A_ : Union[str, Any] = {k: v for k, v in vars(_lowerCamelCase ).items() if k != '''container'''} A_ : Optional[Any] = {k: v for k, v in vars(_lowerCamelCase ).items() if k != '''container'''} # Choices with mixed type have custom function as "type" # So we need to compare results directly for equality if xx.get('''choices''' , _lowerCamelCase ) and yy.get('''choices''' , _lowerCamelCase ): for expected_choice in yy["choices"] + xx["choices"]: self.assertEqual(xx['''type'''](_lowerCamelCase ) , yy['''type'''](_lowerCamelCase ) ) del xx["type"], yy["type"] self.assertEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : Optional[int] ): """simple docstring""" A_ : Union[str, Any] = HfArgumentParser(_lowerCamelCase ) A_ : Optional[Any] = argparse.ArgumentParser() expected.add_argument('''--foo''' , type=_lowerCamelCase , required=_lowerCamelCase ) expected.add_argument('''--bar''' , type=_lowerCamelCase , required=_lowerCamelCase ) expected.add_argument('''--baz''' , type=_lowerCamelCase , required=_lowerCamelCase ) expected.add_argument('''--flag''' , type=_lowerCamelCase , default=_lowerCamelCase , const=_lowerCamelCase , nargs='''?''' ) self.argparsersEqual(_lowerCamelCase , _lowerCamelCase ) A_ : Union[str, Any] = ['''--foo''', '''1''', '''--baz''', '''quux''', '''--bar''', '''0.5'''] ((A_) ,) : List[str] = parser.parse_args_into_dataclasses(_lowerCamelCase , look_for_args_file=_lowerCamelCase ) self.assertFalse(example.flag ) def _a ( self : Dict ): """simple docstring""" A_ : int = HfArgumentParser(_lowerCamelCase ) A_ : int = argparse.ArgumentParser() expected.add_argument('''--foo''' , default=42 , type=_lowerCamelCase ) expected.add_argument('''--baz''' , default='''toto''' , type=_lowerCamelCase , help='''help message''' ) self.argparsersEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : Dict ): """simple docstring""" A_ : Any = argparse.ArgumentParser() expected.add_argument('''--foo''' , type=_lowerCamelCase , default=_lowerCamelCase , const=_lowerCamelCase , nargs='''?''' ) expected.add_argument('''--baz''' , type=_lowerCamelCase , default=_lowerCamelCase , const=_lowerCamelCase , nargs='''?''' ) # A boolean no_* argument always has to come after its "default: True" regular counter-part # and its default must be set to False expected.add_argument('''--no_baz''' , action='''store_false''' , default=_lowerCamelCase , dest='''baz''' ) expected.add_argument('''--opt''' , type=_lowerCamelCase , default=_lowerCamelCase ) A_ : Dict = [WithDefaultBoolExample] if is_python_no_less_than_3_10: dataclass_types.append(_lowerCamelCase ) for dataclass_type in dataclass_types: A_ : Any = HfArgumentParser(_lowerCamelCase ) self.argparsersEqual(_lowerCamelCase , _lowerCamelCase ) A_ : List[Any] = parser.parse_args([] ) self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , baz=_lowerCamelCase , opt=_lowerCamelCase ) ) A_ : Optional[int] = parser.parse_args(['''--foo''', '''--no_baz'''] ) self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , baz=_lowerCamelCase , opt=_lowerCamelCase ) ) A_ : Union[str, Any] = parser.parse_args(['''--foo''', '''--baz'''] ) self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , baz=_lowerCamelCase , opt=_lowerCamelCase ) ) A_ : List[str] = parser.parse_args(['''--foo''', '''True''', '''--baz''', '''True''', '''--opt''', '''True'''] ) self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , baz=_lowerCamelCase , opt=_lowerCamelCase ) ) A_ : List[Any] = parser.parse_args(['''--foo''', '''False''', '''--baz''', '''False''', '''--opt''', '''False'''] ) self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , baz=_lowerCamelCase , opt=_lowerCamelCase ) ) def _a ( self : List[Any] ): """simple docstring""" A_ : str = HfArgumentParser(_lowerCamelCase ) A_ : Optional[int] = argparse.ArgumentParser() expected.add_argument( '''--foo''' , default='''toto''' , choices=['''titi''', '''toto''', 42] , type=make_choice_type_function(['''titi''', '''toto''', 42] ) , ) self.argparsersEqual(_lowerCamelCase , _lowerCamelCase ) A_ : str = parser.parse_args([] ) self.assertEqual(args.foo , '''toto''' ) A_ : List[Any] = parser.parse_args_into_dataclasses([] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.toto ) A_ : int = parser.parse_args(['''--foo''', '''titi'''] ) self.assertEqual(args.foo , '''titi''' ) A_ : Dict = parser.parse_args_into_dataclasses(['''--foo''', '''titi'''] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.titi ) A_ : Tuple = parser.parse_args(['''--foo''', '''42'''] ) self.assertEqual(args.foo , 42 ) A_ : List[str] = parser.parse_args_into_dataclasses(['''--foo''', '''42'''] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo ) def _a ( self : Optional[int] ): """simple docstring""" @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = "toto" A_ : List[str] = HfArgumentParser(_lowerCamelCase ) A_ : Tuple = argparse.ArgumentParser() expected.add_argument( '''--foo''' , default='''toto''' , choices=('''titi''', '''toto''', 42) , type=make_choice_type_function(['''titi''', '''toto''', 42] ) , ) self.argparsersEqual(_lowerCamelCase , _lowerCamelCase ) A_ : Tuple = parser.parse_args([] ) self.assertEqual(args.foo , '''toto''' ) A_ : List[str] = parser.parse_args(['''--foo''', '''titi'''] ) self.assertEqual(args.foo , '''titi''' ) A_ : int = parser.parse_args(['''--foo''', '''42'''] ) self.assertEqual(args.foo , 42 ) def _a ( self : Dict ): """simple docstring""" A_ : int = HfArgumentParser(_lowerCamelCase ) A_ : List[Any] = argparse.ArgumentParser() expected.add_argument('''--foo_int''' , nargs='''+''' , default=[] , type=_lowerCamelCase ) expected.add_argument('''--bar_int''' , nargs='''+''' , default=[1, 2, 3] , type=_lowerCamelCase ) expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=_lowerCamelCase ) expected.add_argument('''--foo_float''' , nargs='''+''' , default=[0.1, 0.2, 0.3] , type=_lowerCamelCase ) self.argparsersEqual(_lowerCamelCase , _lowerCamelCase ) A_ : Optional[int] = parser.parse_args([] ) self.assertEqual( _lowerCamelCase , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['''Hallo''', '''Bonjour''', '''Hello'''] , foo_float=[0.1, 0.2, 0.3] ) , ) A_ : str = parser.parse_args('''--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'''.split() ) self.assertEqual(_lowerCamelCase , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['''a''', '''b''', '''c'''] , foo_float=[0.1, 0.7] ) ) def _a ( self : Dict ): """simple docstring""" A_ : Optional[Any] = argparse.ArgumentParser() expected.add_argument('''--foo''' , default=_lowerCamelCase , type=_lowerCamelCase ) expected.add_argument('''--bar''' , default=_lowerCamelCase , type=_lowerCamelCase , help='''help message''' ) expected.add_argument('''--baz''' , default=_lowerCamelCase , type=_lowerCamelCase ) expected.add_argument('''--ces''' , nargs='''+''' , default=[] , type=_lowerCamelCase ) expected.add_argument('''--des''' , nargs='''+''' , default=[] , type=_lowerCamelCase ) A_ : Tuple = [OptionalExample] if is_python_no_less_than_3_10: dataclass_types.append(_lowerCamelCase ) for dataclass_type in dataclass_types: A_ : int = HfArgumentParser(_lowerCamelCase ) self.argparsersEqual(_lowerCamelCase , _lowerCamelCase ) A_ : List[Any] = parser.parse_args([] ) self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , bar=_lowerCamelCase , baz=_lowerCamelCase , ces=[] , des=[] ) ) A_ : Optional[Any] = parser.parse_args('''--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'''.split() ) self.assertEqual(_lowerCamelCase , Namespace(foo=12 , bar=3.14 , baz='''42''' , ces=['''a''', '''b''', '''c'''] , des=[1, 2, 3] ) ) def _a ( self : List[Any] ): """simple docstring""" A_ : List[Any] = HfArgumentParser(_lowerCamelCase ) A_ : Dict = argparse.ArgumentParser() expected.add_argument('''--required_list''' , nargs='''+''' , type=_lowerCamelCase , required=_lowerCamelCase ) expected.add_argument('''--required_str''' , type=_lowerCamelCase , required=_lowerCamelCase ) expected.add_argument( '''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=_lowerCamelCase , ) self.argparsersEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : Optional[Any] ): """simple docstring""" A_ : Union[str, Any] = HfArgumentParser(_lowerCamelCase ) A_ : List[Any] = argparse.ArgumentParser() expected.add_argument('''--foo''' , type=_lowerCamelCase , required=_lowerCamelCase ) expected.add_argument( '''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=_lowerCamelCase , ) expected.add_argument('''--opt''' , type=_lowerCamelCase , default=_lowerCamelCase ) expected.add_argument('''--baz''' , default='''toto''' , type=_lowerCamelCase , help='''help message''' ) expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=_lowerCamelCase ) self.argparsersEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : Tuple ): """simple docstring""" A_ : List[Any] = HfArgumentParser(_lowerCamelCase ) A_ : Union[str, Any] = { '''foo''': 12, '''bar''': 3.14, '''baz''': '''42''', '''flag''': True, } A_ : Optional[int] = parser.parse_dict(_lowerCamelCase )[0] A_ : str = BasicExample(**_lowerCamelCase ) self.assertEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : List[str] ): """simple docstring""" A_ : Any = HfArgumentParser(_lowerCamelCase ) A_ : List[str] = { '''foo''': 12, '''bar''': 3.14, '''baz''': '''42''', '''flag''': True, '''extra''': 42, } self.assertRaises(_lowerCamelCase , parser.parse_dict , _lowerCamelCase , allow_extra_keys=_lowerCamelCase ) def _a ( self : Optional[Any] ): """simple docstring""" A_ : Union[str, Any] = HfArgumentParser(_lowerCamelCase ) A_ : List[str] = { '''foo''': 12, '''bar''': 3.14, '''baz''': '''42''', '''flag''': True, } with tempfile.TemporaryDirectory() as tmp_dir: A_ : Tuple = os.path.join(_lowerCamelCase , '''temp_json''' ) os.mkdir(_lowerCamelCase ) with open(temp_local_path + '''.json''' , '''w+''' ) as f: json.dump(_lowerCamelCase , _lowerCamelCase ) A_ : List[str] = parser.parse_yaml_file(Path(temp_local_path + '''.json''' ) )[0] A_ : Optional[Any] = BasicExample(**_lowerCamelCase ) self.assertEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : int ): """simple docstring""" A_ : int = HfArgumentParser(_lowerCamelCase ) A_ : Tuple = { '''foo''': 12, '''bar''': 3.14, '''baz''': '''42''', '''flag''': True, } with tempfile.TemporaryDirectory() as tmp_dir: A_ : int = os.path.join(_lowerCamelCase , '''temp_yaml''' ) os.mkdir(_lowerCamelCase ) with open(temp_local_path + '''.yaml''' , '''w+''' ) as f: yaml.dump(_lowerCamelCase , _lowerCamelCase ) A_ : Optional[Any] = parser.parse_yaml_file(Path(temp_local_path + '''.yaml''' ) )[0] A_ : int = BasicExample(**_lowerCamelCase ) self.assertEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : Union[str, Any] ): """simple docstring""" A_ : Dict = HfArgumentParser(_lowerCamelCase ) self.assertIsNotNone(_lowerCamelCase )
4
0
'''simple docstring''' import re from ..utils import cached_file # docstyle-ignore snake_case__ = """ Human: <<task>> Assistant: """ snake_case__ = """huggingface-tools/default-prompts""" snake_case__ = {"""chat""": """chat_prompt_template.txt""", """run""": """run_prompt_template.txt"""} def snake_case__ ( lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Tuple , lowerCamelCase__ : Dict="run" ) -> Union[str, Any]: if prompt_or_repo_id is None: A_ : Optional[int] = DEFAULT_PROMPTS_REPO # prompt is considered a repo ID when it does not contain any kind of space if re.search('''\\s''' , lowerCamelCase__ ) is not None: return prompt_or_repo_id A_ : Optional[Any] = cached_file( lowerCamelCase__ , PROMPT_FILES[mode] , repo_type='''dataset''' , user_agent={'''agent''': agent_name} ) with open(lowerCamelCase__ , '''r''' , encoding='''utf-8''' ) as f: return f.read()
360
'''simple docstring''' import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoImageProcessor, ViTImageProcessor from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test sys.path.append(str(Path(__file__).parent.parent / """utils""")) from test_module.custom_image_processing import CustomImageProcessor # noqa E402 snake_case__ = get_tests_dir("""fixtures""") class UpperCamelCase_ (unittest.TestCase ): """simple docstring""" def _a ( self : List[str] ): """simple docstring""" A_ : List[Any] = mock.Mock() A_ : List[str] = 500 A_ : Tuple = {} A_ : int = HTTPError A_ : Optional[Any] = {} # Download this model to make sure it's in the cache. A_ : Tuple = ViTImageProcessor.from_pretrained('''hf-internal-testing/tiny-random-vit''' ) # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch('''requests.Session.request''' , return_value=_lowerCamelCase ) as mock_head: A_ : List[Any] = ViTImageProcessor.from_pretrained('''hf-internal-testing/tiny-random-vit''' ) # This check we did call the fake head request mock_head.assert_called() def _a ( self : Tuple ): """simple docstring""" A_ : Tuple = ViTImageProcessor.from_pretrained( '''https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json''' ) def _a ( self : Dict ): """simple docstring""" with self.assertRaises(_lowerCamelCase ): # config is in subfolder, the following should not work without specifying the subfolder A_ : Any = AutoImageProcessor.from_pretrained('''hf-internal-testing/stable-diffusion-all-variants''' ) A_ : Tuple = AutoImageProcessor.from_pretrained( '''hf-internal-testing/stable-diffusion-all-variants''' , subfolder='''feature_extractor''' ) self.assertIsNotNone(_lowerCamelCase ) @is_staging_test class UpperCamelCase_ (unittest.TestCase ): """simple docstring""" @classmethod def _a ( cls : Tuple ): """simple docstring""" A_ : int = TOKEN HfFolder.save_token(_lowerCamelCase ) @classmethod def _a ( cls : str ): """simple docstring""" try: delete_repo(token=cls._token , repo_id='''test-image-processor''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''valid_org/test-image-processor-org''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''test-dynamic-image-processor''' ) except HTTPError: pass def _a ( self : List[Any] ): """simple docstring""" A_ : Dict = ViTImageProcessor.from_pretrained(_lowerCamelCase ) image_processor.push_to_hub('''test-image-processor''' , use_auth_token=self._token ) A_ : Optional[int] = ViTImageProcessor.from_pretrained(f'{USER}/test-image-processor' ) for k, v in image_processor.__dict__.items(): self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) ) # Reset repo delete_repo(token=self._token , repo_id='''test-image-processor''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained( _lowerCamelCase , repo_id='''test-image-processor''' , push_to_hub=_lowerCamelCase , use_auth_token=self._token ) A_ : List[Any] = ViTImageProcessor.from_pretrained(f'{USER}/test-image-processor' ) for k, v in image_processor.__dict__.items(): self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) ) def _a ( self : Optional[Any] ): """simple docstring""" A_ : int = ViTImageProcessor.from_pretrained(_lowerCamelCase ) image_processor.push_to_hub('''valid_org/test-image-processor''' , use_auth_token=self._token ) A_ : List[str] = ViTImageProcessor.from_pretrained('''valid_org/test-image-processor''' ) for k, v in image_processor.__dict__.items(): self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) ) # Reset repo delete_repo(token=self._token , repo_id='''valid_org/test-image-processor''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained( _lowerCamelCase , repo_id='''valid_org/test-image-processor-org''' , push_to_hub=_lowerCamelCase , use_auth_token=self._token ) A_ : Any = ViTImageProcessor.from_pretrained('''valid_org/test-image-processor-org''' ) for k, v in image_processor.__dict__.items(): self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) ) def _a ( self : Optional[Any] ): """simple docstring""" CustomImageProcessor.register_for_auto_class() A_ : Any = CustomImageProcessor.from_pretrained(_lowerCamelCase ) image_processor.push_to_hub('''test-dynamic-image-processor''' , use_auth_token=self._token ) # This has added the proper auto_map field to the config self.assertDictEqual( image_processor.auto_map , {'''AutoImageProcessor''': '''custom_image_processing.CustomImageProcessor'''} , ) A_ : str = AutoImageProcessor.from_pretrained( f'{USER}/test-dynamic-image-processor' , trust_remote_code=_lowerCamelCase ) # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module self.assertEqual(new_image_processor.__class__.__name__ , '''CustomImageProcessor''' )
4
0
'''simple docstring''' snake_case__ = { """joule""": 1.0, """kilojoule""": 10_00, """megajoule""": 1_00_00_00, """gigajoule""": 10_00_00_00_00, """wattsecond""": 1.0, """watthour""": 36_00, """kilowatthour""": 3_60_00_00, """newtonmeter""": 1.0, """calorie_nutr""": 41_86.8, """kilocalorie_nutr""": 4_18_68_00.00, """electronvolt""": 1.6_0217_6634e-19, """britishthermalunit_it""": 10_55.0_55_85, """footpound""": 1.3_5_5_8_1_8, } def snake_case__ ( lowerCamelCase__ : str , lowerCamelCase__ : str , lowerCamelCase__ : float ) -> float: if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION: A_ : Dict = ( f'Incorrect \'from_type\' or \'to_type\' value: {from_type!r}, {to_type!r}\n' f'Valid values are: {", ".join(lowerCamelCase__ )}' ) raise ValueError(lowerCamelCase__ ) return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type] if __name__ == "__main__": import doctest doctest.testmod()
361
'''simple docstring''' # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import torch from ..models.clipseg import CLIPSegForImageSegmentation from ..utils import is_vision_available, requires_backends from .base import PipelineTool if is_vision_available(): from PIL import Image class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = ( 'This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image.' 'It takes two arguments named `image` which should be the original image, and `label` which should be a text ' 'describing the elements what should be identified in the segmentation mask. The tool returns the mask.' ) _lowerCAmelCase = 'CIDAS/clipseg-rd64-refined' _lowerCAmelCase = 'image_segmenter' _lowerCAmelCase = CLIPSegForImageSegmentation _lowerCAmelCase = ['image', 'text'] _lowerCAmelCase = ['image'] def __init__( self : Optional[int] , *_lowerCamelCase : Optional[int] , **_lowerCamelCase : Union[str, Any] ): """simple docstring""" requires_backends(self , ['''vision'''] ) super().__init__(*_lowerCamelCase , **_lowerCamelCase ) def _a ( self : List[str] , _lowerCamelCase : "Image" , _lowerCamelCase : str ): """simple docstring""" return self.pre_processor(text=[label] , images=[image] , padding=_lowerCamelCase , return_tensors='''pt''' ) def _a ( self : Union[str, Any] , _lowerCamelCase : Optional[int] ): """simple docstring""" with torch.no_grad(): A_ : Optional[int] = self.model(**_lowerCamelCase ).logits return logits def _a ( self : List[str] , _lowerCamelCase : Optional[int] ): """simple docstring""" A_ : int = outputs.cpu().detach().numpy() A_ : Tuple = 0 A_ : List[str] = 1 return Image.fromarray((array * 255).astype(np.uinta ) )
4
0
'''simple docstring''' import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_torch_available from transformers.testing_utils import require_torch, torch_device if is_torch_available(): from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments @require_torch class UpperCamelCase_ (unittest.TestCase ): """simple docstring""" def _a ( self : List[str] , _lowerCamelCase : Any ): """simple docstring""" for model_result in results.values(): for batch_size, sequence_length in zip(model_result['''bs'''] , model_result['''ss'''] ): A_ : Optional[int] = model_result['''result'''][batch_size][sequence_length] self.assertIsNotNone(_lowerCamelCase ) def _a ( self : Dict ): """simple docstring""" A_ : Dict = '''sshleifer/tiny-gpt2''' A_ : Optional[int] = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_lowerCamelCase , inference=_lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowerCamelCase , ) A_ : Union[str, Any] = PyTorchBenchmark(_lowerCamelCase ) A_ : int = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _a ( self : Optional[Any] ): """simple docstring""" A_ : Union[str, Any] = '''sgugger/tiny-distilbert-classification''' A_ : int = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_lowerCamelCase , inference=_lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowerCamelCase , only_pretrain_model=_lowerCamelCase , ) A_ : List[str] = PyTorchBenchmark(_lowerCamelCase ) A_ : Optional[int] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _a ( self : List[Any] ): """simple docstring""" A_ : Union[str, Any] = '''sshleifer/tiny-gpt2''' A_ : Any = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_lowerCamelCase , inference=_lowerCamelCase , torchscript=_lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowerCamelCase , ) A_ : List[Any] = PyTorchBenchmark(_lowerCamelCase ) A_ : Union[str, Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) @unittest.skipIf(torch_device == '''cpu''' , '''Cant do half precision''' ) def _a ( self : Union[str, Any] ): """simple docstring""" A_ : List[str] = '''sshleifer/tiny-gpt2''' A_ : Dict = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_lowerCamelCase , inference=_lowerCamelCase , fpaa=_lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowerCamelCase , ) A_ : int = PyTorchBenchmark(_lowerCamelCase ) A_ : Optional[int] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _a ( self : Tuple ): """simple docstring""" A_ : int = '''sshleifer/tiny-gpt2''' A_ : Dict = AutoConfig.from_pretrained(_lowerCamelCase ) # set architectures equal to `None` A_ : List[str] = None A_ : str = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_lowerCamelCase , inference=_lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowerCamelCase , ) A_ : Optional[int] = PyTorchBenchmark(_lowerCamelCase , configs=[config] ) A_ : Optional[int] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _a ( self : Union[str, Any] ): """simple docstring""" A_ : Union[str, Any] = '''sshleifer/tiny-gpt2''' A_ : List[Any] = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_lowerCamelCase , inference=_lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowerCamelCase , ) A_ : List[Any] = PyTorchBenchmark(_lowerCamelCase ) A_ : Any = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) @unittest.skipIf(torch_device == '''cpu''' , '''Can\'t do half precision''' ) def _a ( self : Optional[int] ): """simple docstring""" A_ : Optional[int] = '''sshleifer/tiny-gpt2''' A_ : Union[str, Any] = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_lowerCamelCase , inference=_lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , fpaa=_lowerCamelCase , multi_process=_lowerCamelCase , ) A_ : int = PyTorchBenchmark(_lowerCamelCase ) A_ : str = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def _a ( self : Tuple ): """simple docstring""" A_ : Optional[Any] = '''sshleifer/tiny-gpt2''' A_ : int = AutoConfig.from_pretrained(_lowerCamelCase ) A_ : List[str] = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_lowerCamelCase , inference=_lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowerCamelCase , ) A_ : str = PyTorchBenchmark(_lowerCamelCase , configs=[config] ) A_ : List[str] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _a ( self : Union[str, Any] ): """simple docstring""" A_ : int = '''sshleifer/tinier_bart''' A_ : str = AutoConfig.from_pretrained(_lowerCamelCase ) A_ : Union[str, Any] = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_lowerCamelCase , inference=_lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowerCamelCase , ) A_ : Any = PyTorchBenchmark(_lowerCamelCase , configs=[config] ) A_ : Dict = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _a ( self : Dict ): """simple docstring""" A_ : Union[str, Any] = '''sshleifer/tiny-gpt2''' A_ : Optional[int] = AutoConfig.from_pretrained(_lowerCamelCase ) A_ : Any = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_lowerCamelCase , inference=_lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowerCamelCase , ) A_ : List[str] = PyTorchBenchmark(_lowerCamelCase , configs=[config] ) A_ : Optional[Any] = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def _a ( self : Tuple ): """simple docstring""" A_ : Optional[Any] = '''sshleifer/tinier_bart''' A_ : List[Any] = AutoConfig.from_pretrained(_lowerCamelCase ) A_ : str = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_lowerCamelCase , inference=_lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowerCamelCase , ) A_ : List[Any] = PyTorchBenchmark(_lowerCamelCase , configs=[config] ) A_ : int = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def _a ( self : int ): """simple docstring""" A_ : str = '''sshleifer/tiny-gpt2''' with tempfile.TemporaryDirectory() as tmp_dir: A_ : Tuple = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_lowerCamelCase , inference=_lowerCamelCase , save_to_csv=_lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(_lowerCamelCase , '''inf_time.csv''' ) , train_memory_csv_file=os.path.join(_lowerCamelCase , '''train_mem.csv''' ) , inference_memory_csv_file=os.path.join(_lowerCamelCase , '''inf_mem.csv''' ) , train_time_csv_file=os.path.join(_lowerCamelCase , '''train_time.csv''' ) , env_info_csv_file=os.path.join(_lowerCamelCase , '''env.csv''' ) , multi_process=_lowerCamelCase , ) A_ : int = PyTorchBenchmark(_lowerCamelCase ) benchmark.run() self.assertTrue(Path(os.path.join(_lowerCamelCase , '''inf_time.csv''' ) ).exists() ) self.assertTrue(Path(os.path.join(_lowerCamelCase , '''train_time.csv''' ) ).exists() ) self.assertTrue(Path(os.path.join(_lowerCamelCase , '''inf_mem.csv''' ) ).exists() ) self.assertTrue(Path(os.path.join(_lowerCamelCase , '''train_mem.csv''' ) ).exists() ) self.assertTrue(Path(os.path.join(_lowerCamelCase , '''env.csv''' ) ).exists() ) def _a ( self : Optional[int] ): """simple docstring""" A_ : Any = '''sshleifer/tiny-gpt2''' def _check_summary_is_not_empty(_lowerCamelCase : Dict ): self.assertTrue(hasattr(_lowerCamelCase , '''sequential''' ) ) self.assertTrue(hasattr(_lowerCamelCase , '''cumulative''' ) ) self.assertTrue(hasattr(_lowerCamelCase , '''current''' ) ) self.assertTrue(hasattr(_lowerCamelCase , '''total''' ) ) with tempfile.TemporaryDirectory() as tmp_dir: A_ : List[str] = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_lowerCamelCase , inference=_lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(_lowerCamelCase , '''log.txt''' ) , log_print=_lowerCamelCase , trace_memory_line_by_line=_lowerCamelCase , multi_process=_lowerCamelCase , ) A_ : str = PyTorchBenchmark(_lowerCamelCase ) A_ : Any = benchmark.run() _check_summary_is_not_empty(result.inference_summary ) _check_summary_is_not_empty(result.train_summary ) self.assertTrue(Path(os.path.join(_lowerCamelCase , '''log.txt''' ) ).exists() )
362
'''simple docstring''' from collections.abc import Sequence def snake_case__ ( lowerCamelCase__ : Sequence[float] , lowerCamelCase__ : bool = False ) -> float: if not arr: return 0 A_ : Union[str, Any] = 0 if allow_empty_subarrays else float('''-inf''' ) A_ : str = 0.0 for num in arr: A_ : Any = max(0 if allow_empty_subarrays else num , curr_sum + num ) A_ : Tuple = max(lowerCamelCase__ , lowerCamelCase__ ) return max_sum if __name__ == "__main__": from doctest import testmod testmod() snake_case__ = [-2, 1, -3, 4, -1, 2, 1, -5, 4] print(F'{max_subarray_sum(nums) = }')
4
0
'''simple docstring''' import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing the experiment tracking capability, # and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To help focus on the differences in the code, building `DataLoaders` # was refactored into its own function. # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## snake_case__ = 16 snake_case__ = 32 def snake_case__ ( lowerCamelCase__ : Accelerator , lowerCamelCase__ : int = 1_6 ) -> Any: A_ : str = AutoTokenizer.from_pretrained('''bert-base-cased''' ) A_ : Optional[Any] = load_dataset('''glue''' , '''mrpc''' ) def tokenize_function(lowerCamelCase__ : Dict ): # max_length=None => use the model max length (it's actually the default) A_ : List[str] = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=lowerCamelCase__ , max_length=lowerCamelCase__ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): A_ : Tuple = datasets.map( lowerCamelCase__ , batched=lowerCamelCase__ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library A_ : str = tokenized_datasets.rename_column('''label''' , '''labels''' ) def collate_fn(lowerCamelCase__ : Optional[Any] ): # On TPU it's best to pad everything to the same length or training will be very slow. A_ : Dict = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": A_ : int = 1_6 elif accelerator.mixed_precision != "no": A_ : Tuple = 8 else: A_ : List[Any] = None return tokenizer.pad( lowerCamelCase__ , padding='''longest''' , max_length=lowerCamelCase__ , pad_to_multiple_of=lowerCamelCase__ , return_tensors='''pt''' , ) # Instantiate dataloaders. A_ : Dict = DataLoader( tokenized_datasets['''train'''] , shuffle=lowerCamelCase__ , collate_fn=lowerCamelCase__ , batch_size=lowerCamelCase__ ) A_ : Optional[Any] = DataLoader( tokenized_datasets['''validation'''] , shuffle=lowerCamelCase__ , collate_fn=lowerCamelCase__ , batch_size=lowerCamelCase__ ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1": from accelerate.test_utils.training import mocked_dataloaders snake_case__ = mocked_dataloaders # noqa: F811 def snake_case__ ( lowerCamelCase__ : int , lowerCamelCase__ : List[Any] ) -> List[str]: # For testing only if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , lowerCamelCase__ ) == "1": A_ : str = 2 # Initialize Accelerator # New Code # # We pass in "all" to `log_with` to grab all available trackers in the environment # Note: If using a custom `Tracker` class, should be passed in here such as: # >>> log_with = ["all", MyCustomTrackerClassInstance()] if args.with_tracking: A_ : Any = Accelerator( cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='''all''' , project_dir=args.project_dir ) else: A_ : int = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs A_ : Optional[Any] = config['''lr'''] A_ : List[Any] = int(config['''num_epochs'''] ) A_ : Union[str, Any] = int(config['''seed'''] ) A_ : int = int(config['''batch_size'''] ) set_seed(lowerCamelCase__ ) A_ : str = get_dataloaders(lowerCamelCase__ , lowerCamelCase__ ) A_ : int = evaluate.load('''glue''' , '''mrpc''' ) # If the batch size is too big we use gradient accumulation A_ : Optional[int] = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: A_ : Dict = batch_size // MAX_GPU_BATCH_SIZE A_ : Optional[Any] = MAX_GPU_BATCH_SIZE # Instantiate the model (we build the model here so that the seed also control new weights initialization) A_ : Optional[int] = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=lowerCamelCase__ ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). A_ : Tuple = model.to(accelerator.device ) # Instantiate optimizer A_ : List[str] = AdamW(params=model.parameters() , lr=lowerCamelCase__ ) # Instantiate scheduler A_ : Dict = get_linear_schedule_with_warmup( optimizer=lowerCamelCase__ , num_warmup_steps=1_0_0 , num_training_steps=(len(lowerCamelCase__ ) * num_epochs) // gradient_accumulation_steps , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. A_ : Dict = accelerator.prepare( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) # New Code # # We need to initialize the trackers we use. Overall configurations can also be stored if args.with_tracking: A_ : Dict = os.path.split(lowerCamelCase__ )[-1].split('''.''' )[0] accelerator.init_trackers(lowerCamelCase__ , lowerCamelCase__ ) # Now we train the model for epoch in range(lowerCamelCase__ ): model.train() # New Code # # For our tracking example, we will log the total loss of each epoch if args.with_tracking: A_ : Tuple = 0 for step, batch in enumerate(lowerCamelCase__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) A_ : Dict = model(**lowerCamelCase__ ) A_ : Tuple = outputs.loss # New Code # if args.with_tracking: total_loss += loss.detach().float() A_ : str = loss / gradient_accumulation_steps accelerator.backward(lowerCamelCase__ ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(lowerCamelCase__ ): # We could avoid this line since we set the accelerator with `device_placement=True` (the default). batch.to(accelerator.device ) with torch.no_grad(): A_ : str = model(**lowerCamelCase__ ) A_ : Union[str, Any] = outputs.logits.argmax(dim=-1 ) A_ : List[str] = accelerator.gather_for_metrics((predictions, batch['''labels''']) ) metric.add_batch( predictions=lowerCamelCase__ , references=lowerCamelCase__ , ) A_ : Union[str, Any] = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f'epoch {epoch}:' , lowerCamelCase__ ) # New Code # # To actually log, we call `Accelerator.log` # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int` if args.with_tracking: accelerator.log( { '''accuracy''': eval_metric['''accuracy'''], '''f1''': eval_metric['''f1'''], '''train_loss''': total_loss.item() / len(lowerCamelCase__ ), '''epoch''': epoch, } , step=lowerCamelCase__ , ) # New Code # # When a run is finished, you should call `accelerator.end_training()` # to close all of the open trackers if args.with_tracking: accelerator.end_training() def snake_case__ ( ) -> Optional[Any]: A_ : List[Any] = argparse.ArgumentParser(description='''Simple example of training script.''' ) parser.add_argument( '''--mixed_precision''' , type=lowerCamelCase__ , default=lowerCamelCase__ , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose''' '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.''' '''and an Nvidia Ampere GPU.''' , ) parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' ) parser.add_argument( '''--with_tracking''' , action='''store_true''' , help='''Whether to load in all available experiment trackers from the environment and use them for logging.''' , ) parser.add_argument( '''--project_dir''' , type=lowerCamelCase__ , default='''logs''' , help='''Location on where to store experiment tracking logs` and relevent project information''' , ) A_ : Union[str, Any] = parser.parse_args() A_ : Any = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 4_2, '''batch_size''': 1_6} training_function(lowerCamelCase__ , lowerCamelCase__ ) if __name__ == "__main__": main()
363
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging snake_case__ = logging.get_logger(__name__) snake_case__ = { """facebook/s2t-wav2vec2-large-en-de""": ( """https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json""" ), # See all Speech2Text models at https://huggingface.co/models?filter=speech2text2 } class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = 'speech_to_text_2' _lowerCAmelCase = ['past_key_values'] _lowerCAmelCase = {'num_attention_heads': 'decoder_attention_heads', 'hidden_size': 'd_model'} def __init__( self : Optional[Any] , _lowerCamelCase : Optional[Any]=10000 , _lowerCamelCase : List[Any]=6 , _lowerCamelCase : int=2048 , _lowerCamelCase : Dict=4 , _lowerCamelCase : str=0.0 , _lowerCamelCase : int=True , _lowerCamelCase : int="relu" , _lowerCamelCase : Any=256 , _lowerCamelCase : List[Any]=0.1 , _lowerCamelCase : Tuple=0.0 , _lowerCamelCase : Union[str, Any]=0.0 , _lowerCamelCase : Optional[Any]=0.02 , _lowerCamelCase : int=2 , _lowerCamelCase : List[str]=True , _lowerCamelCase : str=1 , _lowerCamelCase : List[Any]=0 , _lowerCamelCase : Optional[int]=2 , _lowerCamelCase : Tuple=1024 , **_lowerCamelCase : int , ): """simple docstring""" A_ : Optional[int] = vocab_size A_ : Tuple = d_model A_ : List[str] = decoder_ffn_dim A_ : str = decoder_layers A_ : Any = decoder_attention_heads A_ : int = dropout A_ : str = attention_dropout A_ : Optional[int] = activation_dropout A_ : str = activation_function A_ : List[Any] = init_std A_ : Union[str, Any] = decoder_layerdrop A_ : Any = use_cache A_ : Optional[Any] = decoder_layers A_ : Optional[int] = scale_embedding # scale factor will be sqrt(d_model) if True A_ : Optional[Any] = max_target_positions super().__init__( pad_token_id=_lowerCamelCase , bos_token_id=_lowerCamelCase , eos_token_id=_lowerCamelCase , decoder_start_token_id=_lowerCamelCase , **_lowerCamelCase , )
4
0
import argparse from collections import OrderedDict from pathlib import Path import requests import torch from PIL import Image from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor from transformers.utils import logging logging.set_verbosity_info() snake_case__ = logging.get_logger(__name__) def snake_case__ ( lowerCamelCase__ : Optional[Any] ) -> Any: A_ : List[str] = OrderedDict() for key, value in state_dict.items(): if key.startswith('''module.encoder''' ): A_ : Dict = key.replace('''module.encoder''' , '''glpn.encoder''' ) if key.startswith('''module.decoder''' ): A_ : Dict = key.replace('''module.decoder''' , '''decoder.stages''' ) if "patch_embed" in key: # replace for example patch_embed1 by patch_embeddings.0 A_ : Dict = key[key.find('''patch_embed''' ) + len('''patch_embed''' )] A_ : Tuple = key.replace(f'patch_embed{idx}' , f'patch_embeddings.{int(lowerCamelCase__ )-1}' ) if "norm" in key: A_ : Dict = key.replace('''norm''' , '''layer_norm''' ) if "glpn.encoder.layer_norm" in key: # replace for example layer_norm1 by layer_norm.0 A_ : Union[str, Any] = key[key.find('''glpn.encoder.layer_norm''' ) + len('''glpn.encoder.layer_norm''' )] A_ : Any = key.replace(f'layer_norm{idx}' , f'layer_norm.{int(lowerCamelCase__ )-1}' ) if "layer_norm1" in key: A_ : Union[str, Any] = key.replace('''layer_norm1''' , '''layer_norm_1''' ) if "layer_norm2" in key: A_ : int = key.replace('''layer_norm2''' , '''layer_norm_2''' ) if "block" in key: # replace for example block1 by block.0 A_ : Any = key[key.find('''block''' ) + len('''block''' )] A_ : Optional[int] = key.replace(f'block{idx}' , f'block.{int(lowerCamelCase__ )-1}' ) if "attn.q" in key: A_ : Any = key.replace('''attn.q''' , '''attention.self.query''' ) if "attn.proj" in key: A_ : Optional[Any] = key.replace('''attn.proj''' , '''attention.output.dense''' ) if "attn" in key: A_ : List[str] = key.replace('''attn''' , '''attention.self''' ) if "fc1" in key: A_ : Tuple = key.replace('''fc1''' , '''dense1''' ) if "fc2" in key: A_ : str = key.replace('''fc2''' , '''dense2''' ) if "linear_pred" in key: A_ : List[Any] = key.replace('''linear_pred''' , '''classifier''' ) if "linear_fuse" in key: A_ : List[str] = key.replace('''linear_fuse.conv''' , '''linear_fuse''' ) A_ : str = key.replace('''linear_fuse.bn''' , '''batch_norm''' ) if "linear_c" in key: # replace for example linear_c4 by linear_c.3 A_ : Union[str, Any] = key[key.find('''linear_c''' ) + len('''linear_c''' )] A_ : int = key.replace(f'linear_c{idx}' , f'linear_c.{int(lowerCamelCase__ )-1}' ) if "bot_conv" in key: A_ : str = key.replace('''bot_conv''' , '''0.convolution''' ) if "skip_conv1" in key: A_ : Dict = key.replace('''skip_conv1''' , '''1.convolution''' ) if "skip_conv2" in key: A_ : Dict = key.replace('''skip_conv2''' , '''2.convolution''' ) if "fusion1" in key: A_ : Tuple = key.replace('''fusion1''' , '''1.fusion''' ) if "fusion2" in key: A_ : Dict = key.replace('''fusion2''' , '''2.fusion''' ) if "fusion3" in key: A_ : int = key.replace('''fusion3''' , '''3.fusion''' ) if "fusion" in key and "conv" in key: A_ : List[str] = key.replace('''conv''' , '''convolutional_layer''' ) if key.startswith('''module.last_layer_depth''' ): A_ : List[str] = key.replace('''module.last_layer_depth''' , '''head.head''' ) A_ : Tuple = value return new_state_dict def snake_case__ ( lowerCamelCase__ : Optional[int] , lowerCamelCase__ : int ) -> Tuple: # for each of the encoder blocks: for i in range(config.num_encoder_blocks ): for j in range(config.depths[i] ): # read in weights + bias of keys and values (which is a single matrix in the original implementation) A_ : List[Any] = state_dict.pop(f'glpn.encoder.block.{i}.{j}.attention.self.kv.weight' ) A_ : List[str] = state_dict.pop(f'glpn.encoder.block.{i}.{j}.attention.self.kv.bias' ) # next, add keys and values (in that order) to the state dict A_ : str = kv_weight[ : config.hidden_sizes[i], : ] A_ : Any = kv_bias[: config.hidden_sizes[i]] A_ : Any = kv_weight[ config.hidden_sizes[i] :, : ] A_ : List[str] = kv_bias[config.hidden_sizes[i] :] def snake_case__ ( ) -> Tuple: A_ : List[Any] = '''http://images.cocodataset.org/val2017/000000039769.jpg''' A_ : List[Any] = Image.open(requests.get(lowerCamelCase__ , stream=lowerCamelCase__ ).raw ) return image @torch.no_grad() def snake_case__ ( lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Any , lowerCamelCase__ : int=False , lowerCamelCase__ : Union[str, Any]=None ) -> str: A_ : List[str] = GLPNConfig(hidden_sizes=[6_4, 1_2_8, 3_2_0, 5_1_2] , decoder_hidden_size=6_4 , depths=[3, 8, 2_7, 3] ) # load image processor (only resize + rescale) A_ : List[Any] = GLPNImageProcessor() # prepare image A_ : Optional[int] = prepare_img() A_ : List[Any] = image_processor(images=lowerCamelCase__ , return_tensors='''pt''' ).pixel_values logger.info('''Converting model...''' ) # load original state dict A_ : List[str] = torch.load(lowerCamelCase__ , map_location=torch.device('''cpu''' ) ) # rename keys A_ : Optional[int] = rename_keys(lowerCamelCase__ ) # key and value matrices need special treatment read_in_k_v(lowerCamelCase__ , lowerCamelCase__ ) # create HuggingFace model and load state dict A_ : Tuple = GLPNForDepthEstimation(lowerCamelCase__ ) model.load_state_dict(lowerCamelCase__ ) model.eval() # forward pass A_ : Union[str, Any] = model(lowerCamelCase__ ) A_ : Optional[int] = outputs.predicted_depth # verify output if model_name is not None: if "nyu" in model_name: A_ : Tuple = torch.tensor( [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]] ) elif "kitti" in model_name: A_ : Union[str, Any] = torch.tensor( [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]] ) else: raise ValueError(f'Unknown model name: {model_name}' ) A_ : List[Any] = torch.Size([1, 4_8_0, 6_4_0] ) assert predicted_depth.shape == expected_shape assert torch.allclose(predicted_depth[0, :3, :3] , lowerCamelCase__ , atol=1e-4 ) print('''Looks ok!''' ) # finally, push to hub if required if push_to_hub: logger.info('''Pushing model and image processor to the hub...''' ) model.push_to_hub( repo_path_or_name=Path(lowerCamelCase__ , lowerCamelCase__ ) , organization='''nielsr''' , commit_message='''Add model''' , use_temp_dir=lowerCamelCase__ , ) image_processor.push_to_hub( repo_path_or_name=Path(lowerCamelCase__ , lowerCamelCase__ ) , organization='''nielsr''' , commit_message='''Add image processor''' , use_temp_dir=lowerCamelCase__ , ) if __name__ == "__main__": snake_case__ = argparse.ArgumentParser() parser.add_argument( """--checkpoint_path""", default=None, type=str, help="""Path to the original PyTorch checkpoint (.pth file).""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub.""" ) parser.add_argument( """--model_name""", default="""glpn-kitti""", type=str, help="""Name of the model in case you're pushing to the hub.""", ) snake_case__ = parser.parse_args() convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
364
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING snake_case__ = logging.get_logger(__name__) snake_case__ = { """microsoft/table-transformer-detection""": ( """https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json""" ), } class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = 'table-transformer' _lowerCAmelCase = ['past_key_values'] _lowerCAmelCase = { 'hidden_size': 'd_model', 'num_attention_heads': 'encoder_attention_heads', } def __init__( self : Any , _lowerCamelCase : Union[str, Any]=True , _lowerCamelCase : Dict=None , _lowerCamelCase : int=3 , _lowerCamelCase : Any=100 , _lowerCamelCase : List[Any]=6 , _lowerCamelCase : Tuple=2048 , _lowerCamelCase : Any=8 , _lowerCamelCase : Dict=6 , _lowerCamelCase : Tuple=2048 , _lowerCamelCase : int=8 , _lowerCamelCase : Optional[int]=0.0 , _lowerCamelCase : List[Any]=0.0 , _lowerCamelCase : List[Any]=True , _lowerCamelCase : Optional[int]="relu" , _lowerCamelCase : Union[str, Any]=256 , _lowerCamelCase : Any=0.1 , _lowerCamelCase : Tuple=0.0 , _lowerCamelCase : Optional[int]=0.0 , _lowerCamelCase : str=0.02 , _lowerCamelCase : Tuple=1.0 , _lowerCamelCase : Dict=False , _lowerCamelCase : str="sine" , _lowerCamelCase : str="resnet50" , _lowerCamelCase : Any=True , _lowerCamelCase : List[str]=False , _lowerCamelCase : Any=1 , _lowerCamelCase : int=5 , _lowerCamelCase : Tuple=2 , _lowerCamelCase : Optional[int]=1 , _lowerCamelCase : Any=1 , _lowerCamelCase : Dict=5 , _lowerCamelCase : str=2 , _lowerCamelCase : Union[str, Any]=0.1 , **_lowerCamelCase : int , ): """simple docstring""" if backbone_config is not None and use_timm_backbone: raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' ) if not use_timm_backbone: if backbone_config is None: logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' ) A_ : int = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] ) elif isinstance(_lowerCamelCase , _lowerCamelCase ): A_ : str = backbone_config.get('''model_type''' ) A_ : Optional[int] = CONFIG_MAPPING[backbone_model_type] A_ : List[str] = config_class.from_dict(_lowerCamelCase ) # set timm attributes to None A_ ,A_ ,A_ : Union[str, Any] = None, None, None A_ : Optional[Any] = use_timm_backbone A_ : Optional[int] = backbone_config A_ : Optional[Any] = num_channels A_ : Dict = num_queries A_ : str = d_model A_ : List[str] = encoder_ffn_dim A_ : int = encoder_layers A_ : Optional[Any] = encoder_attention_heads A_ : List[str] = decoder_ffn_dim A_ : Any = decoder_layers A_ : List[str] = decoder_attention_heads A_ : Tuple = dropout A_ : Optional[Any] = attention_dropout A_ : Any = activation_dropout A_ : List[Any] = activation_function A_ : Dict = init_std A_ : Any = init_xavier_std A_ : List[Any] = encoder_layerdrop A_ : int = decoder_layerdrop A_ : Any = encoder_layers A_ : List[str] = auxiliary_loss A_ : List[Any] = position_embedding_type A_ : Optional[Any] = backbone A_ : Tuple = use_pretrained_backbone A_ : List[Any] = dilation # Hungarian matcher A_ : List[str] = class_cost A_ : str = bbox_cost A_ : Union[str, Any] = giou_cost # Loss coefficients A_ : Any = mask_loss_coefficient A_ : Optional[int] = dice_loss_coefficient A_ : Dict = bbox_loss_coefficient A_ : int = giou_loss_coefficient A_ : int = eos_coefficient super().__init__(is_encoder_decoder=_lowerCamelCase , **_lowerCamelCase ) @property def _a ( self : List[Any] ): """simple docstring""" return self.encoder_attention_heads @property def _a ( self : Any ): """simple docstring""" return self.d_model class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = version.parse('1.11' ) @property def _a ( self : Tuple ): """simple docstring""" return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ('''pixel_mask''', {0: '''batch'''}), ] ) @property def _a ( self : Optional[int] ): """simple docstring""" return 1E-5 @property def _a ( self : str ): """simple docstring""" return 12
4
0
'''simple docstring''' import argparse import pickle import numpy as np import torch from torch import nn from transformers import ReformerConfig, ReformerModelWithLMHead from transformers.utils import logging logging.set_verbosity_info() def snake_case__ ( lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Dict , lowerCamelCase__ : Dict=None ) -> Optional[Any]: # set parameter of one layer assert torch_layer.weight.shape == weight.shape, f'{torch_layer} layer.weight does not match' A_ : Optional[Any] = nn.Parameter(lowerCamelCase__ ) if bias is not None: assert torch_layer.bias.shape == bias.shape, f'{torch_layer} layer.bias does not match' A_ : str = nn.Parameter(lowerCamelCase__ ) def snake_case__ ( lowerCamelCase__ : List[str] , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : int ) -> Dict: # set torch weights for 1-to-1 comparison A_ : Dict = np.asarray(weights[0] ) A_ : Optional[int] = np.asarray(weights[1] ) A_ : Union[str, Any] = np.asarray(weights[2] ) set_param( torch_layer.self_attention.query_key , torch.tensor(lowerCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase__ ) , ) set_param( torch_layer.self_attention.value , torch.tensor(lowerCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase__ ) , ) set_param( torch_layer.output.dense , torch.tensor(lowerCamelCase__ ).view(-1 , lowerCamelCase__ ).contiguous().transpose(0 , 1 ) , ) def snake_case__ ( lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : List[str] , lowerCamelCase__ : str ) -> Dict: # set torch weights for 1-to-1 comparison A_ : Optional[int] = np.asarray(weights[0] ) A_ : Union[str, Any] = np.asarray(weights[1] ) A_ : Optional[Any] = np.asarray(weights[2] ) A_ : List[str] = np.asarray(weights[3] ) set_param( torch_layer.self_attention.query , torch.tensor(lowerCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase__ ) , ) set_param( torch_layer.self_attention.key , torch.tensor(lowerCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase__ ) , ) set_param( torch_layer.self_attention.value , torch.tensor(lowerCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase__ ) , ) set_param( torch_layer.output.dense , torch.tensor(lowerCamelCase__ ).view(-1 , lowerCamelCase__ ).contiguous().transpose(0 , 1 ) , ) def snake_case__ ( lowerCamelCase__ : Tuple , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : str ) -> int: # layernorm 1 A_ : Optional[Any] = weights[0][0][0] A_ : Optional[int] = np.asarray(layer_norm_a[0] ) A_ : int = np.asarray(layer_norm_a[1] ) set_param( torch_block.attention.layer_norm , torch.tensor(lowerCamelCase__ ) , torch.tensor(lowerCamelCase__ ) , ) # lsh weights + output A_ : Dict = weights[0][1] if len(lowerCamelCase__ ) < 4: set_layer_weights_in_torch_lsh(lowerCamelCase__ , torch_block.attention , lowerCamelCase__ ) else: set_layer_weights_in_torch_local(lowerCamelCase__ , torch_block.attention , lowerCamelCase__ ) # intermediate weighs A_ : Any = weights[2][0][1][2] # Chunked Feed Forward if len(lowerCamelCase__ ) == 4: A_ : List[str] = intermediate_weights[2] # layernorm 2 A_ : Tuple = np.asarray(intermediate_weights[0][0] ) A_ : Tuple = np.asarray(intermediate_weights[0][1] ) set_param( torch_block.feed_forward.layer_norm , torch.tensor(lowerCamelCase__ ) , torch.tensor(lowerCamelCase__ ) , ) # intermediate dense A_ : Optional[int] = np.asarray(intermediate_weights[1][0] ) A_ : int = np.asarray(intermediate_weights[1][1] ) set_param( torch_block.feed_forward.dense.dense , torch.tensor(lowerCamelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(lowerCamelCase__ ) , ) # intermediate out A_ : Tuple = np.asarray(intermediate_weights[4][0] ) A_ : Optional[int] = np.asarray(intermediate_weights[4][1] ) set_param( torch_block.feed_forward.output.dense , torch.tensor(lowerCamelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(lowerCamelCase__ ) , ) def snake_case__ ( lowerCamelCase__ : int , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : str ) -> str: # reformer model A_ : List[Any] = torch_model.reformer # word embeds A_ : List[str] = np.asarray(weights[1] ) set_param( torch_model_reformer.embeddings.word_embeddings , torch.tensor(lowerCamelCase__ ) , ) if isinstance(weights[3] , lowerCamelCase__ ): A_ : int = torch_model_reformer.embeddings.position_embeddings for emb_idx in range(len(position_embeddings.weights ) ): A_ : Tuple = np.asarray(weights[3][emb_idx][0] ) assert ( position_embeddings.weights[emb_idx].shape == emb_weights.shape ), f'{position_embeddings[emb_idx]} emb does not match' A_ : Optional[int] = nn.Parameter(torch.tensor(lowerCamelCase__ ) ) A_ : List[str] = weights[5] assert len(torch_model_reformer.encoder.layers ) * 4 == len( lowerCamelCase__ ), "HF and trax model do not have the same number of layers" for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ): A_ : List[Any] = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)] set_block_weights_in_torch(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) # output layer norm A_ : Optional[Any] = np.asarray(weights[7][0] ) A_ : Tuple = np.asarray(weights[7][1] ) set_param( torch_model_reformer.encoder.layer_norm , torch.tensor(lowerCamelCase__ ) , torch.tensor(lowerCamelCase__ ) , ) # output embeddings A_ : Optional[int] = np.asarray(weights[9][0] ) A_ : Optional[int] = np.asarray(weights[9][1] ) set_param( torch_model.lm_head.decoder , torch.tensor(lowerCamelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(lowerCamelCase__ ) , ) def snake_case__ ( lowerCamelCase__ : str , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Optional[int] ) -> Tuple: # Initialise PyTorch model A_ : Union[str, Any] = ReformerConfig.from_json_file(lowerCamelCase__ ) print(f'Building PyTorch model from configuration: {config}' ) A_ : Tuple = ReformerModelWithLMHead(lowerCamelCase__ ) with open(lowerCamelCase__ , '''rb''' ) as f: A_ : Tuple = pickle.load(lowerCamelCase__ )['''weights'''] set_model_weights_in_torch(lowerCamelCase__ , lowerCamelCase__ , config.hidden_size ) # Save pytorch-model print(f'Save PyTorch model to {pytorch_dump_path}' ) torch.save(model.state_dict() , lowerCamelCase__ ) if __name__ == "__main__": snake_case__ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--trax_model_pkl_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path.""" ) parser.add_argument( """--config_file""", default=None, type=str, required=True, help=( """The config json file corresponding to the pre-trained Reformer model. \n""" """This specifies the model architecture.""" ), ) parser.add_argument( """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) snake_case__ = parser.parse_args() convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
365
'''simple docstring''' import inspect import unittest from transformers import BitConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class UpperCamelCase_ : """simple docstring""" def __init__( self : Optional[Any] , _lowerCamelCase : int , _lowerCamelCase : List[str]=3 , _lowerCamelCase : Any=32 , _lowerCamelCase : Union[str, Any]=3 , _lowerCamelCase : int=10 , _lowerCamelCase : Union[str, Any]=[8, 16, 32, 64] , _lowerCamelCase : Dict=[1, 1, 2, 1] , _lowerCamelCase : Union[str, Any]=True , _lowerCamelCase : Optional[int]=True , _lowerCamelCase : Any="relu" , _lowerCamelCase : Optional[Any]=3 , _lowerCamelCase : Optional[Any]=None , _lowerCamelCase : Dict=["stage2", "stage3", "stage4"] , _lowerCamelCase : Union[str, Any]=[2, 3, 4] , _lowerCamelCase : Tuple=1 , ): """simple docstring""" A_ : List[str] = parent A_ : List[str] = batch_size A_ : Union[str, Any] = image_size A_ : Tuple = num_channels A_ : Any = embeddings_size A_ : int = hidden_sizes A_ : Optional[Any] = depths A_ : List[Any] = is_training A_ : Optional[int] = use_labels A_ : int = hidden_act A_ : Tuple = num_labels A_ : Union[str, Any] = scope A_ : List[Any] = len(_lowerCamelCase ) A_ : Union[str, Any] = out_features A_ : List[Any] = out_indices A_ : Dict = num_groups def _a ( self : Optional[int] ): """simple docstring""" A_ : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A_ : Union[str, Any] = None if self.use_labels: A_ : Any = ids_tensor([self.batch_size] , self.num_labels ) A_ : Any = self.get_config() return config, pixel_values, labels def _a ( self : Union[str, Any] ): """simple docstring""" return BitConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , ) def _a ( self : List[Any] , _lowerCamelCase : List[str] , _lowerCamelCase : List[str] , _lowerCamelCase : Optional[Any] ): """simple docstring""" A_ : Any = BitModel(config=_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() A_ : int = model(_lowerCamelCase ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def _a ( self : Optional[int] , _lowerCamelCase : List[Any] , _lowerCamelCase : str , _lowerCamelCase : Optional[int] ): """simple docstring""" A_ : Dict = self.num_labels A_ : Optional[Any] = BitForImageClassification(_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() A_ : List[Any] = model(_lowerCamelCase , labels=_lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _a ( self : Any , _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : List[Any] ): """simple docstring""" A_ : List[Any] = BitBackbone(config=_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() A_ : int = model(_lowerCamelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None A_ : Optional[Any] = None A_ : int = BitBackbone(config=_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() A_ : Optional[int] = model(_lowerCamelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def _a ( self : List[Any] ): """simple docstring""" A_ : Union[str, Any] = self.prepare_config_and_inputs() A_ ,A_ ,A_ : Union[str, Any] = config_and_inputs A_ : str = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class UpperCamelCase_ (a__, a__, unittest.TestCase ): """simple docstring""" _lowerCAmelCase = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else () _lowerCAmelCase = ( {'feature-extraction': BitModel, 'image-classification': BitForImageClassification} if is_torch_available() else {} ) _lowerCAmelCase = False _lowerCAmelCase = False _lowerCAmelCase = False _lowerCAmelCase = False _lowerCAmelCase = False def _a ( self : Optional[Any] ): """simple docstring""" A_ : List[str] = BitModelTester(self ) A_ : Optional[Any] = ConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase ) def _a ( self : Optional[Any] ): """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _a ( self : List[Any] ): """simple docstring""" return @unittest.skip(reason='''Bit does not output attentions''' ) def _a ( self : str ): """simple docstring""" pass @unittest.skip(reason='''Bit does not use inputs_embeds''' ) def _a ( self : Union[str, Any] ): """simple docstring""" pass @unittest.skip(reason='''Bit does not support input and output embeddings''' ) def _a ( self : Any ): """simple docstring""" pass def _a ( self : List[Any] ): """simple docstring""" A_ ,A_ : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A_ : Dict = model_class(_lowerCamelCase ) A_ : Dict = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A_ : int = [*signature.parameters.keys()] A_ : Union[str, Any] = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , _lowerCamelCase ) def _a ( self : Optional[Any] ): """simple docstring""" A_ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCamelCase ) def _a ( self : Optional[Any] ): """simple docstring""" A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*_lowerCamelCase ) def _a ( self : Tuple ): """simple docstring""" A_ ,A_ : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A_ : str = model_class(config=_lowerCamelCase ) for name, module in model.named_modules(): if isinstance(_lowerCamelCase , (nn.BatchNormad, nn.GroupNorm) ): self.assertTrue( torch.all(module.weight == 1 ) , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , ) self.assertTrue( torch.all(module.bias == 0 ) , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , ) def _a ( self : int ): """simple docstring""" def check_hidden_states_output(_lowerCamelCase : Union[str, Any] , _lowerCamelCase : Dict , _lowerCamelCase : int ): A_ : Union[str, Any] = model_class(_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() with torch.no_grad(): A_ : Union[str, Any] = model(**self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) ) A_ : int = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states A_ : List[Any] = self.model_tester.num_stages self.assertEqual(len(_lowerCamelCase ) , expected_num_stages + 1 ) # Bit's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) A_ ,A_ : str = self.model_tester.prepare_config_and_inputs_for_common() A_ : Tuple = ['''preactivation''', '''bottleneck'''] for model_class in self.all_model_classes: for layer_type in layers_type: A_ : Tuple = layer_type A_ : Optional[Any] = True check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] A_ : List[str] = True check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) @unittest.skip(reason='''Bit does not use feedforward chunking''' ) def _a ( self : Tuple ): """simple docstring""" pass def _a ( self : str ): """simple docstring""" A_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_lowerCamelCase ) @slow def _a ( self : Union[str, Any] ): """simple docstring""" for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A_ : List[Any] = BitModel.from_pretrained(_lowerCamelCase ) self.assertIsNotNone(_lowerCamelCase ) def snake_case__ ( ) -> Optional[int]: A_ : Optional[int] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class UpperCamelCase_ (unittest.TestCase ): """simple docstring""" @cached_property def _a ( self : List[Any] ): """simple docstring""" return ( BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def _a ( self : Dict ): """simple docstring""" A_ : Optional[int] = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(_lowerCamelCase ) A_ : Union[str, Any] = self.default_image_processor A_ : Optional[int] = prepare_img() A_ : int = image_processor(images=_lowerCamelCase , return_tensors='''pt''' ).to(_lowerCamelCase ) # forward pass with torch.no_grad(): A_ : Union[str, Any] = model(**_lowerCamelCase ) # verify the logits A_ : Dict = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , _lowerCamelCase ) A_ : Tuple = torch.tensor([[-0.65_26, -0.52_63, -1.43_98]] ).to(_lowerCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCamelCase , atol=1E-4 ) ) @require_torch class UpperCamelCase_ (a__, unittest.TestCase ): """simple docstring""" _lowerCAmelCase = (BitBackbone,) if is_torch_available() else () _lowerCAmelCase = BitConfig _lowerCAmelCase = False def _a ( self : List[str] ): """simple docstring""" A_ : Union[str, Any] = BitModelTester(self )
4
0
'''simple docstring''' import multiprocessing import time from arguments import PretokenizationArguments from datasets import load_dataset from transformers import AutoTokenizer, HfArgumentParser def snake_case__ ( lowerCamelCase__ : int ) -> List[str]: A_ : Tuple = {} A_ : Optional[Any] = tokenizer(example['''content'''] , truncation=lowerCamelCase__ )['''input_ids'''] A_ : Optional[Any] = len(example['''content'''] ) / len(output['''input_ids'''] ) return output snake_case__ = HfArgumentParser(PretokenizationArguments) snake_case__ = parser.parse_args() if args.num_workers is None: snake_case__ = multiprocessing.cpu_count() snake_case__ = AutoTokenizer.from_pretrained(args.tokenizer_dir) snake_case__ = time.time() snake_case__ = load_dataset(args.dataset_name, split="""train""") print(F'Dataset loaded in {time.time()-t_start:.2f}s') snake_case__ = time.time() snake_case__ = ds.map( tokenize, num_proc=args.num_workers, remove_columns=[ """repo_name""", """path""", """copies""", """size""", """content""", """license""", """hash""", """line_mean""", """line_max""", """alpha_frac""", """autogenerated""", ], ) print(F'Dataset tokenized in {time.time()-t_start:.2f}s') snake_case__ = time.time() ds.push_to_hub(args.tokenized_data_repo) print(F'Data pushed to the hub in {time.time()-t_start:.2f}s')
366
'''simple docstring''' import pprint import requests snake_case__ = """https://zenquotes.io/api""" def snake_case__ ( ) -> list: return requests.get(API_ENDPOINT_URL + '''/today''' ).json() def snake_case__ ( ) -> list: return requests.get(API_ENDPOINT_URL + '''/random''' ).json() if __name__ == "__main__": snake_case__ = random_quotes() pprint.pprint(response)
4
0
'''simple docstring''' from itertools import product from cva import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey from numpy import dot, exp, mgrid, pi, ravel, square, uinta, zeros def snake_case__ ( lowerCamelCase__ : Dict , lowerCamelCase__ : Tuple ) -> Dict: A_ : Optional[int] = k_size // 2 A_ : Optional[int] = mgrid[0 - center : k_size - center, 0 - center : k_size - center] A_ : int = 1 / (2 * pi * sigma) * exp(-(square(lowerCamelCase__ ) + square(lowerCamelCase__ )) / (2 * square(lowerCamelCase__ )) ) return g def snake_case__ ( lowerCamelCase__ : int , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Union[str, Any] ) -> Dict: A_ : Optional[Any] = image.shape[0], image.shape[1] # dst image height and width A_ : int = height - k_size + 1 A_ : int = width - k_size + 1 # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows A_ : Dict = zeros((dst_height * dst_width, k_size * k_size) ) A_ : List[Any] = 0 for i, j in product(range(lowerCamelCase__ ) , range(lowerCamelCase__ ) ): A_ : Any = ravel(image[i : i + k_size, j : j + k_size] ) A_ : List[str] = window row += 1 # turn the kernel into shape(k*k, 1) A_ : str = gen_gaussian_kernel(lowerCamelCase__ , lowerCamelCase__ ) A_ : List[Any] = ravel(lowerCamelCase__ ) # reshape and get the dst image A_ : List[str] = dot(lowerCamelCase__ , lowerCamelCase__ ).reshape(lowerCamelCase__ , lowerCamelCase__ ).astype(lowerCamelCase__ ) return dst if __name__ == "__main__": # read original image snake_case__ = imread(R"""../image_data/lena.jpg""") # turn image in gray scale value snake_case__ = cvtColor(img, COLOR_BGR2GRAY) # get values with two different mask size snake_case__ = gaussian_filter(gray, 3, sigma=1) snake_case__ = gaussian_filter(gray, 5, sigma=0.8) # show result images imshow("""gaussian filter with 3x3 mask""", gaussianaxa) imshow("""gaussian filter with 5x5 mask""", gaussianaxa) waitKey()
367
'''simple docstring''' from __future__ import annotations class UpperCamelCase_ : """simple docstring""" def __init__( self : Optional[int] , _lowerCamelCase : int ): """simple docstring""" A_ : Union[str, Any] = order # a_{0} ... a_{k} A_ : Union[str, Any] = [1.0] + [0.0] * order # b_{0} ... b_{k} A_ : int = [1.0] + [0.0] * order # x[n-1] ... x[n-k] A_ : str = [0.0] * self.order # y[n-1] ... y[n-k] A_ : Optional[Any] = [0.0] * self.order def _a ( self : Dict , _lowerCamelCase : list[float] , _lowerCamelCase : list[float] ): """simple docstring""" if len(_lowerCamelCase ) < self.order: A_ : Any = [1.0, *a_coeffs] if len(_lowerCamelCase ) != self.order + 1: A_ : List[Any] = ( f'Expected a_coeffs to have {self.order + 1} elements ' f'for {self.order}-order filter, got {len(_lowerCamelCase )}' ) raise ValueError(_lowerCamelCase ) if len(_lowerCamelCase ) != self.order + 1: A_ : Union[str, Any] = ( f'Expected b_coeffs to have {self.order + 1} elements ' f'for {self.order}-order filter, got {len(_lowerCamelCase )}' ) raise ValueError(_lowerCamelCase ) A_ : Tuple = a_coeffs A_ : str = b_coeffs def _a ( self : Tuple , _lowerCamelCase : float ): """simple docstring""" A_ : Any = 0.0 # Start at index 1 and do index 0 at the end. for i in range(1 , self.order + 1 ): result += ( self.b_coeffs[i] * self.input_history[i - 1] - self.a_coeffs[i] * self.output_history[i - 1] ) A_ : str = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0] A_ : Optional[Any] = self.input_history[:-1] A_ : List[str] = self.output_history[:-1] A_ : Tuple = sample A_ : Tuple = result return result
4
0
'''simple docstring''' from __future__ import annotations import os from collections.abc import Mapping snake_case__ = tuple[int, int] class UpperCamelCase_ : """simple docstring""" def __init__( self : Tuple , _lowerCamelCase : set[int] , _lowerCamelCase : Mapping[EdgeT, int] ): """simple docstring""" A_ : set[int] = vertices A_ : dict[EdgeT, int] = { (min(_lowerCamelCase ), max(_lowerCamelCase )): weight for edge, weight in edges.items() } def _a ( self : Any , _lowerCamelCase : EdgeT , _lowerCamelCase : int ): """simple docstring""" self.vertices.add(edge[0] ) self.vertices.add(edge[1] ) A_ : List[Any] = weight def _a ( self : Dict ): """simple docstring""" A_ : Graph = Graph({min(self.vertices )} , {} ) A_ : EdgeT A_ : int A_ : EdgeT A_ : int while len(subgraph.vertices ) < len(self.vertices ): A_ : Any = max(self.edges.values() ) + 1 for edge, weight in self.edges.items(): if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices): if weight < min_weight: A_ : Optional[Any] = edge A_ : Union[str, Any] = weight subgraph.add_edge(_lowerCamelCase , _lowerCamelCase ) return subgraph def snake_case__ ( lowerCamelCase__ : str = "p107_network.txt" ) -> int: A_ : str = os.path.abspath(os.path.dirname(lowerCamelCase__ ) ) A_ : str = os.path.join(lowerCamelCase__ , lowerCamelCase__ ) A_ : dict[EdgeT, int] = {} A_ : list[str] A_ : int A_ : int with open(lowerCamelCase__ ) as f: A_ : Any = f.read().strip().split('''\n''' ) A_ : Tuple = [line.split(''',''' ) for line in data] for edgea in range(1 , len(lowerCamelCase__ ) ): for edgea in range(lowerCamelCase__ ): if adjaceny_matrix[edgea][edgea] != "-": A_ : Union[str, Any] = int(adjaceny_matrix[edgea][edgea] ) A_ : Graph = Graph(set(range(len(lowerCamelCase__ ) ) ) , lowerCamelCase__ ) A_ : Graph = graph.prims_algorithm() A_ : int = sum(graph.edges.values() ) A_ : int = sum(subgraph.edges.values() ) return initial_total - optimal_total if __name__ == "__main__": print(F'{solution() = }')
368
'''simple docstring''' class UpperCamelCase_ : """simple docstring""" def __init__( self : Optional[Any] , _lowerCamelCase : Union[str, Any] ): """simple docstring""" A_ : Union[str, Any] = val A_ : Tuple = None A_ : Any = None def _a ( self : Tuple , _lowerCamelCase : List[Any] ): """simple docstring""" if self.val: if val < self.val: if self.left is None: A_ : int = Node(_lowerCamelCase ) else: self.left.insert(_lowerCamelCase ) elif val > self.val: if self.right is None: A_ : List[str] = Node(_lowerCamelCase ) else: self.right.insert(_lowerCamelCase ) else: A_ : Any = val def snake_case__ ( lowerCamelCase__ : Any , lowerCamelCase__ : Optional[int] ) -> str: # Recursive traversal if root: inorder(root.left , lowerCamelCase__ ) res.append(root.val ) inorder(root.right , lowerCamelCase__ ) def snake_case__ ( lowerCamelCase__ : Optional[int] ) -> Tuple: # Build BST if len(lowerCamelCase__ ) == 0: return arr A_ : Dict = Node(arr[0] ) for i in range(1 , len(lowerCamelCase__ ) ): root.insert(arr[i] ) # Traverse BST in order. A_ : Tuple = [] inorder(lowerCamelCase__ , lowerCamelCase__ ) return res if __name__ == "__main__": print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
4
0
'''simple docstring''' from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, apply_forward_hook from .modeling_utils import ModelMixin from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer @dataclass class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = 4_2 class UpperCamelCase_ (a__, a__ ): """simple docstring""" @register_to_config def __init__( self : Any , _lowerCamelCase : int = 3 , _lowerCamelCase : int = 3 , _lowerCamelCase : Tuple[str] = ("DownEncoderBlock2D",) , _lowerCamelCase : Tuple[str] = ("UpDecoderBlock2D",) , _lowerCamelCase : Tuple[int] = (64,) , _lowerCamelCase : int = 1 , _lowerCamelCase : str = "silu" , _lowerCamelCase : int = 3 , _lowerCamelCase : int = 32 , _lowerCamelCase : int = 256 , _lowerCamelCase : int = 32 , _lowerCamelCase : Optional[int] = None , _lowerCamelCase : float = 0.1_82_15 , _lowerCamelCase : str = "group" , ): """simple docstring""" super().__init__() # pass init params to Encoder A_ : Optional[Any] = Encoder( in_channels=_lowerCamelCase , out_channels=_lowerCamelCase , down_block_types=_lowerCamelCase , block_out_channels=_lowerCamelCase , layers_per_block=_lowerCamelCase , act_fn=_lowerCamelCase , norm_num_groups=_lowerCamelCase , double_z=_lowerCamelCase , ) A_ : int = vq_embed_dim if vq_embed_dim is not None else latent_channels A_ : Tuple = nn.Convad(_lowerCamelCase , _lowerCamelCase , 1 ) A_ : Dict = VectorQuantizer(_lowerCamelCase , _lowerCamelCase , beta=0.25 , remap=_lowerCamelCase , sane_index_shape=_lowerCamelCase ) A_ : str = nn.Convad(_lowerCamelCase , _lowerCamelCase , 1 ) # pass init params to Decoder A_ : List[Any] = Decoder( in_channels=_lowerCamelCase , out_channels=_lowerCamelCase , up_block_types=_lowerCamelCase , block_out_channels=_lowerCamelCase , layers_per_block=_lowerCamelCase , act_fn=_lowerCamelCase , norm_num_groups=_lowerCamelCase , norm_type=_lowerCamelCase , ) @apply_forward_hook def _a ( self : List[Any] , _lowerCamelCase : torch.FloatTensor , _lowerCamelCase : bool = True ): """simple docstring""" A_ : Any = self.encoder(_lowerCamelCase ) A_ : str = self.quant_conv(_lowerCamelCase ) if not return_dict: return (h,) return VQEncoderOutput(latents=_lowerCamelCase ) @apply_forward_hook def _a ( self : int , _lowerCamelCase : torch.FloatTensor , _lowerCamelCase : bool = False , _lowerCamelCase : bool = True ): """simple docstring""" if not force_not_quantize: A_ : Union[str, Any] = self.quantize(_lowerCamelCase ) else: A_ : Tuple = h A_ : int = self.post_quant_conv(_lowerCamelCase ) A_ : List[Any] = self.decoder(_lowerCamelCase , quant if self.config.norm_type == '''spatial''' else None ) if not return_dict: return (dec,) return DecoderOutput(sample=_lowerCamelCase ) def _a ( self : Optional[Any] , _lowerCamelCase : torch.FloatTensor , _lowerCamelCase : bool = True ): """simple docstring""" A_ : List[str] = sample A_ : Dict = self.encode(_lowerCamelCase ).latents A_ : Optional[Any] = self.decode(_lowerCamelCase ).sample if not return_dict: return (dec,) return DecoderOutput(sample=_lowerCamelCase )
369
'''simple docstring''' def snake_case__ ( lowerCamelCase__ : list ) -> list: if len(lowerCamelCase__ ) <= 1: return [tuple(lowerCamelCase__ )] A_ : List[str] = [] def generate(lowerCamelCase__ : int , lowerCamelCase__ : list ): if k == 1: res.append(tuple(arr[:] ) ) return generate(k - 1 , lowerCamelCase__ ) for i in range(k - 1 ): if k % 2 == 0: # k is even A_ ,A_ : Optional[int] = arr[k - 1], arr[i] else: # k is odd A_ ,A_ : Union[str, Any] = arr[k - 1], arr[0] generate(k - 1 , lowerCamelCase__ ) generate(len(lowerCamelCase__ ) , lowerCamelCase__ ) return res if __name__ == "__main__": snake_case__ = input("""Enter numbers separated by a comma:\n""").strip() snake_case__ = [int(item) for item in user_input.split(""",""")] print(heaps(arr))
4
0
'''simple docstring''' import unittest from transformers import BertGenerationTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin snake_case__ = """▁""" snake_case__ = get_tests_dir("""fixtures/test_sentencepiece.model""") @require_sentencepiece class UpperCamelCase_ (a__, unittest.TestCase ): """simple docstring""" _lowerCAmelCase = BertGenerationTokenizer _lowerCAmelCase = False _lowerCAmelCase = True def _a ( self : Dict ): """simple docstring""" super().setUp() A_ : Optional[int] = BertGenerationTokenizer(_lowerCamelCase , keep_accents=_lowerCamelCase ) tokenizer.save_pretrained(self.tmpdirname ) def _a ( self : Tuple ): """simple docstring""" A_ : str = '''<s>''' A_ : Tuple = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowerCamelCase ) , _lowerCamelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowerCamelCase ) , _lowerCamelCase ) def _a ( self : Optional[Any] ): """simple docstring""" A_ : List[str] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<unk>''' ) self.assertEqual(vocab_keys[1] , '''<s>''' ) self.assertEqual(vocab_keys[-1] , '''<pad>''' ) self.assertEqual(len(_lowerCamelCase ) , 1002 ) def _a ( self : Dict ): """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 1000 ) def _a ( self : Tuple ): """simple docstring""" A_ : int = BertGenerationTokenizer(_lowerCamelCase , keep_accents=_lowerCamelCase ) A_ : List[Any] = tokenizer.tokenize('''This is a test''' ) self.assertListEqual(_lowerCamelCase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(_lowerCamelCase ) , [285, 46, 10, 170, 382] , ) A_ : Any = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( _lowerCamelCase , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.''', ] , ) A_ : List[Any] = tokenizer.convert_tokens_to_ids(_lowerCamelCase ) self.assertListEqual( _lowerCamelCase , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , ) A_ : List[Any] = tokenizer.convert_ids_to_tokens(_lowerCamelCase ) self.assertListEqual( _lowerCamelCase , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.''', ] , ) @cached_property def _a ( self : List[str] ): """simple docstring""" return BertGenerationTokenizer.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' ) @slow def _a ( self : str ): """simple docstring""" A_ : List[str] = '''Hello World!''' A_ : Any = [18536, 2260, 101] self.assertListEqual(_lowerCamelCase , self.big_tokenizer.encode(_lowerCamelCase ) ) @slow def _a ( self : int ): """simple docstring""" A_ : Any = ( '''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will''' ''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth''' ) A_ : List[str] = [ 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172, ] self.assertListEqual(_lowerCamelCase , self.big_tokenizer.encode(_lowerCamelCase ) ) @require_torch @slow def _a ( self : Optional[int] ): """simple docstring""" import torch from transformers import BertGenerationConfig, BertGenerationEncoder # Build sequence A_ : Union[str, Any] = list(self.big_tokenizer.get_vocab().keys() )[:10] A_ : Dict = ''' '''.join(_lowerCamelCase ) A_ : List[Any] = self.big_tokenizer.encode_plus(_lowerCamelCase , return_tensors='''pt''' , return_token_type_ids=_lowerCamelCase ) A_ : Optional[int] = self.big_tokenizer.batch_encode_plus( [sequence + ''' ''' + sequence] , return_tensors='''pt''' , return_token_type_ids=_lowerCamelCase ) A_ : List[Any] = BertGenerationConfig() A_ : List[Any] = BertGenerationEncoder(_lowerCamelCase ) assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size with torch.no_grad(): model(**_lowerCamelCase ) model(**_lowerCamelCase ) @slow def _a ( self : List[Any] ): """simple docstring""" A_ : List[str] = {'''input_ids''': [[39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114], [448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=_lowerCamelCase , model_name='''google/bert_for_seq_generation_L-24_bbc_encoder''' , revision='''c817d1fd1be2ffa69431227a1fe320544943d4db''' , )
370
'''simple docstring''' import unittest from queue import Empty from threading import Thread from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available from transformers.testing_utils import CaptureStdout, require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers import AutoModelForCausalLM @require_torch class UpperCamelCase_ (unittest.TestCase ): """simple docstring""" def _a ( self : Dict ): """simple docstring""" A_ : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) A_ : Tuple = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowerCamelCase ) A_ : Dict = -1 A_ : List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCamelCase ) A_ : Any = model.generate(_lowerCamelCase , max_new_tokens=10 , do_sample=_lowerCamelCase ) A_ : List[str] = tokenizer.decode(greedy_ids[0] ) with CaptureStdout() as cs: A_ : List[str] = TextStreamer(_lowerCamelCase ) model.generate(_lowerCamelCase , max_new_tokens=10 , do_sample=_lowerCamelCase , streamer=_lowerCamelCase ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer A_ : Dict = cs.out[:-1] self.assertEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : Tuple ): """simple docstring""" A_ : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) A_ : List[str] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowerCamelCase ) A_ : Dict = -1 A_ : List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCamelCase ) A_ : Optional[int] = model.generate(_lowerCamelCase , max_new_tokens=10 , do_sample=_lowerCamelCase ) A_ : str = tokenizer.decode(greedy_ids[0] ) A_ : int = TextIteratorStreamer(_lowerCamelCase ) A_ : List[Any] = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer} A_ : List[Any] = Thread(target=model.generate , kwargs=_lowerCamelCase ) thread.start() A_ : List[Any] = '''''' for new_text in streamer: streamer_text += new_text self.assertEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : int ): """simple docstring""" A_ : List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) A_ : List[Any] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowerCamelCase ) A_ : List[str] = -1 A_ : Any = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCamelCase ) A_ : Tuple = model.generate(_lowerCamelCase , max_new_tokens=10 , do_sample=_lowerCamelCase ) A_ : Tuple = greedy_ids[:, input_ids.shape[1] :] A_ : Tuple = tokenizer.decode(new_greedy_ids[0] ) with CaptureStdout() as cs: A_ : Any = TextStreamer(_lowerCamelCase , skip_prompt=_lowerCamelCase ) model.generate(_lowerCamelCase , max_new_tokens=10 , do_sample=_lowerCamelCase , streamer=_lowerCamelCase ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer A_ : Any = cs.out[:-1] self.assertEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : List[Any] ): """simple docstring""" A_ : List[Any] = AutoTokenizer.from_pretrained('''distilgpt2''' ) A_ : Tuple = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(_lowerCamelCase ) A_ : List[Any] = -1 A_ : Union[str, Any] = torch.ones((1, 5) , device=_lowerCamelCase ).long() * model.config.bos_token_id with CaptureStdout() as cs: A_ : List[Any] = TextStreamer(_lowerCamelCase , skip_special_tokens=_lowerCamelCase ) model.generate(_lowerCamelCase , max_new_tokens=1 , do_sample=_lowerCamelCase , streamer=_lowerCamelCase ) # The prompt contains a special token, so the streamer should not print it. As such, the output text, when # re-tokenized, must only contain one token A_ : List[str] = cs.out[:-1] # Remove the final "\n" A_ : List[Any] = tokenizer(_lowerCamelCase , return_tensors='''pt''' ) self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) ) def _a ( self : Union[str, Any] ): """simple docstring""" A_ : str = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) A_ : str = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowerCamelCase ) A_ : Union[str, Any] = -1 A_ : Union[str, Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCamelCase ) A_ : List[str] = TextIteratorStreamer(_lowerCamelCase , timeout=0.0_01 ) A_ : str = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer} A_ : List[str] = Thread(target=model.generate , kwargs=_lowerCamelCase ) thread.start() # The streamer will timeout after 0.001 seconds, so an exception will be raised with self.assertRaises(_lowerCamelCase ): A_ : str = '''''' for new_text in streamer: streamer_text += new_text
4
0
'''simple docstring''' import os import unittest from tempfile import TemporaryDirectory import torch import torch.nn as nn from accelerate.utils import ( OffloadedWeightsLoader, extract_submodules_state_dict, load_offloaded_weight, offload_state_dict, offload_weight, ) class UpperCamelCase_ (nn.Module ): """simple docstring""" def __init__( self : List[str] ): """simple docstring""" super().__init__() A_ : Any = nn.Linear(3 , 4 ) A_ : Optional[Any] = nn.BatchNormad(4 ) A_ : str = nn.Linear(4 , 5 ) def _a ( self : Dict , _lowerCamelCase : Dict ): """simple docstring""" return self.lineara(self.batchnorm(self.lineara(_lowerCamelCase ) ) ) class UpperCamelCase_ (unittest.TestCase ): """simple docstring""" def _a ( self : Optional[int] ): """simple docstring""" A_ : Optional[Any] = ModelForTest() with TemporaryDirectory() as tmp_dir: offload_state_dict(_lowerCamelCase , model.state_dict() ) A_ : Tuple = os.path.join(_lowerCamelCase , '''index.json''' ) self.assertTrue(os.path.isfile(_lowerCamelCase ) ) # TODO: add tests on what is inside the index for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]: A_ : List[Any] = os.path.join(_lowerCamelCase , f'{key}.dat' ) self.assertTrue(os.path.isfile(_lowerCamelCase ) ) # TODO: add tests on the fact weights are properly loaded def _a ( self : Tuple ): """simple docstring""" A_ : List[Any] = [torch.floataa, torch.floataa, torch.bfloataa] for dtype in dtypes: A_ : List[str] = torch.randn(2 , 3 , dtype=_lowerCamelCase ) with TemporaryDirectory() as tmp_dir: A_ : Dict = offload_weight(_lowerCamelCase , '''weight''' , _lowerCamelCase , {} ) A_ : Optional[Any] = os.path.join(_lowerCamelCase , '''weight.dat''' ) self.assertTrue(os.path.isfile(_lowerCamelCase ) ) self.assertDictEqual(_lowerCamelCase , {'''weight''': {'''shape''': [2, 3], '''dtype''': str(_lowerCamelCase ).split('''.''' )[1]}} ) A_ : Any = load_offloaded_weight(_lowerCamelCase , index['''weight'''] ) self.assertTrue(torch.equal(_lowerCamelCase , _lowerCamelCase ) ) def _a ( self : str ): """simple docstring""" A_ : Any = ModelForTest() A_ : Any = model.state_dict() A_ : Optional[Any] = {k: v for k, v in state_dict.items() if '''linear2''' not in k} A_ : str = {k: v for k, v in state_dict.items() if '''linear2''' in k} with TemporaryDirectory() as tmp_dir: offload_state_dict(_lowerCamelCase , _lowerCamelCase ) A_ : Any = OffloadedWeightsLoader(state_dict=_lowerCamelCase , save_folder=_lowerCamelCase ) # Every key is there with the right value self.assertEqual(sorted(_lowerCamelCase ) , sorted(state_dict.keys() ) ) for key, param in state_dict.items(): self.assertTrue(torch.allclose(_lowerCamelCase , weight_map[key] ) ) A_ : List[str] = {k: v for k, v in state_dict.items() if '''weight''' in k} A_ : Optional[Any] = {k: v for k, v in state_dict.items() if '''weight''' not in k} with TemporaryDirectory() as tmp_dir: offload_state_dict(_lowerCamelCase , _lowerCamelCase ) A_ : List[str] = OffloadedWeightsLoader(state_dict=_lowerCamelCase , save_folder=_lowerCamelCase ) # Every key is there with the right value self.assertEqual(sorted(_lowerCamelCase ) , sorted(state_dict.keys() ) ) for key, param in state_dict.items(): self.assertTrue(torch.allclose(_lowerCamelCase , weight_map[key] ) ) with TemporaryDirectory() as tmp_dir: offload_state_dict(_lowerCamelCase , _lowerCamelCase ) # Duplicates are removed A_ : Tuple = OffloadedWeightsLoader(state_dict=_lowerCamelCase , save_folder=_lowerCamelCase ) # Every key is there with the right value self.assertEqual(sorted(_lowerCamelCase ) , sorted(state_dict.keys() ) ) for key, param in state_dict.items(): self.assertTrue(torch.allclose(_lowerCamelCase , weight_map[key] ) ) def _a ( self : Dict ): """simple docstring""" A_ : Optional[int] = {'''a.1''': 0, '''a.10''': 1, '''a.2''': 2} A_ : Tuple = extract_submodules_state_dict(_lowerCamelCase , ['''a.1''', '''a.2'''] ) self.assertDictEqual(_lowerCamelCase , {'''a.1''': 0, '''a.2''': 2} ) A_ : Optional[int] = {'''a.1.a''': 0, '''a.10.a''': 1, '''a.2.a''': 2} A_ : Optional[int] = extract_submodules_state_dict(_lowerCamelCase , ['''a.1''', '''a.2'''] ) self.assertDictEqual(_lowerCamelCase , {'''a.1.a''': 0, '''a.2.a''': 2} )
371
'''simple docstring''' import heapq def snake_case__ ( lowerCamelCase__ : dict ) -> set[int]: A_ : list[list] = [] # for each node and his adjacency list add them and the rank of the node to queue # using heapq module the queue will be filled like a Priority Queue # heapq works with a min priority queue, so I used -1*len(v) to build it for key, value in graph.items(): # O(log(n)) heapq.heappush(lowerCamelCase__ , [-1 * len(lowerCamelCase__ ), (key, value)] ) # chosen_vertices = set of chosen vertices A_ : str = set() # while queue isn't empty and there are still edges # (queue[0][0] is the rank of the node with max rank) while queue and queue[0][0] != 0: # extract vertex with max rank from queue and add it to chosen_vertices A_ : Tuple = heapq.heappop(lowerCamelCase__ )[1][0] chosen_vertices.add(lowerCamelCase__ ) # Remove all arcs adjacent to argmax for elem in queue: # if v haven't adjacent node, skip if elem[0] == 0: continue # if argmax is reachable from elem # remove argmax from elem's adjacent list and update his rank if argmax in elem[1][1]: A_ : List[str] = elem[1][1].index(lowerCamelCase__ ) del elem[1][1][index] elem[0] += 1 # re-order the queue heapq.heapify(lowerCamelCase__ ) return chosen_vertices if __name__ == "__main__": import doctest doctest.testmod() snake_case__ = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]} print(F'Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}')
4
0