Dataset columns:
- code: string (lengths 81 to 54k)
- code_codestyle: int64 (values 0 to 721)
- style_context: string (lengths 91 to 41.9k)
- style_context_codestyle: int64 (values 0 to 699)
- label: int64 (values 0 to 1)
import inspect
import unittest

from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        MODEL_MAPPING,
        SegformerForImageClassification,
        SegformerForSemanticSegmentation,
        SegformerModel,
    )
    from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import SegformerImageProcessor


class SegformerConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
        self.parent.assertTrue(hasattr(config, "num_encoder_blocks"))


class SegformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[16, 32, 64, 128],
        downsampling_rates=[1, 4, 8, 16],
        num_attention_heads=[1, 2, 4, 8],
        is_training=True,
        use_labels=True,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.sr_ratios = sr_ratios
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.downsampling_rates = downsampling_rates
        self.num_attention_heads = num_attention_heads
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return SegformerConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            num_encoder_blocks=self.num_encoder_blocks,
            depths=self.depths,
            hidden_sizes=self.hidden_sizes,
            num_attention_heads=self.num_attention_heads,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = SegformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_height = expected_width = self.image_size // (self.downsampling_rates[-1] * 2)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width)
        )

    def create_and_check_for_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SegformerForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        self.parent.assertGreater(result.loss, 0.0)

    def create_and_check_for_binary_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = 1
        model = SegformerForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        labels = torch.randint(0, 1, (self.batch_size, self.image_size, self.image_size)).to(torch_device)
        result = model(pixel_values, labels=labels)
        self.parent.assertGreater(result.loss, 0.0)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class SegformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            SegformerModel,
            SegformerForSemanticSegmentation,
            SegformerForImageClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SegformerModel,
            "image-classification": SegformerForImageClassification,
            "image-segmentation": SegformerForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False

    def setUp(self):
        self.model_tester = SegformerModelTester(self)
        self.config_tester = SegformerConfigTester(self, config_class=SegformerConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_binary_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_binary_image_segmentation(*config_and_inputs)

    def test_for_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_segmentation(*config_and_inputs)

    @unittest.skip("SegFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("SegFormer does not have get_input_embeddings method and get_output_embeddings methods")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions

            expected_num_attentions = sum(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)

            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
            )

            # verify the last attentions (last block, last layer)
            expected_seq_len = (self.model_tester.image_size // 32) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
            self.assertListEqual(
                list(attentions[-1].shape[-3:]),
                [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len],
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + 1, len(outputs))

            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), expected_num_attentions)

            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
            )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = self.model_tester.num_encoder_blocks
            self.assertEqual(len(hidden_states), expected_num_layers)

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.hidden_sizes[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SegformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
class SegformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_image_segmentation_ade(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
                [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
                [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_image_segmentation_city(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained(
            "nvidia/segformer-b1-finetuned-cityscapes-1024-1024"
        ).to(torch_device)

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
                [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
                [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-1))

    @slow
    def test_post_processing_semantic_segmentation(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((128, 128))
        self.assertEqual(segmentation[0].shape, expected_shape)
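The integration tests above double as usage documentation. For reference, a minimal standalone inference sketch (the example image path is assumed; the checkpoint name and post-processing call are taken from the tests above):

import torch
from PIL import Image
from transformers import SegformerForSemanticSegmentation, SegformerImageProcessor

checkpoint = "nvidia/segformer-b0-finetuned-ade-512-512"
processor = SegformerImageProcessor.from_pretrained(checkpoint)
model = SegformerForSemanticSegmentation.from_pretrained(checkpoint)

image = Image.open("example.jpg").convert("RGB")  # hypothetical input image
inputs = processor(images=image, return_tensors="pt")

with torch.no_grad():
    outputs = model(**inputs)

# logits come out at 1/4 resolution; post-processing upsamples them to the requested size
segmentation = processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])[0]
print(segmentation.shape)  # (height, width) map of ADE20K class indices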
code_codestyle: 714
def base16_encode(data: bytes) -> str:
    """Encode bytes as an uppercase base16 (hex) string."""
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    """Decode an uppercase base16 (hex) string back into bytes."""
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data) % 2) != 0:
        raise ValueError(
            """Base16 encoded data is invalid: Data does not have an even number of hex digits."""
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            """Base16 encoded data is invalid: Data is not uppercase hex or it contains invalid characters."""
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
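A quick round trip for the codec above (using the names as reconstructed here):

assert base16_encode(b"Hello World!") == "48656C6C6F20576F726C6421"
assert base16_decode("48656C6C6F20576F726C6421") == b"Hello World!"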
style_context_codestyle: 0
label: 0
from __future__ import annotations

import math


def is_prime(number: int) -> bool:
    """Checks to see if a number is a prime in O(sqrt(n)) time."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


odd_composites = [num for num in range(3, 100_001, 2) if not is_prime(num)]


def compute_nums(n: int) -> list[int]:
    """Return the first n odd composite numbers that violate the conjecture."""
    if not isinstance(n, int):
        raise ValueError("n must be an integer")
    if n <= 0:
        raise ValueError("n must be >= 0")

    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])
            if len(list_nums) == n:
                return list_nums
    return []


def solution() -> int:
    """Return the smallest odd composite that violates the conjecture."""
    return compute_nums(1)[0]


if __name__ == "__main__":
    print(f"{solution() = }")
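Sanity checks: 5777 and 5993 are the two known odd composites below 100001 that cannot be written as a prime plus twice a square (Project Euler 46):

assert compute_nums(1) == [5777]
assert compute_nums(2) == [5777, 5993]
assert solution() == 5777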
code_codestyle: 715
import pytest
import requests

from datasets.utils.file_utils import http_head

from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline


@pytest.mark.integration
def test_offline_with_timeout():
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request("GET", "https://huggingface.co")
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request("GET", "https://huggingface.co", timeout=1.0)


@pytest.mark.integration
def test_offline_with_connection_error():
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request("GET", "https://huggingface.co")


def test_offline_with_datasets_offline_mode_enabled():
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(ConnectionError):
            http_head("https://huggingface.co")
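For illustration, a minimal sketch of the technique such an offline fixture typically relies on (an assumed implementation using unittest.mock, not the real datasets helper): patch requests.Session.request so every HTTP call fails immediately instead of hanging.

from unittest.mock import patch

import requests


def _raise_connection_error(*args, **kwargs):
    raise requests.ConnectionError("Offline mode is enabled.")


with patch("requests.Session.request", side_effect=_raise_connection_error):
    try:
        requests.get("https://huggingface.co")
    except requests.ConnectionError as err:
        print(err)  # Offline mode is enabled.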
style_context_codestyle: 0
label: 0
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from packaging import version

from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
    add_code_sample_docstrings,
    add_end_docstrings,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    copy_func,
    replace_return_docstrings,
)
from .generic import (
    ContextManagers, ExplicitEnum, ModelOutput, PaddingStrategy, TensorType,
    add_model_info_to_auto_map, cached_property, can_return_loss, expand_dims,
    find_labels, flatten_dict, infer_framework, is_jax_tensor, is_numpy_array,
    is_tensor, is_tf_symbolic_tensor, is_tf_tensor, is_torch_device,
    is_torch_dtype, is_torch_tensor, reshape, squeeze, strtobool, tensor_size,
    to_numpy, to_py_obj, transpose, working_or_temp_dir,
)
from .hub import (
    CLOUDFRONT_DISTRIB_PREFIX, DISABLE_TELEMETRY, HF_MODULES_CACHE,
    HUGGINGFACE_CO_PREFIX, HUGGINGFACE_CO_RESOLVE_ENDPOINT,
    PYTORCH_PRETRAINED_BERT_CACHE, PYTORCH_TRANSFORMERS_CACHE, S3_BUCKET_PREFIX,
    TRANSFORMERS_CACHE, TRANSFORMERS_DYNAMIC_MODULE_NAME, EntryNotFoundError,
    PushToHubMixin, RepositoryNotFoundError, RevisionNotFoundError, cached_file,
    default_cache_path, define_sagemaker_information, download_url,
    extract_commit_hash, get_cached_models, get_file_from_repo,
    get_full_repo_name, has_file, http_user_agent, is_offline_mode,
    is_remote_url, move_cache, send_example_telemetry, try_to_load_from_cache,
)
from .import_utils import (
    ENV_VARS_TRUE_AND_AUTO_VALUES, ENV_VARS_TRUE_VALUES,
    TORCH_FX_REQUIRED_VERSION, USE_JAX, USE_TF, USE_TORCH, DummyObject,
    OptionalDependencyNotAvailable, _LazyModule, ccl_version,
    direct_transformers_import, get_torch_version, is_accelerate_available,
    is_apex_available, is_bitsandbytes_available, is_bs4_available,
    is_coloredlogs_available, is_cython_available, is_datasets_available,
    is_decord_available, is_detectron2_available, is_faiss_available,
    is_flax_available, is_ftfy_available, is_in_notebook, is_ipex_available,
    is_jieba_available, is_jumanpp_available, is_kenlm_available,
    is_keras_nlp_available, is_librosa_available, is_natten_available,
    is_ninja_available, is_onnx_available, is_openai_available,
    is_optimum_available, is_pandas_available, is_peft_available,
    is_phonemizer_available, is_protobuf_available, is_psutil_available,
    is_py3nvml_available, is_pyctcdecode_available, is_pytesseract_available,
    is_pytest_available, is_pytorch_quantization_available, is_rjieba_available,
    is_sacremoses_available, is_safetensors_available, is_sagemaker_dp_enabled,
    is_sagemaker_mp_enabled, is_scipy_available, is_sentencepiece_available,
    is_seqio_available, is_sklearn_available, is_soundfile_availble,
    is_spacy_available, is_speech_available, is_sudachi_available,
    is_tensorflow_probability_available, is_tensorflow_text_available,
    is_tf2onnx_available, is_tf_available, is_timm_available,
    is_tokenizers_available, is_torch_available, is_torch_bf16_available,
    is_torch_bf16_cpu_available, is_torch_bf16_gpu_available,
    is_torch_compile_available, is_torch_cuda_available, is_torch_fx_available,
    is_torch_fx_proxy, is_torch_mps_available, is_torch_neuroncore_available,
    is_torch_tensorrt_fx_available, is_torch_tf32_available,
    is_torch_tpu_available, is_torchaudio_available, is_torchdistx_available,
    is_torchdynamo_available, is_torchvision_available,
    is_training_run_on_sagemaker, is_vision_available, requires_backends,
    torch_only_method,
)


WEIGHTS_NAME = "pytorch_model.bin"
WEIGHTS_INDEX_NAME = "pytorch_model.bin.index.json"
ADAPTER_CONFIG_NAME = "adapter_config.json"
ADAPTER_WEIGHTS_NAME = "adapter_model.bin"
ADAPTER_SAFE_WEIGHTS_NAME = "adapter_model.safetensors"
TF2_WEIGHTS_NAME = "tf_model.h5"
TF2_WEIGHTS_INDEX_NAME = "tf_model.h5.index.json"
TF_WEIGHTS_NAME = "model.ckpt"
FLAX_WEIGHTS_NAME = "flax_model.msgpack"
FLAX_WEIGHTS_INDEX_NAME = "flax_model.msgpack.index.json"
SAFE_WEIGHTS_NAME = "model.safetensors"
SAFE_WEIGHTS_INDEX_NAME = "model.safetensors.index.json"
CONFIG_NAME = "config.json"
FEATURE_EXTRACTOR_NAME = "preprocessor_config.json"
IMAGE_PROCESSOR_NAME = FEATURE_EXTRACTOR_NAME
GENERATION_CONFIG_NAME = "generation_config.json"
MODEL_CARD_NAME = "modelcard.json"

SENTENCEPIECE_UNDERLINE = "▁"
SPIECE_UNDERLINE = SENTENCEPIECE_UNDERLINE  # Kept for backward compatibility

MULTIPLE_CHOICE_DUMMY_INPUTS = [
    [[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2  # Needs to have 0s and 1s only since XLM uses it for langs too.
DUMMY_INPUTS = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
DUMMY_MASK = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]


def check_min_version(min_version):
    if version.parse(__version__) < version.parse(min_version):
        if "dev" in min_version:
            error_message = (
                "This example requires a source install from HuggingFace Transformers (see "
                "`https://huggingface.co/docs/transformers/installation#install-from-source`),"
            )
        else:
            error_message = f"This example requires a minimum version of {min_version},"
        error_message += f" but the version found is {__version__}.\n"
        raise ImportError(
            error_message
            + "Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other "
            "versions of HuggingFace Transformers."
        )
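A typical call site for check_min_version, as used at the top of the transformers example scripts (the version string here is only an illustrative value):

from transformers.utils import check_min_version

check_min_version("4.21.0")  # raises ImportError when the installed transformers is older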
code_codestyle: 716
import math
from collections.abc import Iterator
from itertools import takewhile


def is_prime(number: int) -> bool:
    """Checks to see if a number is a prime in O(sqrt(n)) time."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator() -> Iterator[int]:
    """Generate the sequence of prime numbers."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(n: int = 2_000_000) -> int:
    """Return the sum of all the primes below n."""
    return sum(takewhile(lambda x: x < n, prime_generator()))


if __name__ == "__main__":
    print(f"{solution() = }")
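Spot checks (the first value follows from 2 + 3 + 5 + 7; the second is the published Project Euler 10 answer):

assert solution(10) == 17
assert solution() == 142913828922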
style_context_codestyle: 0
label: 0
def sum_digits(num: int) -> int:
    """Return the sum of the decimal digits of num."""
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum


def solution(max_n: int = 100) -> int:
    """Return the digit sum of the numerator of the max_n-th convergent of e."""
    pre_numerator = 1
    cur_numerator = 2
    # The continued fraction of e is [2; 1, 2, 1, 1, 4, 1, 1, 6, ...]:
    # every third partial quotient is 2k, the rest are 1.
    for i in range(2, max_n + 1):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp
    return sum_digits(cur_numerator)


if __name__ == "__main__":
    print(f"{solution() = }")
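Spot checks: the 10th convergent of e is 1457/536, and the digit sum of the 100th convergent's numerator is the Project Euler 65 answer:

assert solution(10) == 17  # 1 + 4 + 5 + 7
assert solution() == 272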
code_codestyle: 717
import unittest

import numpy as np

from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
    expand_dims,
    flatten_dict,
    is_flax_available,
    is_tf_available,
    is_torch_available,
    reshape,
    squeeze,
    transpose,
)


if is_flax_available():
    import jax.numpy as jnp

if is_tf_available():
    import tensorflow as tf

if is_torch_available():
    import torch


class GenericTester(unittest.TestCase):
    def test_flatten_dict(self):
        input_dict = {
            "task_specific_params": {
                "summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4},
                "summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4},
                "summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6},
            }
        }
        expected_dict = {
            "task_specific_params.summarization.length_penalty": 1.0,
            "task_specific_params.summarization.max_length": 128,
            "task_specific_params.summarization.min_length": 12,
            "task_specific_params.summarization.num_beams": 4,
            "task_specific_params.summarization_cnn.length_penalty": 2.0,
            "task_specific_params.summarization_cnn.max_length": 142,
            "task_specific_params.summarization_cnn.min_length": 56,
            "task_specific_params.summarization_cnn.num_beams": 4,
            "task_specific_params.summarization_xsum.length_penalty": 1.0,
            "task_specific_params.summarization_xsum.max_length": 62,
            "task_specific_params.summarization_xsum.min_length": 11,
            "task_specific_params.summarization_xsum.num_beams": 6,
        }

        self.assertEqual(flatten_dict(input_dict), expected_dict)

    def test_transpose_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(transpose(x), x.transpose()))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), x.transpose((1, 2, 0))))

    @require_torch
    def test_transpose_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_tf
    def test_transpose_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_flax
    def test_transpose_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x), np.asarray(transpose(t))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), np.asarray(transpose(t, axes=(1, 2, 0)))))

    def test_reshape_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.reshape(x, (4, 3))))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.reshape(x, (12, 5))))

    @require_torch
    def test_reshape_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_tf
    def test_reshape_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_flax
    def test_reshape_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.asarray(reshape(t, (4, 3)))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.asarray(reshape(t, (12, 5)))))

    def test_squeeze_numpy(self):
        x = np.random.randn(1, 3, 4)
        self.assertTrue(np.allclose(squeeze(x), np.squeeze(x)))

        x = np.random.randn(1, 4, 1, 5)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.squeeze(x, axis=2)))

    @require_torch
    def test_squeeze_torch(self):
        x = np.random.randn(1, 3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_tf
    def test_squeeze_tf(self):
        x = np.random.randn(1, 3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_flax
    def test_squeeze_flax(self):
        x = np.random.randn(1, 3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x), np.asarray(squeeze(t))))

        x = np.random.randn(1, 4, 1, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.asarray(squeeze(t, axis=2))))

    def test_expand_dims_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.expand_dims(x, axis=1)))

    @require_torch
    def test_expand_dims_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_tf
    def test_expand_dims_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_flax
    def test_expand_dims_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.asarray(expand_dims(t, axis=1))))
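The point of these helpers is that a single call works across frameworks, dispatching on the tensor type. A NumPy-only demonstration (the torch, tf, and jax branches exercised above behave identically):

import numpy as np
from transformers.utils import flatten_dict, transpose

x = np.arange(12).reshape(3, 4)
print(transpose(x).shape)  # (4, 3)
print(flatten_dict({"a": {"b": 1, "c": {"d": 2}}}))  # {'a.b': 1, 'a.c.d': 2}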
style_context_codestyle: 0
label: 0
from __future__ import annotations

from fractions import Fraction


def is_digit_cancelling(num: int, den: int) -> bool:
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )


def fraction_list(digit_len: int) -> list[str]:
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
            den += 1
        num += 1
        den = 10
    return solutions


def solution(n: int = 2) -> int:
    """Return the denominator of the product of the digit-cancelling
    fractions, given in lowest common terms."""
    result = 1.0
    for fraction in fraction_list(n):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)


if __name__ == "__main__":
    print(solution())
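Spot checks (the ordering follows the ascending numerator scan in fraction_list):

assert fraction_list(2) == ["16/64", "19/95", "26/65", "49/98"]
assert solution() == 100  # 1/4 * 1/5 * 2/5 * 1/2 = 1/100, denominator 100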
code_codestyle: 718
from pathlib import PurePosixPath
from typing import Optional

import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo

from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url


class HfFileSystem(AbstractFileSystem):
    """Interface to files in a Hugging Face repository."""

    root_marker = ""
    protocol = "hf-legacy"  # "hf://"" is reserved for hffs

    def __init__(
        self,
        repo_info: Optional[DatasetInfo] = None,
        token: Optional[str] = None,
        **kwargs,
    ):
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None

    def _get_dirs(self):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                self.dir_cache.update(
                    {
                        str(d): {"name": str(d), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    }
                )

    def _open(self, path: str, mode: str = "rb", **kwargs):
        if not isinstance(self.repo_info, DatasetInfo):
            raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}")
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
        return fsspec.open(
            url,
            mode=mode,
            headers=get_authentication_headers_for_url(url, use_auth_token=self.token),
            client_kwargs={"trust_env": True},
        ).open()

    def info(self, path: str, **kwargs):
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)

    def ls(self, path: str, detail: bool = False, **kwargs):
        self._get_dirs()
        path = PurePosixPath(path.strip("/"))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("/"))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out)
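A sketch of how this legacy filesystem is meant to be driven (assumptions: a DatasetInfo fetched through huggingface_hub's HfApi and network access; the real call sites live elsewhere in datasets):

from huggingface_hub import HfApi

repo_info = HfApi().dataset_info("squad")  # DatasetInfo carrying .id, .sha and .siblings
fs = HfFileSystem(repo_info=repo_info)
print(fs.ls(""))  # top-level files and directories in the dataset repository
with fs.open("README.md") as f:  # open() routes through _open() above
    print(f.read()[:100])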
style_context_codestyle: 0
label: 0
# Usage:
# ./gen-card-facebook-wmt19.py

import os
from pathlib import Path


def write_model_card(model_card_dir, src_lang, tgt_lang):
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, oder?",
    }

    # BLUE scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
        "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
        "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
        "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
    }
    pair = f"{src_lang}-{tgt_lang}"

    readme = f"\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn't support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR's WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n"

    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)


# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    base, src_lang, tgt_lang = model_name.split("-")
    model_card_dir = model_cards_dir / "facebook" / model_name
    write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
code_codestyle: 719
import pyarrow.parquet as pq
import pytest

from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size

from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases


def _check_parquet_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_parquet_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_parquet_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path, features=features, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_parquet_split(split, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, split=split).read()
    _check_parquet_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_parquet_path_type(path_type, parquet_path, tmp_path):
    if issubclass(path_type, str):
        path = parquet_path
    elif issubclass(path_type, list):
        path = [parquet_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)


def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_parquet_datasetdict_reader_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(
            {"train": parquet_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_parquet_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_parquet_datasetdict_reader_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader({"train": parquet_path}, features=features, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_parquet_datasetdict_reader_split(split, parquet_path, tmp_path):
    if split:
        path = {split: parquet_path}
    else:
        split = "train"
        path = {"train": parquet_path, "test": parquet_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())


def test_parquet_write(dataset, tmp_path):
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / "foo.parquet")
    output_table = pf.read()
    assert dataset.data.table == output_table


def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path):
    image_path = str(shared_datadir / "test_image_rgb.jpg")
    data = {"image": [image_path]}
    features = Features({"image": Image()})
    dataset = Dataset.from_dict(data, features=features)
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0

    reloaded_dataset = Dataset.from_parquet(str(tmp_path / "foo.parquet"))
    assert dataset.features == reloaded_dataset.features

    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / "foo.parquet"), streaming=True).read()
    assert dataset.features == reloaded_iterable_dataset.features


@pytest.mark.parametrize(
    "feature, expected",
    [
        (Features({"foo": Value("int32")}), None),
        (Features({"image": Image(), "foo": Value("int32")}), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
        (Features({"nested": Sequence(Audio())}), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
    ],
)
def test_get_writer_batch_size(feature, expected):
    assert get_writer_batch_size(feature) == expected
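Outside the test suite the same machinery is reachable from the public Dataset API; a minimal round trip (assuming a writable working directory):

from datasets import Dataset

ds = Dataset.from_dict({"col_1": ["a", "b", "c", "d"], "col_2": [1, 2, 3, 4]})
ds.to_parquet("tmp.parquet")                    # backed by ParquetDatasetWriter
reloaded = Dataset.from_parquet("tmp.parquet")  # backed by ParquetDatasetReader
assert reloaded.column_names == ["col_1", "col_2"]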
style_context_codestyle: 0
label: 0
def get_highest_set_bit_position(number: int) -> int:
    """Return the 1-based position of the highest set bit (0 for zero)."""
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")

    position = 0
    while number:
        position += 1
        number >>= 1
    return position


if __name__ == "__main__":
    import doctest

    doctest.testmod()
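Spot checks (for non-negative integers this matches int.bit_length):

assert get_highest_set_bit_position(0) == 0
assert get_highest_set_bit_position(5) == 3      # 0b101
assert get_highest_set_bit_position(8) == 4      # 0b1000
assert get_highest_set_bit_position(2024) == (2024).bit_length()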
code_codestyle: 720
def multiplicative_persistence(num: int) -> int:
    """Count how many times the digits of num must be multiplied together
    before a single digit remains."""
    if not isinstance(num, int):
        raise ValueError("multiplicative_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("multiplicative_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]
        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]
        num_string = str(total)
        steps += 1
    return steps


def additive_persistence(num: int) -> int:
    """Count how many times the digits of num must be summed together
    before a single digit remains."""
    if not isinstance(num, int):
        raise ValueError("additive_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("additive_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]
        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]
        num_string = str(total)
        steps += 1
    return steps


if __name__ == "__main__":
    import doctest

    doctest.testmod()
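Spot checks:

assert multiplicative_persistence(217) == 2  # 2*1*7 = 14, then 1*4 = 4
assert additive_persistence(199) == 3        # 1+9+9 = 19, 1+9 = 10, 1+0 = 1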
style_context_codestyle: 0
label: 0
'''simple docstring''' import copy import unittest from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_MULTIPLE_CHOICE_MAPPING, MODEL_FOR_QUESTION_ANSWERING_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, LayoutLMvaConfig, LayoutLMvaForQuestionAnswering, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaModel, ) from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class _a : """simple docstring""" def __init__( self , A__ , A__=2 , A__=3 , A__=4 , A__=2 , A__=7 , A__=True , A__=True , A__=True , A__=True , A__=99 , A__=36 , A__=3 , A__=4 , A__=37 , A__="gelu" , A__=0.1 , A__=0.1 , A__=5_12 , A__=16 , A__=2 , A__=0.02 , A__=6 , A__=6 , A__=3 , A__=4 , A__=None , A__=10_00 , ) -> Union[str, Any]: _SCREAMING_SNAKE_CASE = parent _SCREAMING_SNAKE_CASE = batch_size _SCREAMING_SNAKE_CASE = num_channels _SCREAMING_SNAKE_CASE = image_size _SCREAMING_SNAKE_CASE = patch_size _SCREAMING_SNAKE_CASE = text_seq_length _SCREAMING_SNAKE_CASE = is_training _SCREAMING_SNAKE_CASE = use_input_mask _SCREAMING_SNAKE_CASE = use_token_type_ids _SCREAMING_SNAKE_CASE = use_labels _SCREAMING_SNAKE_CASE = vocab_size _SCREAMING_SNAKE_CASE = hidden_size _SCREAMING_SNAKE_CASE = num_hidden_layers _SCREAMING_SNAKE_CASE = num_attention_heads _SCREAMING_SNAKE_CASE = intermediate_size _SCREAMING_SNAKE_CASE = hidden_act _SCREAMING_SNAKE_CASE = hidden_dropout_prob _SCREAMING_SNAKE_CASE = attention_probs_dropout_prob _SCREAMING_SNAKE_CASE = max_position_embeddings _SCREAMING_SNAKE_CASE = type_vocab_size _SCREAMING_SNAKE_CASE = type_sequence_label_size _SCREAMING_SNAKE_CASE = initializer_range _SCREAMING_SNAKE_CASE = coordinate_size _SCREAMING_SNAKE_CASE = shape_size _SCREAMING_SNAKE_CASE = num_labels _SCREAMING_SNAKE_CASE = num_choices _SCREAMING_SNAKE_CASE = scope _SCREAMING_SNAKE_CASE = range_bbox # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token) _SCREAMING_SNAKE_CASE = text_seq_length _SCREAMING_SNAKE_CASE = (image_size // patch_size) ** 2 + 1 _SCREAMING_SNAKE_CASE = self.text_seq_length + self.image_seq_length def UpperCamelCase ( self ) -> Optional[int]: _SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size ) _SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox ) # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: _SCREAMING_SNAKE_CASE = bbox[i, j, 3] _SCREAMING_SNAKE_CASE = bbox[i, j, 1] _SCREAMING_SNAKE_CASE = t if bbox[i, j, 2] < bbox[i, j, 0]: _SCREAMING_SNAKE_CASE = bbox[i, j, 2] _SCREAMING_SNAKE_CASE = bbox[i, j, 0] _SCREAMING_SNAKE_CASE = t _SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _SCREAMING_SNAKE_CASE = None if self.use_input_mask: _SCREAMING_SNAKE_CASE = 
random_attention_mask([self.batch_size, self.text_seq_length] ) _SCREAMING_SNAKE_CASE = None if self.use_token_type_ids: _SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size ) _SCREAMING_SNAKE_CASE = None _SCREAMING_SNAKE_CASE = None if self.use_labels: _SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels ) _SCREAMING_SNAKE_CASE = LayoutLMvaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , ) return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ ) -> Optional[int]: _SCREAMING_SNAKE_CASE = LayoutLMvaModel(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() # text + image _SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , pixel_values=UpperCAmelCase__ ) _SCREAMING_SNAKE_CASE = model( UpperCAmelCase__ , bbox=UpperCAmelCase__ , pixel_values=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ ) _SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , bbox=UpperCAmelCase__ , pixel_values=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ ) _SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , bbox=UpperCAmelCase__ , pixel_values=UpperCAmelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) # text only _SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) ) # image only _SCREAMING_SNAKE_CASE = model(pixel_values=UpperCAmelCase__ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) ) def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ ) -> int: _SCREAMING_SNAKE_CASE = self.num_labels _SCREAMING_SNAKE_CASE = LayoutLMvaForSequenceClassification(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() _SCREAMING_SNAKE_CASE = model( UpperCAmelCase__ , bbox=UpperCAmelCase__ , pixel_values=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ ) -> Optional[Any]: _SCREAMING_SNAKE_CASE = self.num_labels _SCREAMING_SNAKE_CASE = LayoutLMvaForTokenClassification(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() _SCREAMING_SNAKE_CASE = model( UpperCAmelCase__ , bbox=UpperCAmelCase__ , pixel_values=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) ) def UpperCamelCase ( self , A__ , 
A__ , A__ , A__ , A__ , A__ , A__ , A__ ) -> int: _SCREAMING_SNAKE_CASE = LayoutLMvaForQuestionAnswering(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() _SCREAMING_SNAKE_CASE = model( UpperCAmelCase__ , bbox=UpperCAmelCase__ , pixel_values=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , start_positions=UpperCAmelCase__ , end_positions=UpperCAmelCase__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCamelCase ( self ) -> Optional[Any]: _SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() ( _SCREAMING_SNAKE_CASE ) = config_and_inputs _SCREAMING_SNAKE_CASE = { '''input_ids''': input_ids, '''bbox''': bbox, '''pixel_values''': pixel_values, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask, } return config, inputs_dict @require_torch class _a (lowercase__ , lowercase__ , unittest.TestCase): """simple docstring""" SCREAMING_SNAKE_CASE = False SCREAMING_SNAKE_CASE = False SCREAMING_SNAKE_CASE = False SCREAMING_SNAKE_CASE = ( ( LayoutLMvaModel, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaForQuestionAnswering, ) if is_torch_available() else () ) SCREAMING_SNAKE_CASE = ( {'document-question-answering': LayoutLMvaForQuestionAnswering, 'feature-extraction': LayoutLMvaModel} if is_torch_available() else {} ) def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ ) -> Optional[Any]: return True def UpperCamelCase ( self ) -> Union[str, Any]: _SCREAMING_SNAKE_CASE = LayoutLMvaModelTester(self ) _SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=UpperCAmelCase__ , hidden_size=37 ) def UpperCamelCase ( self , A__ , A__ , A__=False ) -> Tuple: _SCREAMING_SNAKE_CASE = copy.deepcopy(UpperCAmelCase__ ) if model_class in get_values(UpperCAmelCase__ ): _SCREAMING_SNAKE_CASE = { k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous() if isinstance(UpperCAmelCase__ , torch.Tensor ) and v.ndim > 1 else v for k, v in inputs_dict.items() } if return_labels: if model_class in get_values(UpperCAmelCase__ ): _SCREAMING_SNAKE_CASE = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase__ ) elif model_class in get_values(UpperCAmelCase__ ): _SCREAMING_SNAKE_CASE = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase__ ) _SCREAMING_SNAKE_CASE = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase__ ) elif model_class in [ *get_values(UpperCAmelCase__ ), ]: _SCREAMING_SNAKE_CASE = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase__ ) elif model_class in [ *get_values(UpperCAmelCase__ ), ]: _SCREAMING_SNAKE_CASE = torch.zeros( (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=UpperCAmelCase__ , ) return inputs_dict def UpperCamelCase ( self ) -> Optional[int]: self.config_tester.run_common_tests() def UpperCamelCase ( self ) -> Optional[Any]: _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase__ ) def UpperCamelCase ( self ) -> Any: _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: _SCREAMING_SNAKE_CASE = type self.model_tester.create_and_check_model(*UpperCAmelCase__ ) def UpperCamelCase ( self ) -> Tuple: 
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase__ ) def UpperCamelCase ( self ) -> List[Any]: _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase__ ) def UpperCamelCase ( self ) -> Tuple: _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase__ ) @slow def UpperCamelCase ( self ) -> str: for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _SCREAMING_SNAKE_CASE = LayoutLMvaModel.from_pretrained(UpperCAmelCase__ ) self.assertIsNotNone(UpperCAmelCase__ ) def lowerCAmelCase_ ( ) -> List[str]: """simple docstring""" _SCREAMING_SNAKE_CASE = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch class _a (unittest.TestCase): """simple docstring""" @cached_property def UpperCamelCase ( self ) -> Optional[int]: return LayoutLMvaImageProcessor(apply_ocr=UpperCAmelCase__ ) if is_vision_available() else None @slow def UpperCamelCase ( self ) -> str: _SCREAMING_SNAKE_CASE = LayoutLMvaModel.from_pretrained("""microsoft/layoutlmv3-base""" ).to(UpperCAmelCase__ ) _SCREAMING_SNAKE_CASE = self.default_image_processor _SCREAMING_SNAKE_CASE = prepare_img() _SCREAMING_SNAKE_CASE = image_processor(images=UpperCAmelCase__ , return_tensors="""pt""" ).pixel_values.to(UpperCAmelCase__ ) _SCREAMING_SNAKE_CASE = torch.tensor([[1, 2]] ) _SCREAMING_SNAKE_CASE = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 ) # forward pass _SCREAMING_SNAKE_CASE = model( input_ids=input_ids.to(UpperCAmelCase__ ) , bbox=bbox.to(UpperCAmelCase__ ) , pixel_values=pixel_values.to(UpperCAmelCase__ ) , ) # verify the logits _SCREAMING_SNAKE_CASE = torch.Size((1, 1_99, 7_68) ) self.assertEqual(outputs.last_hidden_state.shape , UpperCAmelCase__ ) _SCREAMING_SNAKE_CASE = torch.tensor( [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] ).to(UpperCAmelCase__ ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , UpperCAmelCase__ , atol=1E-4 ) )
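# ---------------------------------------------------------------------------
# Illustrative sketch (added for clarity, not part of the original test
# suite): building the same kind of multimodal inputs that the integration
# test above feeds to the model. The checkpoint name, the box values and the
# (1, 199, 768) output shape come from the test itself; the 224x224 image
# size is an assumption about the default image processor.
#
#   import torch
#   from transformers import LayoutLMvaModel
#
#   model = LayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base")
#   input_ids = torch.tensor([[1, 2]])                   # (batch, text_seq_len)
#   bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]])  # one box per token
#   pixel_values = torch.rand(1, 3, 224, 224)            # dummy document image
#   outputs = model(input_ids=input_ids, bbox=bbox, pixel_values=pixel_values)
#   # last_hidden_state covers text tokens plus image patch tokens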
'''simple docstring''' import math import os import re import sys import unittest from pathlib import Path from typing import Tuple from unittest.mock import patch from parameterized import parameterized from transformers.testing_utils import ( CaptureStderr, ExtendSysPath, TestCasePlus, execute_subprocess_async, get_gpu_count, get_torch_dist_unique_port, require_apex, require_bitsandbytes, require_fairscale, require_torch, require_torch_gpu, require_torch_multi_gpu, require_torch_non_multi_gpu, slow, ) from transformers.trainer_callback import TrainerState from transformers.trainer_utils import set_seed UpperCamelCase__ : Tuple = os.path.abspath(os.path.dirname(__file__)) with ExtendSysPath(f"""{bindir}/../../examples/pytorch/translation"""): from run_translation import main # noqa set_seed(42) UpperCamelCase__ : Union[str, Any] = "sshleifer/student_marian_en_ro_6_1" UpperCamelCase__ : str = "sshleifer/tiny-mbart" @require_torch class _a (_lowerCamelCase): """simple docstring""" def UpperCamelCase ( self , A__=False , A__=None , A__=True , A__=True , A__=True , A__=True , ) -> Optional[Any]: _SCREAMING_SNAKE_CASE = self.run_trainer( eval_steps=1 , max_len=12 , model_name=A__ , num_train_epochs=1 , distributed=A__ , extra_args_str=A__ , predict_with_generate=A__ , do_train=A__ , do_eval=A__ , do_predict=A__ , ) _SCREAMING_SNAKE_CASE = TrainerState.load_from_json(os.path.join(A__ , """trainer_state.json""" ) ).log_history if not do_eval: return _SCREAMING_SNAKE_CASE = [log for log in logs if """eval_loss""" in log.keys()] _SCREAMING_SNAKE_CASE = eval_metrics[0] if predict_with_generate: assert "eval_bleu" in first_step_stats _SCREAMING_SNAKE_CASE = eval_metrics[-1] assert isinstance(last_step_stats["""eval_bleu"""] , A__ ) assert not math.isnan(float(last_step_stats["""eval_loss"""] ) ), "eval_loss must not be `nan`" @require_torch_non_multi_gpu def UpperCamelCase ( self ) -> Optional[int]: self.run_seqaseq_quick() @require_torch_multi_gpu def UpperCamelCase ( self ) -> Optional[Any]: self.run_seqaseq_quick(distributed=A__ ) @require_torch_multi_gpu def UpperCamelCase ( self ) -> Union[str, Any]: self.run_seqaseq_quick(distributed=A__ ) @unittest.skip("""Requires an update of the env running those tests""" ) @require_torch_multi_gpu @require_fairscale def UpperCamelCase ( self ) -> Any: self.run_seqaseq_quick(distributed=A__ , extra_args_str="""--sharded_ddp simple""" ) @unittest.skip("""Requires an update of the env running those tests""" ) @require_torch_multi_gpu @require_fairscale def UpperCamelCase ( self ) -> Tuple: self.run_seqaseq_quick(distributed=A__ , extra_args_str="""--sharded_ddp simple --fp16""" ) @unittest.skip("""Requires an update of the env running those tests""" ) @require_torch_multi_gpu @require_fairscale def UpperCamelCase ( self ) -> str: self.run_seqaseq_quick(distributed=A__ , extra_args_str="""--sharded_ddp zero_dp_2""" , predict_with_generate=A__ ) @unittest.skip("""Requires an update of the env running those tests""" ) @require_torch_multi_gpu @require_fairscale def UpperCamelCase ( self ) -> List[str]: self.run_seqaseq_quick( distributed=A__ , extra_args_str="""--sharded_ddp zero_dp_2 --fp16""" , predict_with_generate=A__ ) @require_apex @require_torch_gpu def UpperCamelCase ( self ) -> Optional[Any]: # XXX: apex breaks the trainer if it's run twice e.g. 
run_seq2seq.main() from the same # program and it breaks other tests that run from the same pytest worker, therefore until this is # sorted out it must be run only in an external program, that is distributed=True in this # test and only under one or more gpus - if we want cpu will need to make a special test # # specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via # 2nd main() call it botches the future eval. # self.run_seqaseq_quick(distributed=A__ , extra_args_str="""--fp16 --fp16_backend=apex""" ) # test 2nd time - was getting eval_loss': nan' # to reproduce the problem set distributed=False self.run_seqaseq_quick(distributed=A__ , extra_args_str="""--fp16 --fp16_backend=apex""" ) @parameterized.expand(["""base""", """low""", """high""", """mixed"""] ) @require_torch_multi_gpu def UpperCamelCase ( self , A__ ) -> List[Any]: # as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout _SCREAMING_SNAKE_CASE = { # test with the default log_level - should be info and thus log info once """base""": {"""extra_args_str""": """""", """n_matches""": 1}, # test with low log_level and log_level_replica - should be noisy on all processes # now the info string should appear twice on 2 processes """low""": {"""extra_args_str""": """--log_level debug --log_level_replica debug""", """n_matches""": 2}, # test with high log_level and low log_level_replica # now the info string should appear once only on the replica """high""": {"""extra_args_str""": """--log_level error --log_level_replica debug""", """n_matches""": 1}, # test with high log_level and log_level_replica - should be quiet on all processes """mixed""": {"""extra_args_str""": """--log_level error --log_level_replica error""", """n_matches""": 0}, } _SCREAMING_SNAKE_CASE = experiments[experiment_id] _SCREAMING_SNAKE_CASE = {"""distributed""": True, """predict_with_generate""": False, """do_eval""": False, """do_predict""": False} _SCREAMING_SNAKE_CASE = """Running training""" with CaptureStderr() as cl: self.run_seqaseq_quick(**A__ , extra_args_str=data["""extra_args_str"""] ) _SCREAMING_SNAKE_CASE = len(re.findall(A__ , cl.err ) ) self.assertEqual(A__ , data["""n_matches"""] ) @slow def UpperCamelCase ( self ) -> Any: _SCREAMING_SNAKE_CASE = self.run_trainer( eval_steps=2 , max_len=1_28 , model_name=A__ , learning_rate=3E-4 , num_train_epochs=10 , distributed=A__ , ) # Check metrics _SCREAMING_SNAKE_CASE = TrainerState.load_from_json(os.path.join(A__ , """trainer_state.json""" ) ).log_history _SCREAMING_SNAKE_CASE = [log for log in logs if """eval_loss""" in log.keys()] _SCREAMING_SNAKE_CASE = eval_metrics[0] _SCREAMING_SNAKE_CASE = eval_metrics[-1] assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing" assert isinstance(last_step_stats["""eval_bleu"""] , A__ ) # test if do_predict saves generations and metrics _SCREAMING_SNAKE_CASE = os.listdir(A__ ) _SCREAMING_SNAKE_CASE = {os.path.basename(A__ ) for p in contents} assert "generated_predictions.txt" in contents assert "predict_results.json" in contents @slow @require_bitsandbytes def UpperCamelCase ( self ) -> Dict: from transformers.training_args import OptimizerNames def train_and_return_metrics(A__ ) -> Tuple[int, float]: _SCREAMING_SNAKE_CASE = """--skip_memory_metrics 0""" _SCREAMING_SNAKE_CASE = self.run_trainer( max_len=1_28 , model_name=A__ , learning_rate=3E-4 , num_train_epochs=1 , optim=A__ , distributed=A__ , extra_args_str=A__ , do_eval=A__ , do_predict=A__ , n_gpus_to_use=1 , ) # 
Check metrics _SCREAMING_SNAKE_CASE = TrainerState.load_from_json(Path(A__ , """trainer_state.json""" ) ).log_history _SCREAMING_SNAKE_CASE = int(logs[0]["""train_mem_gpu_peaked_delta"""] / 2**20 ) _SCREAMING_SNAKE_CASE = int(logs[0]["""train_mem_gpu_alloc_delta"""] / 2**20 ) _SCREAMING_SNAKE_CASE = logs[0]["""train_loss"""] return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value ) _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value ) _SCREAMING_SNAKE_CASE = gpu_alloc_mem_orig - gpu_alloc_mem_bnb _SCREAMING_SNAKE_CASE = gpu_peak_mem_orig + gpu_alloc_mem_orig _SCREAMING_SNAKE_CASE = gpu_peak_mem_bnb + gpu_alloc_mem_bnb _SCREAMING_SNAKE_CASE = gpu_total_mem_orig - gpu_total_mem_bnb # sshleifer/student_marian_en_ro_6_1 has 54M parameters, 29M of which is `nn.Embedding` which # doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized # in 2 bytes and the diff in optim memory usage is derived as follows: # # - normal 25*8=~200MB (8 bytes per param) # - bnb 25*2= ~50MB (2 bytes per param) # # Thus we should expect ~150MB total memory saved. # # Peak memory should be the same - the total should be different by about that same margin # # After leaving a small margin to accommodate for differences between gpus let's check # that we have at least 120MB in savings _SCREAMING_SNAKE_CASE = 1_20 # uncomment the following if this test starts failing - requires py38 for a new print feature # gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb # print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB") # print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB") # print(f"{gpu_alloc_mem_diff=}MB") # print(f"{gpu_peak_mem_diff=}MB") # print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB") # print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB") self.assertGreater( A__ , A__ , """should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got""" F" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and" F" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB" , ) self.assertGreater( A__ , A__ , """should use ~150MB less total gpu memory with BNB, compared to without it for this model but got""" F" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and" F" gpu_total_mem_bnb={gpu_total_mem_bnb}MB" , ) self.assertEqual( A__ , A__ , F"loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}" ) def UpperCamelCase ( self , A__ , A__ , A__ , A__ = 3E-3 , A__ = "adafactor" , A__ = False , A__ = None , A__ = 0 , A__ = True , A__ = True , A__ = True , A__ = True , A__ = None , ) -> Dict: _SCREAMING_SNAKE_CASE = self.test_file_dir / """../fixtures/tests_samples/wmt_en_ro""" _SCREAMING_SNAKE_CASE = self.get_auto_remove_tmp_dir() _SCREAMING_SNAKE_CASE = F"\n --model_name_or_path {model_name}\n --train_file {data_dir}/train.json\n --validation_file {data_dir}/val.json\n --test_file {data_dir}/test.json\n --output_dir {output_dir}\n --overwrite_output_dir\n --max_train_samples 8\n --max_source_length {max_len}\n --max_target_length {max_len}\n --do_train\n --num_train_epochs {str(A__ )}\n --per_device_train_batch_size 4\n --learning_rate {learning_rate}\n --warmup_steps 8\n --logging_steps 0\n --logging_strategy no\n --save_steps {str(A__ )}\n --group_by_length\n --label_smoothing_factor 0.1\n --target_lang ro_RO\n --source_lang en_XX\n ".split() _SCREAMING_SNAKE_CASE = F"\n --do_eval\n --per_device_eval_batch_size 4\n --max_eval_samples 8\n --val_max_target_length {max_len}\n --evaluation_strategy steps\n --eval_steps {str(A__ )}\n ".split() _SCREAMING_SNAKE_CASE = """ --do_predict """.split() _SCREAMING_SNAKE_CASE = [] if do_train: args += args_train if do_eval: args += args_eval if do_predict: args += args_predict if predict_with_generate: args += "--predict_with_generate".split() if do_train: if optim == "adafactor": args += "--adafactor".split() else: args += F"--optim {optim}".split() if extra_args_str is not None: args += extra_args_str.split() if distributed: if n_gpus_to_use is None: _SCREAMING_SNAKE_CASE = get_gpu_count() _SCREAMING_SNAKE_CASE = get_torch_dist_unique_port() _SCREAMING_SNAKE_CASE = F"\n -m torch.distributed.run\n --nproc_per_node={n_gpus_to_use}\n --master_port={master_port}\n {self.examples_dir_str}/pytorch/translation/run_translation.py\n ".split() _SCREAMING_SNAKE_CASE = [sys.executable] + distributed_args + args # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die execute_subprocess_async(A__ , env=self.get_env() ) else: _SCREAMING_SNAKE_CASE = ["""run_translation.py"""] + args with patch.object(A__ , """argv""" , A__ ): main() return output_dir
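# ---------------------------------------------------------------------------
# For reference (illustrative note, not part of the original tests): with
# distributed=True, run_trainer shells out to a command of roughly the shape
# below (GPU count, port and paths are placeholders), while the
# non-distributed branch patches sys.argv and calls run_translation.main()
# in-process:
#
#   python -m torch.distributed.run --nproc_per_node=2 --master_port=29500 \
#       examples/pytorch/translation/run_translation.py \
#       --model_name_or_path sshleifer/student_marian_en_ro_6_1 --do_train ...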
'''simple docstring''' import random import unittest import numpy as np import transformers from transformers import is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax if is_flax_available(): import os import jax.numpy as jnp from jax import jit from transformers import AutoTokenizer, FlaxAutoModelForCausalLM from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model UpperCamelCase__ : Tuple = "0.12" # assumed parallelism: 8 if is_torch_available(): import torch def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None ) -> Union[str, Any]: """simple docstring""" if rng is None: _SCREAMING_SNAKE_CASE = random.Random() _SCREAMING_SNAKE_CASE = 1 for dim in shape: total_dims *= dim _SCREAMING_SNAKE_CASE = [] for _ in range(_lowerCAmelCase ): values.append(rng.randint(0 , vocab_size - 1 ) ) _SCREAMING_SNAKE_CASE = np.array(_lowerCAmelCase , dtype=jnp.intaa ).reshape(_lowerCAmelCase ) return output def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None ) -> int: """simple docstring""" _SCREAMING_SNAKE_CASE = ids_tensor(_lowerCAmelCase , vocab_size=2 , rng=_lowerCAmelCase ) # make sure that at least one token is attended to for each batch _SCREAMING_SNAKE_CASE = 1 return attn_mask @require_flax class _a : SCREAMING_SNAKE_CASE = None SCREAMING_SNAKE_CASE = () def UpperCamelCase ( self ) -> List[str]: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() # cut to half length & take max batch_size 3 _SCREAMING_SNAKE_CASE = 2 _SCREAMING_SNAKE_CASE = inputs["""input_ids"""].shape[-1] // 2 _SCREAMING_SNAKE_CASE = inputs["""input_ids"""][:max_batch_size, :sequence_length] _SCREAMING_SNAKE_CASE = jnp.ones_like(_lowerCamelCase ) _SCREAMING_SNAKE_CASE = attention_mask[:max_batch_size, :sequence_length] # generate max 5 tokens _SCREAMING_SNAKE_CASE = input_ids.shape[-1] + 5 if config.eos_token_id is not None and config.pad_token_id is None: # hack to allow generate for models such as GPT2 as is done in `generate()` _SCREAMING_SNAKE_CASE = config.eos_token_id return config, input_ids, attention_mask, max_length @is_pt_flax_cross_test def UpperCamelCase ( self ) -> Any: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self._get_input_ids_and_config() _SCREAMING_SNAKE_CASE = False _SCREAMING_SNAKE_CASE = max_length _SCREAMING_SNAKE_CASE = 0 for model_class in self.all_generative_model_classes: _SCREAMING_SNAKE_CASE = model_class(_lowerCamelCase ) _SCREAMING_SNAKE_CASE = model_class.__name__[4:] # Skip the "Flax" at the beginning _SCREAMING_SNAKE_CASE = getattr(_lowerCamelCase , _lowerCamelCase ) _SCREAMING_SNAKE_CASE = pt_model_class(_lowerCamelCase ).eval() _SCREAMING_SNAKE_CASE = load_flax_weights_in_pytorch_model(_lowerCamelCase , flax_model.params ) _SCREAMING_SNAKE_CASE = flax_model.generate(_lowerCamelCase ).sequences _SCREAMING_SNAKE_CASE = pt_model.generate(torch.tensor(_lowerCamelCase , dtype=torch.long ) ) if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]: _SCREAMING_SNAKE_CASE = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]] self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist() ) def UpperCamelCase ( self ) -> Tuple: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self._get_input_ids_and_config() _SCREAMING_SNAKE_CASE = False 
_SCREAMING_SNAKE_CASE = max_length for model_class in self.all_generative_model_classes: _SCREAMING_SNAKE_CASE = model_class(_lowerCamelCase ) _SCREAMING_SNAKE_CASE = model.generate(_lowerCamelCase ).sequences self.assertEqual(generation_outputs.shape[-1] , _lowerCamelCase ) _SCREAMING_SNAKE_CASE = jit(model.generate ) _SCREAMING_SNAKE_CASE = jit_generate(_lowerCamelCase ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def UpperCamelCase ( self ) -> Union[str, Any]: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self._get_input_ids_and_config() _SCREAMING_SNAKE_CASE = True _SCREAMING_SNAKE_CASE = max_length for model_class in self.all_generative_model_classes: _SCREAMING_SNAKE_CASE = model_class(_lowerCamelCase ) _SCREAMING_SNAKE_CASE = model.generate(_lowerCamelCase ).sequences self.assertEqual(generation_outputs.shape[-1] , _lowerCamelCase ) _SCREAMING_SNAKE_CASE = jit(model.generate ) _SCREAMING_SNAKE_CASE = jit_generate(_lowerCamelCase ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def UpperCamelCase ( self ) -> int: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self._get_input_ids_and_config() _SCREAMING_SNAKE_CASE = False _SCREAMING_SNAKE_CASE = max_length _SCREAMING_SNAKE_CASE = 2 for model_class in self.all_generative_model_classes: _SCREAMING_SNAKE_CASE = model_class(_lowerCamelCase ) _SCREAMING_SNAKE_CASE = model.generate(_lowerCamelCase ).sequences self.assertEqual(generation_outputs.shape[-1] , _lowerCamelCase ) _SCREAMING_SNAKE_CASE = jit(model.generate ) _SCREAMING_SNAKE_CASE = jit_generate(_lowerCamelCase ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def UpperCamelCase ( self ) -> Union[str, Any]: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self._get_input_ids_and_config() _SCREAMING_SNAKE_CASE = False _SCREAMING_SNAKE_CASE = max_length _SCREAMING_SNAKE_CASE = 2 _SCREAMING_SNAKE_CASE = 2 for model_class in self.all_generative_model_classes: _SCREAMING_SNAKE_CASE = model_class(_lowerCamelCase ) _SCREAMING_SNAKE_CASE = model.generate(_lowerCamelCase ).sequences self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences ) def UpperCamelCase ( self ) -> Tuple: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self._get_input_ids_and_config() _SCREAMING_SNAKE_CASE = True _SCREAMING_SNAKE_CASE = max_length _SCREAMING_SNAKE_CASE = 0.8 _SCREAMING_SNAKE_CASE = 10 _SCREAMING_SNAKE_CASE = 0.3 _SCREAMING_SNAKE_CASE = 1 _SCREAMING_SNAKE_CASE = 8 _SCREAMING_SNAKE_CASE = 9 for model_class in self.all_generative_model_classes: _SCREAMING_SNAKE_CASE = model_class(_lowerCamelCase ) _SCREAMING_SNAKE_CASE = model.generate(_lowerCamelCase ).sequences self.assertEqual(generation_outputs.shape[-1] , _lowerCamelCase ) _SCREAMING_SNAKE_CASE = jit(model.generate ) _SCREAMING_SNAKE_CASE = jit_generate(_lowerCamelCase ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def UpperCamelCase ( self ) -> List[Any]: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self._get_input_ids_and_config() _SCREAMING_SNAKE_CASE = max_length _SCREAMING_SNAKE_CASE = 1 _SCREAMING_SNAKE_CASE = 8 _SCREAMING_SNAKE_CASE = 9 for model_class in 
self.all_generative_model_classes: _SCREAMING_SNAKE_CASE = model_class(_lowerCamelCase ) _SCREAMING_SNAKE_CASE = model.generate(_lowerCamelCase ).sequences self.assertEqual(generation_outputs.shape[-1] , _lowerCamelCase ) _SCREAMING_SNAKE_CASE = jit(model.generate ) _SCREAMING_SNAKE_CASE = jit_generate(_lowerCamelCase ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def UpperCamelCase ( self ) -> Tuple: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self._get_input_ids_and_config() _SCREAMING_SNAKE_CASE = max_length _SCREAMING_SNAKE_CASE = 2 _SCREAMING_SNAKE_CASE = 1 _SCREAMING_SNAKE_CASE = 8 _SCREAMING_SNAKE_CASE = 9 for model_class in self.all_generative_model_classes: _SCREAMING_SNAKE_CASE = model_class(_lowerCamelCase ) _SCREAMING_SNAKE_CASE = model.generate(_lowerCamelCase ).sequences self.assertEqual(generation_outputs.shape[-1] , _lowerCamelCase ) _SCREAMING_SNAKE_CASE = jit(model.generate ) _SCREAMING_SNAKE_CASE = jit_generate(_lowerCamelCase ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def UpperCamelCase ( self ) -> str: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self._get_input_ids_and_config() # pad attention mask on the left _SCREAMING_SNAKE_CASE = attention_mask.at[(0, 0)].set(0 ) _SCREAMING_SNAKE_CASE = False _SCREAMING_SNAKE_CASE = max_length for model_class in self.all_generative_model_classes: _SCREAMING_SNAKE_CASE = model_class(_lowerCamelCase ) _SCREAMING_SNAKE_CASE = model.generate(_lowerCamelCase , attention_mask=_lowerCamelCase ).sequences self.assertEqual(generation_outputs.shape[-1] , _lowerCamelCase ) _SCREAMING_SNAKE_CASE = jit(model.generate ) _SCREAMING_SNAKE_CASE = jit_generate(_lowerCamelCase , attention_mask=_lowerCamelCase ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def UpperCamelCase ( self ) -> str: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self._get_input_ids_and_config() # pad attention mask on the left _SCREAMING_SNAKE_CASE = attention_mask.at[(0, 0)].set(0 ) _SCREAMING_SNAKE_CASE = True _SCREAMING_SNAKE_CASE = max_length for model_class in self.all_generative_model_classes: _SCREAMING_SNAKE_CASE = model_class(_lowerCamelCase ) _SCREAMING_SNAKE_CASE = model.generate(_lowerCamelCase , attention_mask=_lowerCamelCase ).sequences self.assertEqual(generation_outputs.shape[-1] , _lowerCamelCase ) _SCREAMING_SNAKE_CASE = jit(model.generate ) _SCREAMING_SNAKE_CASE = jit_generate(_lowerCamelCase , attention_mask=_lowerCamelCase ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def UpperCamelCase ( self ) -> Tuple: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self._get_input_ids_and_config() # pad attention mask on the left _SCREAMING_SNAKE_CASE = attention_mask.at[(0, 0)].set(0 ) _SCREAMING_SNAKE_CASE = 2 _SCREAMING_SNAKE_CASE = max_length for model_class in self.all_generative_model_classes: _SCREAMING_SNAKE_CASE = model_class(_lowerCamelCase ) _SCREAMING_SNAKE_CASE = model.generate(_lowerCamelCase , attention_mask=_lowerCamelCase ).sequences self.assertEqual(generation_outputs.shape[-1] , _lowerCamelCase ) _SCREAMING_SNAKE_CASE = jit(model.generate ) _SCREAMING_SNAKE_CASE = jit_generate(_lowerCamelCase , attention_mask=_lowerCamelCase ).sequences 
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) @require_flax class _a (unittest.TestCase): def UpperCamelCase ( self ) -> List[str]: _SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-bert""" ) _SCREAMING_SNAKE_CASE = FlaxAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-bert-flax-only""" ) _SCREAMING_SNAKE_CASE = """Hello world""" _SCREAMING_SNAKE_CASE = tokenizer(_lowerCamelCase , return_tensors="""np""" ).input_ids # typos are quickly detected (the correct argument is `do_sample`) with self.assertRaisesRegex(_lowerCamelCase , """do_samples""" ): model.generate(_lowerCamelCase , do_samples=_lowerCamelCase ) # arbitrary arguments that will not be used anywhere are also not accepted with self.assertRaisesRegex(_lowerCamelCase , """foo""" ): _SCREAMING_SNAKE_CASE = {"""foo""": """bar"""} model.generate(_lowerCamelCase , **_lowerCamelCase )
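# ---------------------------------------------------------------------------
# Note (illustrative, added for clarity): the recurring check above, which
# compares eager `model.generate(...)` with `jit(model.generate)(...)`,
# relies on generation running with static shapes, so the bound method can
# be traced once per input shape:
#
#   jit_generate = jit(model.generate)
#   sequences = jit_generate(input_ids, attention_mask=attention_mask).sequences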
'''Project Euler Problem 8: find the thirteen adjacent digits in the
1000-digit number below that have the greatest product.'''
import sys

N = (
    "73167176531330624919225119674426574742355349194934"
    "96983520312774506326239578318016984801869478851843"
    "85861560789112949495459501737958331952853208805511"
    "12540698747158523863050715693290963295227443043557"
    "66896648950445244523161731856403098711121722383113"
    "62229893423380308135336276614282806444486645238749"
    "30358907296290491560440772390713810515859307960866"
    "70172427121883998797908792274921901699720888093776"
    "65727333001053367881220235421809751254540594752243"
    "52584907711670556013604839586446706324415722155397"
    "53697817977846174064955149290862569321978468622482"
    "83972241375657056057490261407972968652414535100474"
    "82166370484403199890008895243450658541227588666881"
    "16427171479924442928230863465674813919123162824586"
    "17866458359124566529476545682848912883142607690042"
    "24219022671055626321111109370544217506941658960408"
    "07198403850962455444362981230987879927244284909188"
    "84580156166097919133875499200524063689912560717606"
    "05886116467109405077541002256983155200055935729725"
    "71636269561882670428252483600823257530420752963450"
)


def solution(n: str = N) -> int:
    """Return the greatest product of thirteen adjacent digits in `n`."""
    largest_product = -sys.maxsize - 1
    for i in range(len(n) - 12):
        product = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product


if __name__ == "__main__":
    print(f"{solution() = }")
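# A compact equivalent of `solution` (illustrative sketch added for clarity,
# not part of the original solution): the same O(len(n) * 13) scan expressed
# with math.prod over each window of thirteen digits.
from math import prod


def solution_with_prod(n: str = N) -> int:
    """Return the greatest product of thirteen adjacent digits in `n`."""
    return max(prod(int(digit) for digit in n[i : i + 13]) for i in range(len(n) - 12))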
'''simple docstring''' from __future__ import annotations import unittest from transformers import RoFormerConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForMultipleChoice, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerModel, ) from transformers.models.roformer.modeling_tf_roformer import ( TFRoFormerSelfAttention, TFRoFormerSinusoidalPositionalEmbedding, ) class _a : """simple docstring""" def __init__( self , A__ , A__=13 , A__=7 , A__=True , A__=True , A__=True , A__=True , A__=99 , A__=32 , A__=2 , A__=4 , A__=37 , A__="gelu" , A__=0.1 , A__=0.1 , A__=5_12 , A__=16 , A__=2 , A__=0.02 , A__=3 , A__=4 , A__=None , ) -> Optional[Any]: _SCREAMING_SNAKE_CASE = parent _SCREAMING_SNAKE_CASE = 13 _SCREAMING_SNAKE_CASE = 7 _SCREAMING_SNAKE_CASE = True _SCREAMING_SNAKE_CASE = True _SCREAMING_SNAKE_CASE = True _SCREAMING_SNAKE_CASE = True _SCREAMING_SNAKE_CASE = 99 _SCREAMING_SNAKE_CASE = 32 _SCREAMING_SNAKE_CASE = 2 _SCREAMING_SNAKE_CASE = 4 _SCREAMING_SNAKE_CASE = 37 _SCREAMING_SNAKE_CASE = '''gelu''' _SCREAMING_SNAKE_CASE = 0.1 _SCREAMING_SNAKE_CASE = 0.1 _SCREAMING_SNAKE_CASE = 5_12 _SCREAMING_SNAKE_CASE = 16 _SCREAMING_SNAKE_CASE = 2 _SCREAMING_SNAKE_CASE = 0.02 _SCREAMING_SNAKE_CASE = 3 _SCREAMING_SNAKE_CASE = 4 _SCREAMING_SNAKE_CASE = None def UpperCamelCase ( self ) -> Union[str, Any]: _SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _SCREAMING_SNAKE_CASE = None if self.use_input_mask: _SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] ) _SCREAMING_SNAKE_CASE = None if self.use_token_type_ids: _SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _SCREAMING_SNAKE_CASE = None _SCREAMING_SNAKE_CASE = None _SCREAMING_SNAKE_CASE = None if self.use_labels: _SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices ) _SCREAMING_SNAKE_CASE = RoFormerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=__lowerCamelCase , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ) -> Optional[int]: _SCREAMING_SNAKE_CASE = TFRoFormerModel(config=__lowerCamelCase ) _SCREAMING_SNAKE_CASE = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} _SCREAMING_SNAKE_CASE = [input_ids, input_mask] _SCREAMING_SNAKE_CASE = model(__lowerCamelCase ) _SCREAMING_SNAKE_CASE = model(__lowerCamelCase ) 
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ) -> int: _SCREAMING_SNAKE_CASE = True _SCREAMING_SNAKE_CASE = TFRoFormerForCausalLM(config=__lowerCamelCase ) _SCREAMING_SNAKE_CASE = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } _SCREAMING_SNAKE_CASE = model(__lowerCamelCase )['''logits'''] self.parent.assertListEqual( list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] ) def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ) -> Any: _SCREAMING_SNAKE_CASE = TFRoFormerForMaskedLM(config=__lowerCamelCase ) _SCREAMING_SNAKE_CASE = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } _SCREAMING_SNAKE_CASE = model(__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ) -> List[Any]: _SCREAMING_SNAKE_CASE = self.num_labels _SCREAMING_SNAKE_CASE = TFRoFormerForSequenceClassification(config=__lowerCamelCase ) _SCREAMING_SNAKE_CASE = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } _SCREAMING_SNAKE_CASE = model(__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ) -> List[Any]: _SCREAMING_SNAKE_CASE = self.num_choices _SCREAMING_SNAKE_CASE = TFRoFormerForMultipleChoice(config=__lowerCamelCase ) _SCREAMING_SNAKE_CASE = tf.tile(tf.expand_dims(__lowerCamelCase , 1 ) , (1, self.num_choices, 1) ) _SCREAMING_SNAKE_CASE = tf.tile(tf.expand_dims(__lowerCamelCase , 1 ) , (1, self.num_choices, 1) ) _SCREAMING_SNAKE_CASE = tf.tile(tf.expand_dims(__lowerCamelCase , 1 ) , (1, self.num_choices, 1) ) _SCREAMING_SNAKE_CASE = { '''input_ids''': multiple_choice_inputs_ids, '''attention_mask''': multiple_choice_input_mask, '''token_type_ids''': multiple_choice_token_type_ids, } _SCREAMING_SNAKE_CASE = model(__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ) -> Union[str, Any]: _SCREAMING_SNAKE_CASE = self.num_labels _SCREAMING_SNAKE_CASE = TFRoFormerForTokenClassification(config=__lowerCamelCase ) _SCREAMING_SNAKE_CASE = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } _SCREAMING_SNAKE_CASE = model(__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ) -> Dict: _SCREAMING_SNAKE_CASE = TFRoFormerForQuestionAnswering(config=__lowerCamelCase ) _SCREAMING_SNAKE_CASE = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } _SCREAMING_SNAKE_CASE = model(__lowerCamelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCamelCase ( self ) -> Union[str, Any]: _SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() ( _SCREAMING_SNAKE_CASE ) = config_and_inputs _SCREAMING_SNAKE_CASE = {'''input_ids''': input_ids, '''token_type_ids''': 
token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_tf class _a (lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase): """simple docstring""" SCREAMING_SNAKE_CASE = ( ( TFRoFormerModel, TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerForMultipleChoice, ) if is_tf_available() else () ) SCREAMING_SNAKE_CASE = ( { 'feature-extraction': TFRoFormerModel, 'fill-mask': TFRoFormerForMaskedLM, 'question-answering': TFRoFormerForQuestionAnswering, 'text-classification': TFRoFormerForSequenceClassification, 'text-generation': TFRoFormerForCausalLM, 'token-classification': TFRoFormerForTokenClassification, 'zero-shot': TFRoFormerForSequenceClassification, } if is_tf_available() else {} ) SCREAMING_SNAKE_CASE = False SCREAMING_SNAKE_CASE = False def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ ) -> str: if pipeline_test_casse_name == "TextGenerationPipelineTests": return True return False def UpperCamelCase ( self ) -> Any: _SCREAMING_SNAKE_CASE = TFRoFormerModelTester(self ) _SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=__lowerCamelCase , hidden_size=37 ) def UpperCamelCase ( self ) -> Optional[Any]: self.config_tester.run_common_tests() def UpperCamelCase ( self ) -> int: _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCamelCase ) def UpperCamelCase ( self ) -> str: _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__lowerCamelCase ) def UpperCamelCase ( self ) -> Dict: _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head(*__lowerCamelCase ) def UpperCamelCase ( self ) -> Optional[int]: _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*__lowerCamelCase ) def UpperCamelCase ( self ) -> str: _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__lowerCamelCase ) def UpperCamelCase ( self ) -> List[str]: _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*__lowerCamelCase ) def UpperCamelCase ( self ) -> int: _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__lowerCamelCase ) @slow def UpperCamelCase ( self ) -> str: _SCREAMING_SNAKE_CASE = TFRoFormerModel.from_pretrained("""junnyu/roformer_chinese_base""" ) self.assertIsNotNone(__lowerCamelCase ) @require_tf class _a (unittest.TestCase): """simple docstring""" @slow def UpperCamelCase ( self ) -> Tuple: _SCREAMING_SNAKE_CASE = TFRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""" ) _SCREAMING_SNAKE_CASE = tf.constant([[0, 1, 2, 3, 4, 5]] ) _SCREAMING_SNAKE_CASE = model(__lowerCamelCase )[0] # TODO Replace vocab size _SCREAMING_SNAKE_CASE = 5_00_00 _SCREAMING_SNAKE_CASE = [1, 6, vocab_size] self.assertEqual(output.shape , __lowerCamelCase ) print(output[:, :3, :3] ) # TODO Replace values below with what was printed above. 
_SCREAMING_SNAKE_CASE = tf.constant( [ [ [-0.1205_3341, -1.026_4901, 0.2922_1946], [-1.513_3783, 0.19_7433, 0.1519_0607], [-5.013_5403, -3.90_0256, -0.8403_8764], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , __lowerCamelCase , atol=1E-4 ) @require_tf class _a (unittest.TestCase): """simple docstring""" SCREAMING_SNAKE_CASE = 1E-4 def UpperCamelCase ( self ) -> Optional[Any]: _SCREAMING_SNAKE_CASE = tf.constant([[4, 10]] ) _SCREAMING_SNAKE_CASE = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 ) _SCREAMING_SNAKE_CASE = emba(input_ids.shape ) _SCREAMING_SNAKE_CASE = tf.constant( [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]] ) tf.debugging.assert_near(__lowerCamelCase , __lowerCamelCase , atol=self.tolerance ) def UpperCamelCase ( self ) -> Union[str, Any]: _SCREAMING_SNAKE_CASE = tf.constant( [ [0.0000, 0.0000, 0.0000, 0.0000, 0.0000], [0.8415, 0.8219, 0.8020, 0.7819, 0.7617], [0.9093, 0.9364, 0.9581, 0.9749, 0.9870], ] ) _SCREAMING_SNAKE_CASE = TFRoFormerSinusoidalPositionalEmbedding(num_positions=5_12 , embedding_dim=5_12 ) emba([2, 16, 5_12] ) _SCREAMING_SNAKE_CASE = emba.weight[:3, :5] tf.debugging.assert_near(__lowerCamelCase , __lowerCamelCase , atol=self.tolerance ) @require_tf class _a (unittest.TestCase): """simple docstring""" SCREAMING_SNAKE_CASE = 1E-4 def UpperCamelCase ( self ) -> Optional[Any]: _SCREAMING_SNAKE_CASE = tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 1_00 _SCREAMING_SNAKE_CASE = -tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 1_00 _SCREAMING_SNAKE_CASE = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 ) _SCREAMING_SNAKE_CASE = embed_positions([2, 16, 7_68] )[None, None, :, :] _SCREAMING_SNAKE_CASE = TFRoFormerSelfAttention.apply_rotary_position_embeddings( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) _SCREAMING_SNAKE_CASE = tf.constant( [ [0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700], [-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343], [-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985], [-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871], [0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980], [3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253], ] ) _SCREAMING_SNAKE_CASE = tf.constant( [ [0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700], [0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343], [1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985], [2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871], [-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980], [-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253], ] ) tf.debugging.assert_near(query_layer[0, 0, :6, :8] , __lowerCamelCase , atol=self.tolerance ) tf.debugging.assert_near(key_layer[0, 0, :6, :8] , __lowerCamelCase , atol=self.tolerance )
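# ---------------------------------------------------------------------------
# Reference sketch (added for clarity, not part of the original tests): the
# sinusoidal table that TFRoFormerSinusoidalPositionalEmbedding is checked
# against above can be reproduced in NumPy. For an even embedding_dim d, row
# p is [sin(p / 10000^(2i/d)) for i < d/2] followed by the matching cosines,
# which gives [0, 0, 0, 1, 1, 1] at position 0 for d = 6, as asserted above.
import numpy as np


def sinusoidal_table(num_positions: int, embedding_dim: int) -> np.ndarray:
    positions = np.arange(num_positions)[:, None]  # (P, 1)
    dims = np.arange(embedding_dim // 2)[None, :]  # (1, D/2)
    angles = positions / np.power(10_000, 2 * dims / embedding_dim)
    return np.concatenate([np.sin(angles), np.cos(angles)], axis=-1)  # (P, D)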
'''Bacon's cipher: each letter is encoded as a five-character group of "A" and "B".'''
encode_dict = {
    "a": "AAAAA", "b": "AAAAB", "c": "AAABA", "d": "AAABB", "e": "AABAA",
    "f": "AABAB", "g": "AABBA", "h": "AABBB", "i": "ABAAA", "j": "BBBAA",
    "k": "ABAAB", "l": "ABABA", "m": "ABABB", "n": "ABBAA", "o": "ABBAB",
    "p": "ABBBA", "q": "ABBBB", "r": "BAAAA", "s": "BAAAB", "t": "BAABA",
    "u": "BAABB", "v": "BBBAB", "w": "BABAA", "x": "BABAB", "y": "BABBA",
    "z": "BABBB", " ": " ",
}
decode_dict = {value: key for key, value in encode_dict.items()}


def encode(word: str) -> str:
    """Encode a word (letters and spaces only) into its Baconian form."""
    encoded = ""
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception("encode() accepts only letters of the alphabet and spaces")
    return encoded


def decode(coded: str) -> str:
    """Decode a Baconian string made of 'A', 'B' and spaces back into text."""
    if set(coded) - {"A", "B", " "} != set():
        raise Exception("decode() accepts only 'A', 'B' and spaces")
    decoded = ""
    for word in coded.split():
        while len(word) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()


if __name__ == "__main__":
    from doctest import testmod

    testmod()
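# Illustrative round trip (added for clarity, not part of the original file):
# encode() concatenates one five-character A/B group per letter, and decode()
# inverts it five characters at a time, with the single space in the table
# separating words.
if __name__ == "__main__":
    assert encode("hello world") == "AABBBAABAAABABAABABAABBAB BABAAABBABBAAAAABABAAAABB"
    assert decode(encode("hello world")) == "hello world"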
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available UpperCamelCase__ : Optional[Any] = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ : int = ["MLukeTokenizer"] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mluke import MLukeTokenizer else: import sys UpperCamelCase__ : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''Convert a fairseq checkpoint on disk into a Transformers MaMaaa checkpoint.'''
import argparse

import torch
from torch import nn

from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration


def remove_ignore_keys_(state_dict):
    """Drop fairseq bookkeeping keys that have no Transformers counterpart (in place)."""
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    """Build a bias-free linear layer that shares its weight with the given embedding."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_mamaaa_checkpoint_from_disk(checkpoint_path):
    mam_aaa = torch.load(checkpoint_path, map_location="cpu")
    args = mam_aaa["args"] or mam_aaa["cfg"]["model"]
    state_dict = mam_aaa["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    config = MaMaaaConfig(
        vocab_size=vocab_size,
        max_position_embeddings=1024,
        encoder_layers=args.encoder_layers,
        decoder_layers=args.decoder_layers,
        encoder_attention_heads=args.encoder_attention_heads,
        decoder_attention_heads=args.decoder_attention_heads,
        encoder_ffn_dim=args.encoder_ffn_embed_dim,
        decoder_ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.encoder_embed_dim,
        encoder_layerdrop=args.encoder_layerdrop,
        decoder_layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="relu",
    )

    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MaMaaaForConditionalGeneration(config)
    model.model.load_state_dict(state_dict, strict=False)
    model.lm_head = make_linear_from_emb(model.model.shared)
    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    model = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
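# Illustrative usage (added for clarity; the script name and paths below are
# placeholders):
#
#   python convert_fairseq_checkpoint.py /path/to/model.pt /path/to/dump_dir
#
# after which the converted weights load through the regular API:
#
#   MaMaaaForConditionalGeneration.from_pretrained("/path/to/dump_dir")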
'''Fetch artifacts from the last scheduled (daily) CI run of huggingface/transformers.'''
import os
import zipfile

import requests

from get_ci_error_statistics import download_artifact, get_artifacts_links


def get_daily_ci_runs(token, num_runs=7):
    """Get the workflow runs of the scheduled (daily) CI."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"

    url = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"

    result = requests.get(url, headers=headers).json()
    return result["workflow_runs"]


def get_last_daily_ci_runs(token):
    """Get the id of the last completed scheduled (daily) CI workflow run."""
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break
    return workflow_run_id


def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """Download the requested artifacts of the last completed daily CI run."""
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token
                )


def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """Download and read the contents of the requested artifacts of the last daily CI run."""
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)

    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"{artifact_name}.zip")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")

    return results
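# Illustrative usage sketch (added for clarity; the artifact name below is a
# placeholder, not necessarily a real artifact of the scheduled CI):
if __name__ == "__main__":
    reports = get_last_daily_ci_reports(
        artifact_names=["ci_results"], output_dir=".", token=os.environ.get("GITHUB_TOKEN")
    )
    for artifact_name, files in reports.items():
        print(artifact_name, sorted(files))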
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available UpperCamelCase__ : str = { "configuration_canine": ["CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP", "CanineConfig"], "tokenization_canine": ["CanineTokenizer"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ : List[Any] = [ "CANINE_PRETRAINED_MODEL_ARCHIVE_LIST", "CanineForMultipleChoice", "CanineForQuestionAnswering", "CanineForSequenceClassification", "CanineForTokenClassification", "CanineLayer", "CanineModel", "CaninePreTrainedModel", "load_tf_weights_in_canine", ] if TYPE_CHECKING: from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig from .tokenization_canine import CanineTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_canine import ( CANINE_PRETRAINED_MODEL_ARCHIVE_LIST, CanineForMultipleChoice, CanineForQuestionAnswering, CanineForSequenceClassification, CanineForTokenClassification, CanineLayer, CanineModel, CaninePreTrainedModel, load_tf_weights_in_canine, ) else: import sys UpperCamelCase__ : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
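# Note (illustrative, added for clarity): `_LazyModule` defers the imports
# declared in `_import_structure` until an attribute is first accessed, so
#
#   from transformers.models.canine import CanineModel
#
# only imports `modeling_canine` (and hence torch) at that point, keeping a
# plain `import transformers` cheap.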
from __future__ import annotations


def ceil_index(v: list[int], l: int, r: int, key: int) -> int:  # noqa: E741
    """Binary-search v[l+1 .. r] for the index of the first element >= key."""
    while r - l > 1:
        m = (l + r) // 2
        if v[m] >= key:
            r = m
        else:
            l = m  # noqa: E741
    return r


def longest_increasing_subsequence_length(v: list[int]) -> int:
    """Return the length of the longest strictly increasing subsequence of v in O(n log n)."""
    if len(v) == 0:
        return 0

    tail = [0] * len(v)
    length = 1

    tail[0] = v[0]
    for i in range(1, len(v)):
        if v[i] < tail[0]:
            # new smallest value starts a fresh candidate subsequence of length 1
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            # v[i] extends the longest subsequence found so far
            tail[length] = v[i]
            length += 1
        else:
            # v[i] tightens an existing candidate: replace its ceiling in tail
            tail[ceil_index(tail, -1, length - 1, v[i])] = v[i]

    return length


if __name__ == "__main__":
    import doctest

    doctest.testmod()
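# Worked example (added for clarity, not part of the original file): `tail`
# only ever stores candidate subsequence endpoints, so the answer is its
# occupied length, not its contents.
if __name__ == "__main__":
    example = [2, 5, 3, 7, 11, 8, 10, 13, 6]
    # one longest increasing subsequence is [2, 3, 7, 8, 10, 13]
    assert longest_increasing_subsequence_length(example) == 6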
'''Image/text processor class for Chinese-CLIP.'''
import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class ChineseCLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ChineseCLIPImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class
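# Illustrative usage sketch (added for clarity; the checkpoint name is an
# assumption and may not match a published model):
#
#   from PIL import Image
#   from transformers import ChineseCLIPProcessor
#
#   processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
#   inputs = processor(text=["一只猫"], images=Image.open("cat.png"), return_tensors="pt")
#   # -> input_ids / token_type_ids / attention_mask / pixel_values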
import collections import inspect import unittest from transformers import FocalNetConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( FocalNetBackbone, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetModel, ) from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class _a : """simple docstring""" def __init__( self , A__ , A__=13 , A__=32 , A__=2 , A__=3 , A__=16 , A__=[32, 64, 1_28] , A__=[1, 2, 1] , A__=[2, 2, 4] , A__=2 , A__=2.0 , A__=True , A__=0.0 , A__=0.0 , A__=0.1 , A__="gelu" , A__=False , A__=True , A__=0.02 , A__=1E-5 , A__=True , A__=None , A__=True , A__=10 , A__=8 , A__=["stage1", "stage2"] , A__=[1, 2] , ) -> List[Any]: _SCREAMING_SNAKE_CASE = parent _SCREAMING_SNAKE_CASE = batch_size _SCREAMING_SNAKE_CASE = image_size _SCREAMING_SNAKE_CASE = patch_size _SCREAMING_SNAKE_CASE = num_channels _SCREAMING_SNAKE_CASE = embed_dim _SCREAMING_SNAKE_CASE = hidden_sizes _SCREAMING_SNAKE_CASE = depths _SCREAMING_SNAKE_CASE = num_heads _SCREAMING_SNAKE_CASE = window_size _SCREAMING_SNAKE_CASE = mlp_ratio _SCREAMING_SNAKE_CASE = qkv_bias _SCREAMING_SNAKE_CASE = hidden_dropout_prob _SCREAMING_SNAKE_CASE = attention_probs_dropout_prob _SCREAMING_SNAKE_CASE = drop_path_rate _SCREAMING_SNAKE_CASE = hidden_act _SCREAMING_SNAKE_CASE = use_absolute_embeddings _SCREAMING_SNAKE_CASE = patch_norm _SCREAMING_SNAKE_CASE = layer_norm_eps _SCREAMING_SNAKE_CASE = initializer_range _SCREAMING_SNAKE_CASE = is_training _SCREAMING_SNAKE_CASE = scope _SCREAMING_SNAKE_CASE = use_labels _SCREAMING_SNAKE_CASE = type_sequence_label_size _SCREAMING_SNAKE_CASE = encoder_stride _SCREAMING_SNAKE_CASE = out_features _SCREAMING_SNAKE_CASE = out_indices def UpperCamelCase ( self ) -> Dict: _SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _SCREAMING_SNAKE_CASE = None if self.use_labels: _SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _SCREAMING_SNAKE_CASE = self.get_config() return config, pixel_values, labels def UpperCamelCase ( self ) -> Optional[int]: return FocalNetConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , ) def UpperCamelCase ( self , A__ , A__ , A__ ) -> List[Any]: _SCREAMING_SNAKE_CASE = 
FocalNetModel(config=A__ ) model.to(A__ ) model.eval() _SCREAMING_SNAKE_CASE = model(A__ ) _SCREAMING_SNAKE_CASE = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) _SCREAMING_SNAKE_CASE = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) ) def UpperCamelCase ( self , A__ , A__ , A__ ) -> Union[str, Any]: _SCREAMING_SNAKE_CASE = FocalNetBackbone(config=A__ ) model.to(A__ ) model.eval() _SCREAMING_SNAKE_CASE = model(A__ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] ) # verify backbone works with out_features=None _SCREAMING_SNAKE_CASE = None _SCREAMING_SNAKE_CASE = FocalNetBackbone(config=A__ ) model.to(A__ ) model.eval() _SCREAMING_SNAKE_CASE = model(A__ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def UpperCamelCase ( self , A__ , A__ , A__ ) -> Tuple: _SCREAMING_SNAKE_CASE = FocalNetForMaskedImageModeling(config=A__ ) model.to(A__ ) model.eval() _SCREAMING_SNAKE_CASE = model(A__ ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images _SCREAMING_SNAKE_CASE = 1 _SCREAMING_SNAKE_CASE = FocalNetForMaskedImageModeling(A__ ) model.to(A__ ) model.eval() _SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) _SCREAMING_SNAKE_CASE = model(A__ ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def UpperCamelCase ( self , A__ , A__ , A__ ) -> Any: _SCREAMING_SNAKE_CASE = self.type_sequence_label_size _SCREAMING_SNAKE_CASE = FocalNetForImageClassification(A__ ) model.to(A__ ) model.eval() _SCREAMING_SNAKE_CASE = model(A__ , labels=A__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images _SCREAMING_SNAKE_CASE = 1 _SCREAMING_SNAKE_CASE = FocalNetForImageClassification(A__ ) model.to(A__ ) model.eval() _SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) _SCREAMING_SNAKE_CASE = model(A__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def UpperCamelCase ( self ) -> List[Any]: _SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() _SCREAMING_SNAKE_CASE = config_and_inputs _SCREAMING_SNAKE_CASE = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class _a (__lowercase , __lowercase , unittest.TestCase): """simple docstring""" SCREAMING_SNAKE_CASE = ( ( FocalNetModel, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetBackbone, ) if is_torch_available() else () ) SCREAMING_SNAKE_CASE = ( {'feature-extraction': FocalNetModel, 'image-classification': FocalNetForImageClassification} if is_torch_available() else {} ) SCREAMING_SNAKE_CASE = False 
SCREAMING_SNAKE_CASE = False SCREAMING_SNAKE_CASE = False SCREAMING_SNAKE_CASE = False SCREAMING_SNAKE_CASE = False def UpperCamelCase ( self ) -> Tuple: _SCREAMING_SNAKE_CASE = FocalNetModelTester(self ) _SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=A__ , embed_dim=37 , has_text_modality=A__ ) def UpperCamelCase ( self ) -> Tuple: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def UpperCamelCase ( self ) -> Any: return def UpperCamelCase ( self ) -> int: _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A__ ) def UpperCamelCase ( self ) -> Tuple: _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*A__ ) def UpperCamelCase ( self ) -> str: _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*A__ ) def UpperCamelCase ( self ) -> List[str]: _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*A__ ) @unittest.skip(reason="""FocalNet does not use inputs_embeds""" ) def UpperCamelCase ( self ) -> Any: pass @unittest.skip(reason="""FocalNet does not use feedforward chunking""" ) def UpperCamelCase ( self ) -> str: pass def UpperCamelCase ( self ) -> str: _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes[:-1]: _SCREAMING_SNAKE_CASE = model_class(A__ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) _SCREAMING_SNAKE_CASE = model.get_output_embeddings() self.assertTrue(x is None or isinstance(A__ , nn.Linear ) ) def UpperCamelCase ( self ) -> Any: _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes[:-1]: _SCREAMING_SNAKE_CASE = model_class(A__ ) _SCREAMING_SNAKE_CASE = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _SCREAMING_SNAKE_CASE = [*signature.parameters.keys()] _SCREAMING_SNAKE_CASE = ["pixel_values"] self.assertListEqual(arg_names[:1] , A__ ) def UpperCamelCase ( self , A__ , A__ , A__ , A__ ) -> List[str]: _SCREAMING_SNAKE_CASE = model_class(A__ ) model.to(A__ ) model.eval() with torch.no_grad(): _SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(A__ , A__ ) ) _SCREAMING_SNAKE_CASE = outputs.hidden_states _SCREAMING_SNAKE_CASE = getattr( self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(A__ ) , A__ ) # FocalNet has a different seq_length _SCREAMING_SNAKE_CASE = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) _SCREAMING_SNAKE_CASE = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) _SCREAMING_SNAKE_CASE = outputs.reshaped_hidden_states self.assertEqual(len(A__ ) , A__ ) _SCREAMING_SNAKE_CASE = reshaped_hidden_states[0].shape _SCREAMING_SNAKE_CASE = ( 
reshaped_hidden_states[0].view(A__ , A__ , height * width ).permute(0 , 2 , 1 ) ) self.assertListEqual( list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) def UpperCamelCase ( self ) -> Optional[int]: _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() _SCREAMING_SNAKE_CASE = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes[:-1]: _SCREAMING_SNAKE_CASE = True self.check_hidden_states_output(A__ , A__ , A__ , A__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _SCREAMING_SNAKE_CASE = True self.check_hidden_states_output(A__ , A__ , A__ , A__ ) def UpperCamelCase ( self ) -> Tuple: _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() _SCREAMING_SNAKE_CASE = 3 _SCREAMING_SNAKE_CASE = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) _SCREAMING_SNAKE_CASE = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) _SCREAMING_SNAKE_CASE = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) _SCREAMING_SNAKE_CASE = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes[:-1]: _SCREAMING_SNAKE_CASE = True self.check_hidden_states_output(A__ , A__ , A__ , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _SCREAMING_SNAKE_CASE = True self.check_hidden_states_output(A__ , A__ , A__ , (padded_height, padded_width) ) @slow def UpperCamelCase ( self ) -> Optional[int]: for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _SCREAMING_SNAKE_CASE = FocalNetModel.from_pretrained(A__ ) self.assertIsNotNone(A__ ) def UpperCamelCase ( self ) -> List[str]: _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() _SCREAMING_SNAKE_CASE = _config_zero_init(A__ ) for model_class in self.all_model_classes: _SCREAMING_SNAKE_CASE = model_class(config=A__ ) for name, param in model.named_parameters(): if "embeddings" not in name and param.requires_grad: self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F"Parameter {name} of model {model_class} seems not properly initialized" , ) @require_vision @require_torch class _a (unittest.TestCase): """simple docstring""" @cached_property def UpperCamelCase ( self ) -> List[str]: # TODO update organization return AutoImageProcessor.from_pretrained("""microsoft/focalnet-tiny""" ) if is_vision_available() else None @slow def UpperCamelCase ( self ) -> Optional[Any]: _SCREAMING_SNAKE_CASE = FocalNetForImageClassification.from_pretrained("""microsoft/focalnet-tiny""" ).to(A__ ) _SCREAMING_SNAKE_CASE = self.default_image_processor _SCREAMING_SNAKE_CASE = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) _SCREAMING_SNAKE_CASE = image_processor(images=A__ , return_tensors="""pt""" ).to(A__ ) # forward pass with torch.no_grad(): _SCREAMING_SNAKE_CASE = model(**A__ ) # verify the logits _SCREAMING_SNAKE_CASE = torch.Size((1, 10_00) ) self.assertEqual(outputs.logits.shape , A__ ) _SCREAMING_SNAKE_CASE = torch.tensor([0.2166, -0.4368, 0.2191] ).to(A__ ) 
self.assertTrue(torch.allclose(outputs.logits[0, :3] , A__ , atol=1E-4 ) ) self.assertEqual(outputs.logits.argmax(dim=-1 ).item() , 2_81 ) @require_torch class _a (__lowercase , unittest.TestCase): """simple docstring""" SCREAMING_SNAKE_CASE = (FocalNetBackbone,) if is_torch_available() else () SCREAMING_SNAKE_CASE = FocalNetConfig SCREAMING_SNAKE_CASE = False def UpperCamelCase ( self ) -> Optional[int]: _SCREAMING_SNAKE_CASE = FocalNetModelTester(self )
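# --- Worked instance of the shape arithmetic used in `create_and_check_model` above (added
# --- for clarity; not part of the original file), with the tester defaults image_size=32,
# --- patch_size=2, embed_dim=16, depths=[1, 2, 1]: each of the len(depths)-1 downsampling
# --- stages divides the token count by 4 and doubles the hidden size.
image_size, patch_size, embed_dim, depths = 32, 2, 16, [1, 2, 1]
expected_seq_len = ((image_size // patch_size) ** 2) // (4 ** (len(depths) - 1))  # 256 // 16
expected_dim = int(embed_dim * 2 ** (len(depths) - 1))  # 16 * 4
assert (expected_seq_len, expected_dim) == (16, 64)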
705
'''simple docstring''' from sklearn.metrics import matthews_corrcoef import datasets UpperCamelCase__ : List[str] = "\nCompute the Matthews correlation coefficient (MCC)\n\nThe Matthews correlation coefficient is used in machine learning as a\nmeasure of the quality of binary and multiclass classifications. It takes\ninto account true and false positives and negatives and is generally\nregarded as a balanced measure which can be used even if the classes are of\nvery different sizes. The MCC is in essence a correlation coefficient value\nbetween -1 and +1. A coefficient of +1 represents a perfect prediction, 0\nan average random prediction and -1 an inverse prediction. The statistic\nis also known as the phi coefficient. [source: Wikipedia]\n" UpperCamelCase__ : List[Any] = "\nArgs:\n predictions (list of int): Predicted labels, as returned by a model.\n references (list of int): Ground truth labels.\n sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.\nReturns:\n matthews_correlation (dict containing float): Matthews correlation.\nExamples:\n Example 1, a basic example with only predictions and references as inputs:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3])\n >>> print(round(results['matthews_correlation'], 2))\n 0.54\n\n Example 2, the same example as above, but also including sample weights:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 3, 1, 1, 1, 2])\n >>> print(round(results['matthews_correlation'], 2))\n 0.1\n\n Example 3, the same example as above, but with sample weights that cause a negative correlation:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 1, 0, 0, 0, 1])\n >>> print(round(results['matthews_correlation'], 2))\n -0.25\n" UpperCamelCase__ : Any = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION) class _a (datasets.Metric): """simple docstring""" def UpperCamelCase ( self ) -> Optional[Any]: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Value("""int32""" ), """references""": datasets.Value("""int32""" ), } ) , reference_urls=[ """https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html""" ] , ) def UpperCamelCase ( self , A__ , A__ , A__=None ) -> List[str]: return { "matthews_correlation": float(matthews_corrcoef(A__ , A__ , sample_weight=A__ ) ), }
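# --- Minimal from-scratch sketch of the binary Matthews correlation coefficient that the
# --- metric above delegates to scikit-learn (added for clarity; not part of the original
# --- file): MCC = (TP*TN - FP*FN) / sqrt((TP+FP)(TP+FN)(TN+FP)(TN+FN)).
from math import sqrt

def binary_mcc(references, predictions):
    # Count the confusion-matrix cells for labels in {0, 1}.
    tp = sum(1 for r, p in zip(references, predictions) if r == 1 and p == 1)
    tn = sum(1 for r, p in zip(references, predictions) if r == 0 and p == 0)
    fp = sum(1 for r, p in zip(references, predictions) if r == 0 and p == 1)
    fn = sum(1 for r, p in zip(references, predictions) if r == 1 and p == 0)
    denom = sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
    # By convention MCC is 0 when any marginal count is empty.
    return 0.0 if denom == 0 else (tp * tn - fp * fn) / denom

assert abs(binary_mcc([1, 0, 1, 1], [1, 0, 0, 1]) - 1 / sqrt(3)) < 1e-9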
0
0
def perfect(number: int) -> bool:
    """Return True if `number` equals the sum of its proper divisors (a perfect number)."""
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number


if __name__ == "__main__":
    print("Program to check whether a number is a Perfect number or not...")
    number = int(input("Enter number: ").strip())
    print(f"{number} is {'' if perfect(number) else 'not '}a Perfect Number.")
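# --- Quick check of the helper above (added for clarity; not part of the original file):
# --- the perfect numbers below 1_000 are exactly 6, 28 and 496.
assert [n for n in range(2, 1_000) if perfect(n)] == [6, 28, 496]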
706
"""Bellman-Ford single-source shortest paths with negative-cycle detection."""

from __future__ import annotations


def print_distance(distance: list[float], src: int) -> None:
    print(f"Vertex\tShortest Distance from vertex {src}")
    for i, d in enumerate(distance):
        print(f"{i}\t\t{d}")


def check_negative_cycle(graph: list[dict[str, int]], distance: list[float], edge_count: int) -> bool:
    # If any edge can still be relaxed after V-1 rounds, a negative cycle exists.
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
        if distance[u] != float("inf") and distance[u] + w < distance[v]:
            return True
    return False


def bellman_ford(graph: list[dict[str, int]], vertex_count: int, edge_count: int, src: int) -> list[float]:
    distance = [float("inf")] * vertex_count
    distance[src] = 0.0

    # Relax every edge V-1 times.
    for _ in range(vertex_count - 1):
        for j in range(edge_count):
            u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
            if distance[u] != float("inf") and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w

    if check_negative_cycle(graph, distance, edge_count):
        raise Exception("Negative cycle found")

    return distance


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    V = int(input("Enter number of vertices: ").strip())
    E = int(input("Enter number of edges: ").strip())

    graph: list[dict[str, int]] = [{} for _ in range(E)]

    for i in range(E):
        print("Edge ", i + 1)
        src, dest, weight = (
            int(x) for x in input("Enter source, destination, weight: ").strip().split(" ")
        )
        graph[i] = {"src": src, "dst": dest, "weight": weight}

    source = int(input("\nEnter shortest path source:").strip())
    shortest_distance = bellman_ford(graph, V, E, source)
    print_distance(shortest_distance, 0)
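# --- Tiny worked example for the implementation above (added for clarity; not part of the
# --- original file): three vertices, edges 0->1 (w=4), 0->2 (w=1), 2->1 (w=2).
example_graph = [
    {"src": 0, "dst": 1, "weight": 4},
    {"src": 0, "dst": 2, "weight": 1},
    {"src": 2, "dst": 1, "weight": 2},
]
# The indirect route 0 -> 2 -> 1 (cost 3) beats the direct edge 0 -> 1 (cost 4).
assert bellman_ford(example_graph, 3, 3, 0) == [0.0, 3.0, 1.0]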
0
0
def reverse_words(input_str: str) -> str:
    """Reverse the order of the words in a sentence.

    >>> reverse_words("I love Python")
    'Python love I'
    """
    return " ".join(input_str.split()[::-1])


if __name__ == "__main__":
    import doctest

    doctest.testmod()
707
'''simple docstring''' from __future__ import annotations import unittest from transformers import RoFormerConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForMultipleChoice, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerModel, ) from transformers.models.roformer.modeling_tf_roformer import ( TFRoFormerSelfAttention, TFRoFormerSinusoidalPositionalEmbedding, ) class _a : """simple docstring""" def __init__( self , A__ , A__=13 , A__=7 , A__=True , A__=True , A__=True , A__=True , A__=99 , A__=32 , A__=2 , A__=4 , A__=37 , A__="gelu" , A__=0.1 , A__=0.1 , A__=5_12 , A__=16 , A__=2 , A__=0.02 , A__=3 , A__=4 , A__=None , ) -> int: _SCREAMING_SNAKE_CASE = parent _SCREAMING_SNAKE_CASE = 13 _SCREAMING_SNAKE_CASE = 7 _SCREAMING_SNAKE_CASE = True _SCREAMING_SNAKE_CASE = True _SCREAMING_SNAKE_CASE = True _SCREAMING_SNAKE_CASE = True _SCREAMING_SNAKE_CASE = 99 _SCREAMING_SNAKE_CASE = 32 _SCREAMING_SNAKE_CASE = 2 _SCREAMING_SNAKE_CASE = 4 _SCREAMING_SNAKE_CASE = 37 _SCREAMING_SNAKE_CASE = """gelu""" _SCREAMING_SNAKE_CASE = 0.1 _SCREAMING_SNAKE_CASE = 0.1 _SCREAMING_SNAKE_CASE = 5_12 _SCREAMING_SNAKE_CASE = 16 _SCREAMING_SNAKE_CASE = 2 _SCREAMING_SNAKE_CASE = 0.02 _SCREAMING_SNAKE_CASE = 3 _SCREAMING_SNAKE_CASE = 4 _SCREAMING_SNAKE_CASE = None def UpperCamelCase ( self ) -> Tuple: _SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _SCREAMING_SNAKE_CASE = None if self.use_input_mask: _SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] ) _SCREAMING_SNAKE_CASE = None if self.use_token_type_ids: _SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _SCREAMING_SNAKE_CASE = None _SCREAMING_SNAKE_CASE = None _SCREAMING_SNAKE_CASE = None if self.use_labels: _SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices ) _SCREAMING_SNAKE_CASE = RoFormerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=A__ , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ) -> Union[str, Any]: _SCREAMING_SNAKE_CASE = TFRoFormerModel(config=A__ ) _SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} _SCREAMING_SNAKE_CASE = [input_ids, input_mask] _SCREAMING_SNAKE_CASE = model(A__ ) _SCREAMING_SNAKE_CASE = model(A__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 
self.seq_length, self.hidden_size) ) def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ) -> str: _SCREAMING_SNAKE_CASE = True _SCREAMING_SNAKE_CASE = TFRoFormerForCausalLM(config=A__ ) _SCREAMING_SNAKE_CASE = { """input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids, } _SCREAMING_SNAKE_CASE = model(A__ )["""logits"""] self.parent.assertListEqual( list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] ) def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ) -> Dict: _SCREAMING_SNAKE_CASE = TFRoFormerForMaskedLM(config=A__ ) _SCREAMING_SNAKE_CASE = { """input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids, } _SCREAMING_SNAKE_CASE = model(A__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ) -> List[str]: _SCREAMING_SNAKE_CASE = self.num_labels _SCREAMING_SNAKE_CASE = TFRoFormerForSequenceClassification(config=A__ ) _SCREAMING_SNAKE_CASE = { """input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids, } _SCREAMING_SNAKE_CASE = model(A__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ) -> Any: _SCREAMING_SNAKE_CASE = self.num_choices _SCREAMING_SNAKE_CASE = TFRoFormerForMultipleChoice(config=A__ ) _SCREAMING_SNAKE_CASE = tf.tile(tf.expand_dims(A__ , 1 ) , (1, self.num_choices, 1) ) _SCREAMING_SNAKE_CASE = tf.tile(tf.expand_dims(A__ , 1 ) , (1, self.num_choices, 1) ) _SCREAMING_SNAKE_CASE = tf.tile(tf.expand_dims(A__ , 1 ) , (1, self.num_choices, 1) ) _SCREAMING_SNAKE_CASE = { """input_ids""": multiple_choice_inputs_ids, """attention_mask""": multiple_choice_input_mask, """token_type_ids""": multiple_choice_token_type_ids, } _SCREAMING_SNAKE_CASE = model(A__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ) -> List[str]: _SCREAMING_SNAKE_CASE = self.num_labels _SCREAMING_SNAKE_CASE = TFRoFormerForTokenClassification(config=A__ ) _SCREAMING_SNAKE_CASE = { """input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids, } _SCREAMING_SNAKE_CASE = model(A__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ) -> Tuple: _SCREAMING_SNAKE_CASE = TFRoFormerForQuestionAnswering(config=A__ ) _SCREAMING_SNAKE_CASE = { """input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids, } _SCREAMING_SNAKE_CASE = model(A__ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCamelCase ( self ) -> List[str]: _SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() ( ( _SCREAMING_SNAKE_CASE ) , ( _SCREAMING_SNAKE_CASE ) , ( _SCREAMING_SNAKE_CASE ) , ( _SCREAMING_SNAKE_CASE ) , ( _SCREAMING_SNAKE_CASE ) , ( _SCREAMING_SNAKE_CASE ) , ( _SCREAMING_SNAKE_CASE ) , ) = config_and_inputs _SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_tf class _a (_lowerCamelCase , 
_lowerCamelCase , unittest.TestCase): """simple docstring""" SCREAMING_SNAKE_CASE = ( ( TFRoFormerModel, TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerForMultipleChoice, ) if is_tf_available() else () ) SCREAMING_SNAKE_CASE = ( { 'feature-extraction': TFRoFormerModel, 'fill-mask': TFRoFormerForMaskedLM, 'question-answering': TFRoFormerForQuestionAnswering, 'text-classification': TFRoFormerForSequenceClassification, 'text-generation': TFRoFormerForCausalLM, 'token-classification': TFRoFormerForTokenClassification, 'zero-shot': TFRoFormerForSequenceClassification, } if is_tf_available() else {} ) SCREAMING_SNAKE_CASE = False SCREAMING_SNAKE_CASE = False def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ ) -> str: if pipeline_test_casse_name == "TextGenerationPipelineTests": return True return False def UpperCamelCase ( self ) -> Union[str, Any]: _SCREAMING_SNAKE_CASE = TFRoFormerModelTester(self ) _SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=A__ , hidden_size=37 ) def UpperCamelCase ( self ) -> Optional[Any]: self.config_tester.run_common_tests() def UpperCamelCase ( self ) -> List[Any]: _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A__ ) def UpperCamelCase ( self ) -> str: _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*A__ ) def UpperCamelCase ( self ) -> int: _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head(*A__ ) def UpperCamelCase ( self ) -> Dict: _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*A__ ) def UpperCamelCase ( self ) -> Union[str, Any]: _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*A__ ) def UpperCamelCase ( self ) -> Tuple: _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*A__ ) def UpperCamelCase ( self ) -> List[Any]: _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*A__ ) @slow def UpperCamelCase ( self ) -> str: _SCREAMING_SNAKE_CASE = TFRoFormerModel.from_pretrained("""junnyu/roformer_chinese_base""" ) self.assertIsNotNone(A__ ) @require_tf class _a (unittest.TestCase): """simple docstring""" @slow def UpperCamelCase ( self ) -> Optional[Any]: _SCREAMING_SNAKE_CASE = TFRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""" ) _SCREAMING_SNAKE_CASE = tf.constant([[0, 1, 2, 3, 4, 5]] ) _SCREAMING_SNAKE_CASE = model(A__ )[0] # TODO Replace vocab size _SCREAMING_SNAKE_CASE = 5_00_00 _SCREAMING_SNAKE_CASE = [1, 6, vocab_size] self.assertEqual(output.shape , A__ ) print(output[:, :3, :3] ) # TODO Replace values below with what was printed above. 
_SCREAMING_SNAKE_CASE = tf.constant( [ [ [-0.1205_3341, -1.026_4901, 0.2922_1946], [-1.513_3783, 0.19_7433, 0.1519_0607], [-5.013_5403, -3.90_0256, -0.8403_8764], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , A__ , atol=1E-4 ) @require_tf class _a (unittest.TestCase): """simple docstring""" SCREAMING_SNAKE_CASE = 1E-4 def UpperCamelCase ( self ) -> List[Any]: _SCREAMING_SNAKE_CASE = tf.constant([[4, 10]] ) _SCREAMING_SNAKE_CASE = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 ) _SCREAMING_SNAKE_CASE = emba(input_ids.shape ) _SCREAMING_SNAKE_CASE = tf.constant( [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]] ) tf.debugging.assert_near(A__ , A__ , atol=self.tolerance ) def UpperCamelCase ( self ) -> List[Any]: _SCREAMING_SNAKE_CASE = tf.constant( [ [0.0000, 0.0000, 0.0000, 0.0000, 0.0000], [0.8415, 0.8219, 0.8020, 0.7819, 0.7617], [0.9093, 0.9364, 0.9581, 0.9749, 0.9870], ] ) _SCREAMING_SNAKE_CASE = TFRoFormerSinusoidalPositionalEmbedding(num_positions=5_12 , embedding_dim=5_12 ) emba([2, 16, 5_12] ) _SCREAMING_SNAKE_CASE = emba.weight[:3, :5] tf.debugging.assert_near(A__ , A__ , atol=self.tolerance ) @require_tf class _a (unittest.TestCase): """simple docstring""" SCREAMING_SNAKE_CASE = 1E-4 def UpperCamelCase ( self ) -> int: # 2,12,16,64 _SCREAMING_SNAKE_CASE = tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 1_00 _SCREAMING_SNAKE_CASE = -tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 1_00 _SCREAMING_SNAKE_CASE = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 ) _SCREAMING_SNAKE_CASE = embed_positions([2, 16, 7_68] )[None, None, :, :] _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = TFRoFormerSelfAttention.apply_rotary_position_embeddings( A__ , A__ , A__ ) _SCREAMING_SNAKE_CASE = tf.constant( [ [0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700], [-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343], [-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985], [-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871], [0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980], [3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253], ] ) _SCREAMING_SNAKE_CASE = tf.constant( [ [0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700], [0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343], [1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985], [2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871], [-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980], [-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253], ] ) tf.debugging.assert_near(query_layer[0, 0, :6, :8] , A__ , atol=self.tolerance ) tf.debugging.assert_near(key_layer[0, 0, :6, :8] , A__ , atol=self.tolerance )
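# --- Minimal NumPy sketch of the rotary position embedding exercised by the last test above
# --- (added for clarity; not part of the original file). Each even/odd pair of vector
# --- components at position m is rotated by an angle m * inv_freq, so dot products between
# --- rotated queries and keys depend only on their relative offset. The frequency schedule
# --- below follows the usual sinusoidal convention; RoFormer's exact interleaving may differ.
import numpy as np

def rotate_pairs(x, position, base=10_000):
    dim = x.shape[-1]
    inv_freq = base ** (-np.arange(0, dim, 2) / dim)  # one angle per 2-d pair
    theta = position * inv_freq
    sin, cos = np.sin(theta), np.cos(theta)
    out = np.empty_like(x)
    out[0::2] = x[0::2] * cos - x[1::2] * sin
    out[1::2] = x[0::2] * sin + x[1::2] * cos
    return out

q = np.array([1.0, 0.0, 1.0, 0.0])
# Shifting both positions by the same amount leaves the dot product unchanged.
a = rotate_pairs(q, position=3) @ rotate_pairs(q, position=5)
b = rotate_pairs(q, position=13) @ rotate_pairs(q, position=15)
assert np.isclose(a, b)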
0
0
"""Resonant frequency of an ideal LC circuit."""

from __future__ import annotations

from math import pi, sqrt


def resonant_frequency(inductance: float, capacitance: float) -> tuple[str, float]:
    """Compute f0 = 1 / (2 * pi * sqrt(L * C))."""
    if inductance <= 0:
        raise ValueError("Inductance cannot be 0 or negative")
    elif capacitance <= 0:
        raise ValueError("Capacitance cannot be 0 or negative")
    else:
        return (
            "Resonant frequency",
            float(1 / (2 * pi * (sqrt(inductance * capacitance)))),
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
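# --- Worked example for the formula above (added for clarity; not part of the original
# --- file): with L = 10 mH and C = 100 nF, f0 = 1 / (2*pi*sqrt(L*C)) ≈ 5032.9 Hz.
label, frequency = resonant_frequency(inductance=10e-3, capacitance=100e-9)
assert label == "Resonant frequency"
assert abs(frequency - 5032.9) < 0.1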
708
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available UpperCamelCase__ : int = {"tokenization_herbert": ["HerbertTokenizer"]} try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ : Tuple = ["HerbertTokenizerFast"] if TYPE_CHECKING: from .tokenization_herbert import HerbertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_herbert_fast import HerbertTokenizerFast else: import sys UpperCamelCase__ : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
0
0
import inspect
import os
import sys
import unittest

import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu


class MultiTPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.test_dir = os.path.sep.join(inspect.getfile(self.__class__).split(os.path.sep)[:-1])

    @require_tpu
    def test_tpu(self):
        distributed_args = f"\n {self.test_dir}/xla_spawn.py\n --num_cores 8\n {self.test_file_path}\n ".split()
        cmd = [sys.executable] + distributed_args
        execute_subprocess_async(cmd, env=os.environ.copy())
709
'''simple docstring''' import argparse import gdown import numpy as np import torch from huggingface_hub import hf_hub_download from transformers import ( CLIPTokenizer, CLIPTokenizerFast, VideoMAEImageProcessor, XCLIPConfig, XCLIPModel, XCLIPProcessor, XCLIPTextConfig, XCLIPVisionConfig, ) def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Dict: """simple docstring""" _SCREAMING_SNAKE_CASE = XCLIPTextConfig() # derive patch size from model name _SCREAMING_SNAKE_CASE = model_name.find("""patch""" ) _SCREAMING_SNAKE_CASE = int(model_name[start_idx + len("""patch""" ) : start_idx + len("""patch""" ) + 2] ) _SCREAMING_SNAKE_CASE = XCLIPVisionConfig(patch_size=SCREAMING_SNAKE_CASE_ , num_frames=SCREAMING_SNAKE_CASE_ ) if "large" in model_name: _SCREAMING_SNAKE_CASE = 7_68 _SCREAMING_SNAKE_CASE = 30_72 _SCREAMING_SNAKE_CASE = 12 _SCREAMING_SNAKE_CASE = 10_24 _SCREAMING_SNAKE_CASE = 40_96 _SCREAMING_SNAKE_CASE = 16 _SCREAMING_SNAKE_CASE = 24 _SCREAMING_SNAKE_CASE = 7_68 _SCREAMING_SNAKE_CASE = 30_72 if model_name == "xclip-large-patch14-16-frames": _SCREAMING_SNAKE_CASE = 3_36 _SCREAMING_SNAKE_CASE = XCLIPConfig.from_text_vision_configs(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if "large" in model_name: _SCREAMING_SNAKE_CASE = 7_68 return config def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> Dict: """simple docstring""" # text encoder if name == "token_embedding.weight": _SCREAMING_SNAKE_CASE = name.replace("""token_embedding.weight""" , """text_model.embeddings.token_embedding.weight""" ) if name == "positional_embedding": _SCREAMING_SNAKE_CASE = name.replace("""positional_embedding""" , """text_model.embeddings.position_embedding.weight""" ) if "ln_1" in name: _SCREAMING_SNAKE_CASE = name.replace("""ln_1""" , """layer_norm1""" ) if "ln_2" in name: _SCREAMING_SNAKE_CASE = name.replace("""ln_2""" , """layer_norm2""" ) if "c_fc" in name: _SCREAMING_SNAKE_CASE = name.replace("""c_fc""" , """fc1""" ) if "c_proj" in name: _SCREAMING_SNAKE_CASE = name.replace("""c_proj""" , """fc2""" ) if name.startswith("""transformer.resblocks""" ): _SCREAMING_SNAKE_CASE = name.replace("""transformer.resblocks""" , """text_model.encoder.layers""" ) if "attn.out_proj" in name and "message" not in name: _SCREAMING_SNAKE_CASE = name.replace("""attn.out_proj""" , """self_attn.out_proj""" ) if "ln_final" in name: _SCREAMING_SNAKE_CASE = name.replace("""ln_final""" , """text_model.final_layer_norm""" ) # visual encoder if name == "visual.class_embedding": _SCREAMING_SNAKE_CASE = name.replace("""visual.class_embedding""" , """vision_model.embeddings.class_embedding""" ) if name == "visual.positional_embedding": _SCREAMING_SNAKE_CASE = name.replace("""visual.positional_embedding""" , """vision_model.embeddings.position_embedding.weight""" ) if name.startswith("""visual.transformer.resblocks""" ): _SCREAMING_SNAKE_CASE = name.replace("""visual.transformer.resblocks""" , """vision_model.encoder.layers""" ) if "visual.conv1" in name: _SCREAMING_SNAKE_CASE = name.replace("""visual.conv1""" , """vision_model.embeddings.patch_embedding""" ) if "visual.ln_pre" in name: _SCREAMING_SNAKE_CASE = name.replace("""visual.ln_pre""" , """vision_model.pre_layernorm""" ) if "visual.ln_post" in name: _SCREAMING_SNAKE_CASE = name.replace("""visual.ln_post""" , """vision_model.post_layernorm""" ) if "visual.proj" in name: _SCREAMING_SNAKE_CASE = name.replace("""visual.proj""" , """visual_projection.weight""" ) if "text_projection" in name: _SCREAMING_SNAKE_CASE = name.replace("""text_projection""" , 
"""text_projection.weight""" ) # things on top if "prompts_visual_proj" in name: _SCREAMING_SNAKE_CASE = name.replace("""prompts_visual_proj""" , """prompts_visual_projection""" ) if "prompts_visual_ln" in name: _SCREAMING_SNAKE_CASE = name.replace("""prompts_visual_ln""" , """prompts_visual_layernorm""" ) # mit if name == "mit.positional_embedding": _SCREAMING_SNAKE_CASE = name.replace("""positional""" , """position""" ) if name.startswith("""mit.resblocks""" ): _SCREAMING_SNAKE_CASE = name.replace("""mit.resblocks""" , """mit.encoder.layers""" ) # prompts generator if name.startswith("""prompts_generator.norm""" ): _SCREAMING_SNAKE_CASE = name.replace("""prompts_generator.norm""" , """prompts_generator.layernorm""" ) return name def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]: """simple docstring""" for key in orig_state_dict.copy().keys(): _SCREAMING_SNAKE_CASE = orig_state_dict.pop(SCREAMING_SNAKE_CASE_ ) if "attn.in_proj" in key: _SCREAMING_SNAKE_CASE = key.split(""".""" ) if key.startswith("""visual""" ): _SCREAMING_SNAKE_CASE = key_split[3] _SCREAMING_SNAKE_CASE = config.vision_config.hidden_size if "message_attn" in key: if "weight" in key: _SCREAMING_SNAKE_CASE = val[ :dim, : ] _SCREAMING_SNAKE_CASE = val[ dim : dim * 2, : ] _SCREAMING_SNAKE_CASE = val[ -dim:, : ] else: _SCREAMING_SNAKE_CASE = val[ :dim ] _SCREAMING_SNAKE_CASE = val[ dim : dim * 2 ] _SCREAMING_SNAKE_CASE = val[ -dim: ] else: if "weight" in key: _SCREAMING_SNAKE_CASE = val[ :dim, : ] _SCREAMING_SNAKE_CASE = val[ dim : dim * 2, : ] _SCREAMING_SNAKE_CASE = val[ -dim:, : ] else: _SCREAMING_SNAKE_CASE = val[:dim] _SCREAMING_SNAKE_CASE = val[ dim : dim * 2 ] _SCREAMING_SNAKE_CASE = val[-dim:] elif key.startswith("""mit""" ): _SCREAMING_SNAKE_CASE = key_split[2] _SCREAMING_SNAKE_CASE = config.vision_config.mit_hidden_size if "weight" in key: _SCREAMING_SNAKE_CASE = val[:dim, :] _SCREAMING_SNAKE_CASE = val[dim : dim * 2, :] _SCREAMING_SNAKE_CASE = val[-dim:, :] else: _SCREAMING_SNAKE_CASE = val[:dim] _SCREAMING_SNAKE_CASE = val[dim : dim * 2] _SCREAMING_SNAKE_CASE = val[-dim:] else: _SCREAMING_SNAKE_CASE = key_split[2] _SCREAMING_SNAKE_CASE = config.text_config.hidden_size if "weight" in key: _SCREAMING_SNAKE_CASE = val[:dim, :] _SCREAMING_SNAKE_CASE = val[ dim : dim * 2, : ] _SCREAMING_SNAKE_CASE = val[-dim:, :] else: _SCREAMING_SNAKE_CASE = val[:dim] _SCREAMING_SNAKE_CASE = val[ dim : dim * 2 ] _SCREAMING_SNAKE_CASE = val[-dim:] else: _SCREAMING_SNAKE_CASE = rename_key(SCREAMING_SNAKE_CASE_ ) if new_key_name in ["visual_projection.weight", "text_projection.weight"]: _SCREAMING_SNAKE_CASE = val.T _SCREAMING_SNAKE_CASE = val return orig_state_dict def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> Optional[Any]: """simple docstring""" if num_frames == 8: _SCREAMING_SNAKE_CASE = """eating_spaghetti_8_frames.npy""" elif num_frames == 16: _SCREAMING_SNAKE_CASE = """eating_spaghetti.npy""" elif num_frames == 32: _SCREAMING_SNAKE_CASE = """eating_spaghetti_32_frames.npy""" _SCREAMING_SNAKE_CASE = hf_hub_download( repo_id="""hf-internal-testing/spaghetti-video""" , filename=SCREAMING_SNAKE_CASE_ , repo_type="""dataset""" , ) _SCREAMING_SNAKE_CASE = np.load(SCREAMING_SNAKE_CASE_ ) return list(SCREAMING_SNAKE_CASE_ ) def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=False ) -> Optional[int]: """simple docstring""" _SCREAMING_SNAKE_CASE = { # fully supervised kinetics-400 checkpoints """xclip-base-patch32""": 
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth""", """xclip-base-patch32-16-frames""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth""" ), """xclip-base-patch16""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth""", """xclip-base-patch16-16-frames""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth""" ), """xclip-large-patch14""": """https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&amp;export=download&amp;confirm=t&amp;uuid=b26caedc-88e2-473e-830a-9d158b653cdb""", """xclip-large-patch14-16-frames""": """https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&amp;export=download&amp;confirm=t&amp;uuid=538fa810-e671-4050-b385-9a623f89804f""", # fully supervised kinetics-600 checkpoints """xclip-base-patch16-kinetics-600""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth""" ), """xclip-base-patch16-kinetics-600-16-frames""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth""" ), """xclip-large-patch14-kinetics-600""": """https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&amp;export=download&amp;confirm=t&amp;uuid=141d4977-4a65-44ae-864f-4b0c19f838be""", # few shot """xclip-base-patch16-hmdb-2-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth""" ), """xclip-base-patch16-hmdb-4-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth""" ), """xclip-base-patch16-hmdb-8-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth""" ), """xclip-base-patch16-hmdb-16-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth""" ), """xclip-base-patch16-ucf-2-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth""" ), """xclip-base-patch16-ucf-4-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth""" ), """xclip-base-patch16-ucf-8-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth""" ), """xclip-base-patch16-ucf-16-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth""" ), # zero shot """xclip-base-patch16-zero-shot""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth""", } _SCREAMING_SNAKE_CASE = model_to_url[model_name] _SCREAMING_SNAKE_CASE = 8 if "16-frames" in model_name: _SCREAMING_SNAKE_CASE = 16 elif "shot" in model_name: _SCREAMING_SNAKE_CASE = 32 _SCREAMING_SNAKE_CASE = get_xclip_config(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = XCLIPModel(SCREAMING_SNAKE_CASE_ ) model.eval() if "drive" in checkpoint_url: _SCREAMING_SNAKE_CASE = """pytorch_model.bin""" gdown.cached_download(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , quiet=SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = torch.load(SCREAMING_SNAKE_CASE_ , map_location="""cpu""" )["""model"""] else: _SCREAMING_SNAKE_CASE = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE_ )["""model"""] _SCREAMING_SNAKE_CASE = convert_state_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = XCLIPModel(SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = model.load_state_dict(SCREAMING_SNAKE_CASE_ , strict=SCREAMING_SNAKE_CASE_ ) assert missing_keys == 
["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"] model.eval() _SCREAMING_SNAKE_CASE = 3_36 if model_name == """xclip-large-patch14-16-frames""" else 2_24 _SCREAMING_SNAKE_CASE = VideoMAEImageProcessor(size=SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained("""openai/clip-vit-base-patch32""" ) _SCREAMING_SNAKE_CASE = CLIPTokenizerFast.from_pretrained("""openai/clip-vit-base-patch32""" ) _SCREAMING_SNAKE_CASE = XCLIPProcessor(image_processor=SCREAMING_SNAKE_CASE_ , tokenizer=SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = prepare_video(SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = processor( text=["""playing sports""", """eating spaghetti""", """go shopping"""] , videos=SCREAMING_SNAKE_CASE_ , return_tensors="""pt""" , padding=SCREAMING_SNAKE_CASE_ ) print("""Shape of pixel values:""" , inputs.pixel_values.shape ) with torch.no_grad(): _SCREAMING_SNAKE_CASE = model(**SCREAMING_SNAKE_CASE_ ) # Verify outputs _SCREAMING_SNAKE_CASE = outputs.logits_per_video _SCREAMING_SNAKE_CASE = logits_per_video.softmax(dim=1 ) print("""Probs:""" , SCREAMING_SNAKE_CASE_ ) # kinetics-400 if model_name == "xclip-base-patch32": _SCREAMING_SNAKE_CASE = torch.tensor([[0.0019, 0.9951, 0.0030]] ) elif model_name == "xclip-base-patch32-16-frames": _SCREAMING_SNAKE_CASE = torch.tensor([[7.0999e-04, 9.9883e-01, 4.5580e-04]] ) elif model_name == "xclip-base-patch16": _SCREAMING_SNAKE_CASE = torch.tensor([[0.0083, 0.9681, 0.0236]] ) elif model_name == "xclip-base-patch16-16-frames": _SCREAMING_SNAKE_CASE = torch.tensor([[7.6937e-04, 9.9728e-01, 1.9473e-03]] ) elif model_name == "xclip-large-patch14": _SCREAMING_SNAKE_CASE = torch.tensor([[0.0062, 0.9864, 0.0075]] ) elif model_name == "xclip-large-patch14-16-frames": _SCREAMING_SNAKE_CASE = torch.tensor([[3.3877e-04, 9.9937e-01, 2.8888e-04]] ) # kinetics-600 elif model_name == "xclip-base-patch16-kinetics-600": _SCREAMING_SNAKE_CASE = torch.tensor([[0.0555, 0.8914, 0.0531]] ) elif model_name == "xclip-base-patch16-kinetics-600-16-frames": _SCREAMING_SNAKE_CASE = torch.tensor([[3.8554e-04, 9.9929e-01, 3.2754e-04]] ) elif model_name == "xclip-large-patch14-kinetics-600": _SCREAMING_SNAKE_CASE = torch.tensor([[0.0036, 0.9920, 0.0045]] ) # few shot elif model_name == "xclip-base-patch16-hmdb-2-shot": _SCREAMING_SNAKE_CASE = torch.tensor([[7.1890e-06, 9.9994e-01, 5.6559e-05]] ) elif model_name == "xclip-base-patch16-hmdb-4-shot": _SCREAMING_SNAKE_CASE = torch.tensor([[1.0320e-05, 9.9993e-01, 6.2435e-05]] ) elif model_name == "xclip-base-patch16-hmdb-8-shot": _SCREAMING_SNAKE_CASE = torch.tensor([[4.1377e-06, 9.9990e-01, 9.8386e-05]] ) elif model_name == "xclip-base-patch16-hmdb-16-shot": _SCREAMING_SNAKE_CASE = torch.tensor([[4.1347e-05, 9.9962e-01, 3.3411e-04]] ) elif model_name == "xclip-base-patch16-ucf-2-shot": _SCREAMING_SNAKE_CASE = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]] ) elif model_name == "xclip-base-patch16-ucf-4-shot": _SCREAMING_SNAKE_CASE = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]] ) elif model_name == "xclip-base-patch16-ucf-8-shot": _SCREAMING_SNAKE_CASE = torch.tensor([[0.0027, 0.9904, 0.0070]] ) elif model_name == "xclip-base-patch16-ucf-16-shot": _SCREAMING_SNAKE_CASE = torch.tensor([[9.8219e-04, 9.9593e-01, 3.0863e-03]] ) # zero shot elif model_name == "xclip-base-patch16-zero-shot": _SCREAMING_SNAKE_CASE = torch.tensor([[3.5082e-04, 9.9785e-01, 1.7966e-03]] ) else: raise ValueError(F"Model name {model_name} not supported" ) assert torch.allclose(SCREAMING_SNAKE_CASE_ , 
SCREAMING_SNAKE_CASE_ , atol=1e-3 ) print("""Looks ok!""" ) if pytorch_dump_folder_path is not None: print(F"Saving model {model_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(SCREAMING_SNAKE_CASE_ ) if push_to_hub: print("""Pushing model, processor and slow tokenizer files to the hub...""" ) model.push_to_hub(SCREAMING_SNAKE_CASE_ , organization="""nielsr""" ) processor.push_to_hub(SCREAMING_SNAKE_CASE_ , organization="""nielsr""" ) slow_tokenizer.push_to_hub(SCREAMING_SNAKE_CASE_ , organization="""nielsr""" ) if __name__ == "__main__": UpperCamelCase__ : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default="xclip-base-patch32", type=str, help="Name of the model.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) UpperCamelCase__ : str = parser.parse_args() convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
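# --- Minimal sketch of the fused-QKV split performed in `convert_state_dict` above (added
# --- for clarity; not part of the original file). CLIP-style checkpoints store one
# --- `attn.in_proj` weight of shape (3*dim, dim); the HF model expects separate q/k/v
# --- projections, so the matrix is cut into equal thirds along dimension 0.
import torch

dim = 4  # hidden size of the toy example
fused_weight = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
q_proj = fused_weight[:dim, :]           # first third  -> query projection
k_proj = fused_weight[dim : 2 * dim, :]  # middle third -> key projection
v_proj = fused_weight[-dim:, :]          # last third   -> value projection
assert torch.equal(torch.cat([q_proj, k_proj, v_proj]), fused_weight)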
0
0
import requests
from bs4 import BeautifulSoup


def get_citation(base_url: str, params: dict) -> str:
    """Return the citation-count link text for a Google Scholar lookup."""
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    div = soup.find("div", attrs={"class": "gs_ri"})
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text()


if __name__ == "__main__":
    params = {
        "title": (
            "Precisely geometry controlled microsupercapacitors for ultrahigh areal "
            "capacitance, volumetric capacitance, and energy density"
        ),
        "journal": "Chem. Mater.",
        "volume": 30,
        "pages": "3979-3990",
        "year": 2018,
        "hl": "en",
    }
    print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
710
'''simple docstring''' import numpy as np import torch from torch.utils.data import Dataset from utils import logger class _a (_lowerCamelCase): """simple docstring""" def __init__( self , A__ , A__ ) -> Any: _SCREAMING_SNAKE_CASE = params _SCREAMING_SNAKE_CASE = np.array(A__ ) _SCREAMING_SNAKE_CASE = np.array([len(A__ ) for t in data] ) self.check() self.remove_long_sequences() self.remove_empty_sequences() self.remove_unknown_sequences() self.check() self.print_statistics() def __getitem__( self , A__ ) -> Dict: return (self.token_ids[index], self.lengths[index]) def __len__( self ) -> Tuple: return len(self.lengths ) def UpperCamelCase ( self ) -> Dict: assert len(self.token_ids ) == len(self.lengths ) assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) ) def UpperCamelCase ( self ) -> List[str]: _SCREAMING_SNAKE_CASE = self.params.max_model_input_size _SCREAMING_SNAKE_CASE = self.lengths > max_len logger.info(F"Splitting {sum(A__ )} too long sequences." ) def divide_chunks(A__ , A__ ): return [l[i : i + n] for i in range(0 , len(A__ ) , A__ )] _SCREAMING_SNAKE_CASE = [] _SCREAMING_SNAKE_CASE = [] if self.params.mlm: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.params.special_tok_ids["""cls_token"""], self.params.special_tok_ids["""sep_token"""] else: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.params.special_tok_ids["""bos_token"""], self.params.special_tok_ids["""eos_token"""] for seq_, len_ in zip(self.token_ids , self.lengths ): assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_ if len_ <= max_len: new_tok_ids.append(seq_ ) new_lengths.append(len_ ) else: _SCREAMING_SNAKE_CASE = [] for sub_s in divide_chunks(seq_ , max_len - 2 ): if sub_s[0] != cls_id: _SCREAMING_SNAKE_CASE = np.insert(A__ , 0 , A__ ) if sub_s[-1] != sep_id: _SCREAMING_SNAKE_CASE = np.insert(A__ , len(A__ ) , A__ ) assert len(A__ ) <= max_len assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s sub_seqs.append(A__ ) new_tok_ids.extend(A__ ) new_lengths.extend([len(A__ ) for l in sub_seqs] ) _SCREAMING_SNAKE_CASE = np.array(A__ ) _SCREAMING_SNAKE_CASE = np.array(A__ ) def UpperCamelCase ( self ) -> List[str]: _SCREAMING_SNAKE_CASE = len(self ) _SCREAMING_SNAKE_CASE = self.lengths > 11 _SCREAMING_SNAKE_CASE = self.token_ids[indices] _SCREAMING_SNAKE_CASE = self.lengths[indices] _SCREAMING_SNAKE_CASE = len(self ) logger.info(F"Remove {init_size - new_size} too short (<=11 tokens) sequences." ) def UpperCamelCase ( self ) -> int: if "unk_token" not in self.params.special_tok_ids: return else: _SCREAMING_SNAKE_CASE = self.params.special_tok_ids["""unk_token"""] _SCREAMING_SNAKE_CASE = len(self ) _SCREAMING_SNAKE_CASE = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] ) _SCREAMING_SNAKE_CASE = (unk_occs / self.lengths) < 0.5 _SCREAMING_SNAKE_CASE = self.token_ids[indices] _SCREAMING_SNAKE_CASE = self.lengths[indices] _SCREAMING_SNAKE_CASE = len(self ) logger.info(F"Remove {init_size - new_size} sequences with a high level of unknown tokens (50%)." 
) def UpperCamelCase ( self ) -> Optional[Any]: if not self.params.is_master: return logger.info(F"{len(self )} sequences" ) # data_len = sum(self.lengths) # nb_unique_tokens = len(Counter(list(chain(*self.token_ids)))) # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)') # unk_idx = self.params.special_tok_ids['unk_token'] # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids]) # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)') def UpperCamelCase ( self , A__ ) -> Any: _SCREAMING_SNAKE_CASE = [t[0] for t in batch] _SCREAMING_SNAKE_CASE = [t[1] for t in batch] assert len(A__ ) == len(A__ ) # Max for paddings _SCREAMING_SNAKE_CASE = max(A__ ) # Pad token ids if self.params.mlm: _SCREAMING_SNAKE_CASE = self.params.special_tok_ids["""pad_token"""] else: _SCREAMING_SNAKE_CASE = self.params.special_tok_ids["""unk_token"""] _SCREAMING_SNAKE_CASE = [list(t.astype(A__ ) ) + [pad_idx] * (max_seq_len_ - len(A__ )) for t in token_ids] assert len(tk_ ) == len(A__ ) assert all(len(A__ ) == max_seq_len_ for t in tk_ ) _SCREAMING_SNAKE_CASE = torch.tensor(tk_ ) # (bs, max_seq_len_) _SCREAMING_SNAKE_CASE = torch.tensor(A__ ) # (bs) return tk_t, lg_t
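# --- Minimal sketch of the padding collate implemented in `batch_sequences` above (added
# --- for clarity; not part of the original file). The pad id 0 is an assumption; the real
# --- class reads it from `params.special_tok_ids`.
import numpy as np
import torch

def pad_collate(batch, pad_idx=0):
    token_ids = [t[0] for t in batch]
    lengths = [t[1] for t in batch]
    max_len = max(lengths)
    # Right-pad every sequence to the longest one in the batch.
    padded = [list(ids) + [pad_idx] * (max_len - len(ids)) for ids in token_ids]
    return torch.tensor(padded), torch.tensor(lengths)

batch = [(np.array([5, 6, 7]), 3), (np.array([8, 9]), 2)]
tokens, lengths = pad_collate(batch)
assert tokens.tolist() == [[5, 6, 7], [8, 9, 0]] and lengths.tolist() == [3, 2]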
0
0
'''simple docstring''' import random import unittest import torch from diffusers import IFInpaintingPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class _a (__UpperCAmelCase , __UpperCAmelCase , unittest.TestCase): """simple docstring""" SCREAMING_SNAKE_CASE = IFInpaintingPipeline SCREAMING_SNAKE_CASE = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'width', 'height'} SCREAMING_SNAKE_CASE = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS SCREAMING_SNAKE_CASE = PipelineTesterMixin.required_optional_params - {'latents'} def UpperCamelCase ( self ) -> Optional[Any]: return self._get_dummy_components() def UpperCamelCase ( self , A__ , A__=0 ) -> List[Any]: if str(lowerCAmelCase_ ).startswith("""mps""" ): _SCREAMING_SNAKE_CASE = torch.manual_seed(lowerCAmelCase_ ) else: _SCREAMING_SNAKE_CASE = torch.Generator(device=lowerCAmelCase_ ).manual_seed(lowerCAmelCase_ ) _SCREAMING_SNAKE_CASE = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCAmelCase_ ) ).to(lowerCAmelCase_ ) _SCREAMING_SNAKE_CASE = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCAmelCase_ ) ).to(lowerCAmelCase_ ) _SCREAMING_SNAKE_CASE = { """prompt""": """A painting of a squirrel eating a burger""", """image""": image, """mask_image""": mask_image, """generator""": generator, """num_inference_steps""": 2, """output_type""": """numpy""", } return inputs @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , ) def UpperCamelCase ( self ) -> str: self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) def UpperCamelCase ( self ) -> int: self._test_save_load_optional_components() @unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" ) def UpperCamelCase ( self ) -> List[str]: super().test_save_load_floataa(expected_max_diff=1E-1 ) def UpperCamelCase ( self ) -> List[Any]: self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 ) def UpperCamelCase ( self ) -> Union[str, Any]: self._test_save_load_local() def UpperCamelCase ( self ) -> List[Any]: self._test_inference_batch_single_identical( expected_max_diff=1E-2 , )
711
'''simple docstring''' import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging UpperCamelCase__ : List[Any] = logging.get_logger(__name__) UpperCamelCase__ : Any = "▁" UpperCamelCase__ : Any = {"vocab_file": "spiece.model"} UpperCamelCase__ : int = { "vocab_file": { "google/reformer-crime-and-punishment": ( "https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model" ) } } UpperCamelCase__ : Optional[int] = { "google/reformer-crime-and-punishment": 524_288, } class _a (_lowerCamelCase): """simple docstring""" SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE = ['input_ids', 'attention_mask'] def __init__( self , A__ , A__="</s>" , A__="<unk>" , A__=[] , A__ = None , **A__ , ) -> None: _SCREAMING_SNAKE_CASE = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( eos_token=A__ , unk_token=A__ , additional_special_tokens=A__ , sp_model_kwargs=self.sp_model_kwargs , **A__ , ) _SCREAMING_SNAKE_CASE = vocab_file _SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(A__ ) @property def UpperCamelCase ( self ) -> Any: return self.sp_model.get_piece_size() def UpperCamelCase ( self ) -> Dict[str, int]: _SCREAMING_SNAKE_CASE = {self.convert_ids_to_tokens(A__ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ) -> int: _SCREAMING_SNAKE_CASE = self.__dict__.copy() _SCREAMING_SNAKE_CASE = None return state def __setstate__( self , A__ ) -> str: _SCREAMING_SNAKE_CASE = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): _SCREAMING_SNAKE_CASE = {} _SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def UpperCamelCase ( self , A__ ) -> List[str]: return self.sp_model.encode(A__ , out_type=A__ ) def UpperCamelCase ( self , A__ ) -> Union[str, Any]: return self.sp_model.piece_to_id(A__ ) def UpperCamelCase ( self , A__ ) -> List[Any]: if index < self.sp_model.get_piece_size(): _SCREAMING_SNAKE_CASE = self.sp_model.IdToPiece(A__ ) return token def UpperCamelCase ( self , A__ ) -> str: _SCREAMING_SNAKE_CASE = [] _SCREAMING_SNAKE_CASE = """""" for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(A__ ) + token _SCREAMING_SNAKE_CASE = [] else: current_sub_tokens.append(A__ ) out_string += self.sp_model.decode(A__ ) return out_string.strip() def UpperCamelCase ( self , A__ , A__ = None ) -> Tuple[str]: if not os.path.isdir(A__ ): logger.error(F"Vocabulary path ({save_directory}) should be a directory" ) return _SCREAMING_SNAKE_CASE = os.path.join( A__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(A__ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , A__ ) elif not os.path.isfile(self.vocab_file ): with open(A__ , """wb""" ) as fi: _SCREAMING_SNAKE_CASE = self.sp_model.serialized_model_proto() fi.write(A__ ) return (out_vocab_file,)
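# --- Illustrative round trip for the tokenizer above (added for clarity; not part of the
# --- original file). Loading the pretrained checkpoint is an assumption (it requires the
# --- sentencepiece model to be downloadable); exact pieces may differ between versions.
from transformers import ReformerTokenizer

tok = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
ids = tok("Crime and Punishment")["input_ids"]
print(tok.convert_ids_to_tokens(ids))  # sentencepiece pieces, e.g. starting with "▁"
print(tok.decode(ids))                 # should round-trip back to the input text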
0
0
'''simple docstring''' def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> int: """simple docstring""" def update_area_of_max_square(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> int: # BASE CASE if row >= rows or col >= cols: return 0 _SCREAMING_SNAKE_CASE = update_area_of_max_square(row , col + 1 ) _SCREAMING_SNAKE_CASE = update_area_of_max_square(row + 1 , col + 1 ) _SCREAMING_SNAKE_CASE = update_area_of_max_square(row + 1 , col ) if mat[row][col]: _SCREAMING_SNAKE_CASE = 1 + min([right, diagonal, down] ) _SCREAMING_SNAKE_CASE = max(largest_square_area[0] , sub_problem_sol ) return sub_problem_sol else: return 0 _SCREAMING_SNAKE_CASE = [0] update_area_of_max_square(0 , 0 ) return largest_square_area[0] def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> int: """simple docstring""" def update_area_of_max_square_using_dp_array( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> int: if row >= rows or col >= cols: return 0 if dp_array[row][col] != -1: return dp_array[row][col] _SCREAMING_SNAKE_CASE = update_area_of_max_square_using_dp_array(row , col + 1 , dp_array ) _SCREAMING_SNAKE_CASE = update_area_of_max_square_using_dp_array(row + 1 , col + 1 , dp_array ) _SCREAMING_SNAKE_CASE = update_area_of_max_square_using_dp_array(row + 1 , col , dp_array ) if mat[row][col]: _SCREAMING_SNAKE_CASE = 1 + min([right, diagonal, down] ) _SCREAMING_SNAKE_CASE = max(largest_square_area[0] , sub_problem_sol ) _SCREAMING_SNAKE_CASE = sub_problem_sol return sub_problem_sol else: return 0 _SCREAMING_SNAKE_CASE = [0] _SCREAMING_SNAKE_CASE = [[-1] * cols for _ in range(rows )] update_area_of_max_square_using_dp_array(0 , 0 , dp_array ) return largest_square_area[0] def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> int: """simple docstring""" _SCREAMING_SNAKE_CASE = [[0] * (cols + 1) for _ in range(rows + 1 )] _SCREAMING_SNAKE_CASE = 0 for row in range(rows - 1 , -1 , -1 ): for col in range(cols - 1 , -1 , -1 ): _SCREAMING_SNAKE_CASE = dp_array[row][col + 1] _SCREAMING_SNAKE_CASE = dp_array[row + 1][col + 1] _SCREAMING_SNAKE_CASE = dp_array[row + 1][col] if mat[row][col] == 1: _SCREAMING_SNAKE_CASE = 1 + min(right , diagonal , bottom ) _SCREAMING_SNAKE_CASE = max(dp_array[row][col] , largest_square_area ) else: _SCREAMING_SNAKE_CASE = 0 return largest_square_area def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> int: """simple docstring""" _SCREAMING_SNAKE_CASE = [0] * (cols + 1) _SCREAMING_SNAKE_CASE = [0] * (cols + 1) _SCREAMING_SNAKE_CASE = 0 for row in range(rows - 1 , -1 , -1 ): for col in range(cols - 1 , -1 , -1 ): _SCREAMING_SNAKE_CASE = current_row[col + 1] _SCREAMING_SNAKE_CASE = next_row[col + 1] _SCREAMING_SNAKE_CASE = next_row[col] if mat[row][col] == 1: _SCREAMING_SNAKE_CASE = 1 + min(right , diagonal , bottom ) _SCREAMING_SNAKE_CASE = max(current_row[col] , largest_square_area ) else: _SCREAMING_SNAKE_CASE = 0 _SCREAMING_SNAKE_CASE = current_row return largest_square_area if __name__ == "__main__": import doctest doctest.testmod() print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
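# A short worked example for the solvers above, assuming the original function
# name largest_square_area_in_matrix_bottom_up that the __main__ block uses
# (the defs themselves appear here renamed to lowerCAmelCase_): the largest
# all-ones square in this 3x4 matrix sits in the lower-right block and has side 2.
sample_matrix = [
    [1, 0, 1, 1],
    [1, 1, 1, 1],
    [0, 1, 1, 1],
]
assert largest_square_area_in_matrix_bottom_up(3, 4, sample_matrix) == 2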
712
'''simple docstring''' import os import unittest from transformers import MobileBertTokenizer, MobileBertTokenizerFast from transformers.models.bert.tokenization_bert import ( VOCAB_FILES_NAMES, BasicTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english @require_tokenizers class _a (_lowerCamelCase , unittest.TestCase): """simple docstring""" SCREAMING_SNAKE_CASE = MobileBertTokenizer SCREAMING_SNAKE_CASE = MobileBertTokenizerFast SCREAMING_SNAKE_CASE = True SCREAMING_SNAKE_CASE = True SCREAMING_SNAKE_CASE = filter_non_english SCREAMING_SNAKE_CASE = 'google/mobilebert-uncased' def UpperCamelCase ( self ) -> Any: super().setUp() _SCREAMING_SNAKE_CASE = [ """[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing""", """,""", """low""", """lowest""", ] _SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) ) _SCREAMING_SNAKE_CASE = [ (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped for tokenizer_def in self.tokenizers_list ] def UpperCamelCase ( self , A__ ) -> List[str]: _SCREAMING_SNAKE_CASE = """UNwant\u00E9d,running""" _SCREAMING_SNAKE_CASE = """unwanted, running""" return input_text, output_text def UpperCamelCase ( self ) -> Any: _SCREAMING_SNAKE_CASE = self.tokenizer_class(self.vocab_file ) _SCREAMING_SNAKE_CASE = tokenizer.tokenize("""UNwant\u00E9d,running""" ) self.assertListEqual(A__ , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(A__ ) , [9, 6, 7, 12, 10, 11] ) def UpperCamelCase ( self ) -> Optional[int]: if not self.test_rust_tokenizer: return _SCREAMING_SNAKE_CASE = self.get_tokenizer() _SCREAMING_SNAKE_CASE = self.get_rust_tokenizer() _SCREAMING_SNAKE_CASE = """UNwant\u00E9d,running""" _SCREAMING_SNAKE_CASE = tokenizer.tokenize(A__ ) _SCREAMING_SNAKE_CASE = rust_tokenizer.tokenize(A__ ) self.assertListEqual(A__ , A__ ) _SCREAMING_SNAKE_CASE = tokenizer.encode(A__ , add_special_tokens=A__ ) _SCREAMING_SNAKE_CASE = rust_tokenizer.encode(A__ , add_special_tokens=A__ ) self.assertListEqual(A__ , A__ ) _SCREAMING_SNAKE_CASE = self.get_rust_tokenizer() _SCREAMING_SNAKE_CASE = tokenizer.encode(A__ ) _SCREAMING_SNAKE_CASE = rust_tokenizer.encode(A__ ) self.assertListEqual(A__ , A__ ) # With lower casing _SCREAMING_SNAKE_CASE = self.get_tokenizer(do_lower_case=A__ ) _SCREAMING_SNAKE_CASE = self.get_rust_tokenizer(do_lower_case=A__ ) _SCREAMING_SNAKE_CASE = """UNwant\u00E9d,running""" _SCREAMING_SNAKE_CASE = tokenizer.tokenize(A__ ) _SCREAMING_SNAKE_CASE = rust_tokenizer.tokenize(A__ ) self.assertListEqual(A__ , A__ ) _SCREAMING_SNAKE_CASE = tokenizer.encode(A__ , add_special_tokens=A__ ) _SCREAMING_SNAKE_CASE = rust_tokenizer.encode(A__ , add_special_tokens=A__ ) self.assertListEqual(A__ , A__ ) _SCREAMING_SNAKE_CASE = self.get_rust_tokenizer() _SCREAMING_SNAKE_CASE = tokenizer.encode(A__ ) _SCREAMING_SNAKE_CASE = rust_tokenizer.encode(A__ ) self.assertListEqual(A__ , A__ ) def UpperCamelCase ( self ) -> Tuple: _SCREAMING_SNAKE_CASE = BasicTokenizer() self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""" ) , 
["""ah""", """\u535A""", """\u63A8""", """zz"""] ) def UpperCamelCase ( self ) -> List[Any]: _SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=A__ ) self.assertListEqual( tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""hello""", """!""", """how""", """are""", """you""", """?"""] ) self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] ) def UpperCamelCase ( self ) -> Optional[Any]: _SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=A__ , strip_accents=A__ ) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hällo""", """!""", """how""", """are""", """you""", """?"""] ) self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""h\u00E9llo"""] ) def UpperCamelCase ( self ) -> Any: _SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=A__ , strip_accents=A__ ) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] ) self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] ) def UpperCamelCase ( self ) -> Any: _SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=A__ ) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] ) self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] ) def UpperCamelCase ( self ) -> str: _SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=A__ ) self.assertListEqual( tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] ) def UpperCamelCase ( self ) -> Dict: _SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=A__ , strip_accents=A__ ) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""] ) def UpperCamelCase ( self ) -> Union[str, Any]: _SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=A__ , strip_accents=A__ ) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""] ) def UpperCamelCase ( self ) -> str: _SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=A__ , never_split=["""[UNK]"""] ) self.assertListEqual( tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? 
[UNK]""" ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""] ) def UpperCamelCase ( self ) -> Tuple: _SCREAMING_SNAKE_CASE = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""] _SCREAMING_SNAKE_CASE = {} for i, token in enumerate(A__ ): _SCREAMING_SNAKE_CASE = i _SCREAMING_SNAKE_CASE = WordpieceTokenizer(vocab=A__ , unk_token="""[UNK]""" ) self.assertListEqual(tokenizer.tokenize("""""" ) , [] ) self.assertListEqual(tokenizer.tokenize("""unwanted running""" ) , ["""un""", """##want""", """##ed""", """runn""", """##ing"""] ) self.assertListEqual(tokenizer.tokenize("""unwantedX running""" ) , ["""[UNK]""", """runn""", """##ing"""] ) def UpperCamelCase ( self ) -> str: self.assertTrue(_is_whitespace(""" """ ) ) self.assertTrue(_is_whitespace("""\t""" ) ) self.assertTrue(_is_whitespace("""\r""" ) ) self.assertTrue(_is_whitespace("""\n""" ) ) self.assertTrue(_is_whitespace("""\u00A0""" ) ) self.assertFalse(_is_whitespace("""A""" ) ) self.assertFalse(_is_whitespace("""-""" ) ) def UpperCamelCase ( self ) -> Union[str, Any]: self.assertTrue(_is_control("""\u0005""" ) ) self.assertFalse(_is_control("""A""" ) ) self.assertFalse(_is_control(""" """ ) ) self.assertFalse(_is_control("""\t""" ) ) self.assertFalse(_is_control("""\r""" ) ) def UpperCamelCase ( self ) -> Dict: self.assertTrue(_is_punctuation("""-""" ) ) self.assertTrue(_is_punctuation("""$""" ) ) self.assertTrue(_is_punctuation("""`""" ) ) self.assertTrue(_is_punctuation(""".""" ) ) self.assertFalse(_is_punctuation("""A""" ) ) self.assertFalse(_is_punctuation(""" """ ) ) def UpperCamelCase ( self ) -> str: _SCREAMING_SNAKE_CASE = self.get_tokenizer() _SCREAMING_SNAKE_CASE = self.get_rust_tokenizer() # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340 self.assertListEqual([tokenizer.tokenize(A__ ) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] ) self.assertListEqual( [rust_tokenizer.tokenize(A__ ) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] ) @slow def UpperCamelCase ( self ) -> Any: _SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained("""google/mobilebert-uncased""" ) _SCREAMING_SNAKE_CASE = tokenizer.encode("""sequence builders""" , add_special_tokens=A__ ) _SCREAMING_SNAKE_CASE = tokenizer.encode("""multi-sequence build""" , add_special_tokens=A__ ) _SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(A__ ) _SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(A__ , A__ ) assert encoded_sentence == [1_01] + text + [1_02] assert encoded_pair == [1_01] + text + [1_02] + text_a + [1_02] def UpperCamelCase ( self ) -> List[str]: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ): _SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(A__ , **A__ ) _SCREAMING_SNAKE_CASE = F"A, naïve {tokenizer_r.mask_token} AllenNLP sentence." 
_SCREAMING_SNAKE_CASE = tokenizer_r.encode_plus( A__ , return_attention_mask=A__ , return_token_type_ids=A__ , return_offsets_mapping=A__ , add_special_tokens=A__ , ) _SCREAMING_SNAKE_CASE = tokenizer_r.do_lower_case if hasattr(A__ , """do_lower_case""" ) else False _SCREAMING_SNAKE_CASE = ( [ ((0, 0), tokenizer_r.cls_token), ((0, 1), """A"""), ((1, 2), ""","""), ((3, 5), """na"""), ((5, 6), """##ï"""), ((6, 8), """##ve"""), ((9, 15), tokenizer_r.mask_token), ((16, 21), """Allen"""), ((21, 23), """##NL"""), ((23, 24), """##P"""), ((25, 33), """sentence"""), ((33, 34), """."""), ((0, 0), tokenizer_r.sep_token), ] if not do_lower_case else [ ((0, 0), tokenizer_r.cls_token), ((0, 1), """a"""), ((1, 2), ""","""), ((3, 8), """naive"""), ((9, 15), tokenizer_r.mask_token), ((16, 21), """allen"""), ((21, 23), """##nl"""), ((23, 24), """##p"""), ((25, 33), """sentence"""), ((33, 34), """."""), ((0, 0), tokenizer_r.sep_token), ] ) self.assertEqual( [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["""input_ids"""] ) ) self.assertEqual([e[0] for e in expected_results] , tokens["""offset_mapping"""] ) def UpperCamelCase ( self ) -> Any: _SCREAMING_SNAKE_CASE = ["""的""", """人""", """有"""] _SCREAMING_SNAKE_CASE = """""".join(A__ ) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ): _SCREAMING_SNAKE_CASE = True _SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained(A__ , **A__ ) _SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(A__ , **A__ ) _SCREAMING_SNAKE_CASE = tokenizer_p.encode(A__ , add_special_tokens=A__ ) _SCREAMING_SNAKE_CASE = tokenizer_r.encode(A__ , add_special_tokens=A__ ) _SCREAMING_SNAKE_CASE = tokenizer_r.convert_ids_to_tokens(A__ ) _SCREAMING_SNAKE_CASE = tokenizer_p.convert_ids_to_tokens(A__ ) # it is expected that each Chinese character is not preceded by "##" self.assertListEqual(A__ , A__ ) self.assertListEqual(A__ , A__ ) _SCREAMING_SNAKE_CASE = False _SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(A__ , **A__ ) _SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained(A__ , **A__ ) _SCREAMING_SNAKE_CASE = tokenizer_r.encode(A__ , add_special_tokens=A__ ) _SCREAMING_SNAKE_CASE = tokenizer_p.encode(A__ , add_special_tokens=A__ ) _SCREAMING_SNAKE_CASE = tokenizer_r.convert_ids_to_tokens(A__ ) _SCREAMING_SNAKE_CASE = tokenizer_p.convert_ids_to_tokens(A__ ) # it is expected that only the first Chinese character is not preceded by "##". _SCREAMING_SNAKE_CASE = [ F"##{token}" if idx != 0 else token for idx, token in enumerate(A__ ) ] self.assertListEqual(A__ , A__ ) self.assertListEqual(A__ , A__ )
0
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) UpperCamelCase__ : List[str] = {"configuration_unispeech": ["UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP", "UniSpeechConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ : Optional[Any] = [ "UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST", "UniSpeechForCTC", "UniSpeechForPreTraining", "UniSpeechForSequenceClassification", "UniSpeechModel", "UniSpeechPreTrainedModel", ] if TYPE_CHECKING: from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_unispeech import ( UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST, UniSpeechForCTC, UniSpeechForPreTraining, UniSpeechForSequenceClassification, UniSpeechModel, UniSpeechPreTrainedModel, ) else: import sys UpperCamelCase__ : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
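# A minimal illustration of what the lazy import structure above buys, assuming
# the standard transformers _LazyModule behavior: importing the package itself is
# cheap, and the torch-backed classes are only resolved on first attribute access.
import transformers.models.unispeech as unispeech  # no heavy torch-side import yet
model_cls = unispeech.UniSpeechForCTC  # the real module is loaded here, on demand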
713
'''simple docstring''' import logging import os import quant_trainer import torch from torch.utils.data import DataLoader from transformers import Trainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput UpperCamelCase__ : Tuple = logging.getLogger(__name__) if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class _a (_lowerCamelCase): """simple docstring""" def __init__( self , *A__ , A__=None , A__=None , A__=None , **A__ ) -> Optional[int]: super().__init__(*A__ , **A__ ) _SCREAMING_SNAKE_CASE = eval_examples _SCREAMING_SNAKE_CASE = post_process_function _SCREAMING_SNAKE_CASE = quant_trainer_args _SCREAMING_SNAKE_CASE = 1_28 # default number of calibration samples def UpperCamelCase ( self , A__=None ) -> Union[str, Any]: if calib_dataset is None and self.calib_dataset is None: raise ValueError("""Trainer: calibration requires an calib_dataset.""" ) _SCREAMING_SNAKE_CASE = calib_dataset if calib_dataset is not None else self.calib_dataset _SCREAMING_SNAKE_CASE = self._remove_unused_columns(A__ , description="""Calibration""" ) return DataLoader( A__ , batch_size=self.args.eval_batch_size , collate_fn=self.data_collator , drop_last=self.args.dataloader_drop_last , num_workers=self.args.dataloader_num_workers , pin_memory=self.args.dataloader_pin_memory , shuffle=A__ , ) def UpperCamelCase ( self , A__=None ) -> str: _SCREAMING_SNAKE_CASE = self.train_dataset if calib_dataset is None else calib_dataset _SCREAMING_SNAKE_CASE = self.get_calib_dataloader(A__ ) _SCREAMING_SNAKE_CASE = self.model quant_trainer.configure_model(A__ , self.quant_trainer_args , calib=A__ ) model.eval() quant_trainer.enable_calibration(A__ ) logger.info("""***** Running calibration *****""" ) logger.info(F" Num examples = {self.calib_num}" ) logger.info(F" Batch size = {calib_dataloader.batch_size}" ) for step, inputs in enumerate(A__ ): # Prediction step _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.prediction_step(A__ , A__ , prediction_loss_only=A__ ) if (step + 1) * calib_dataloader.batch_size >= self.calib_num: break quant_trainer.finish_calibration(A__ , self.quant_trainer_args ) _SCREAMING_SNAKE_CASE = model def UpperCamelCase ( self , A__=None , A__=None , A__=None , A__ = "eval" ) -> List[Any]: _SCREAMING_SNAKE_CASE = self.eval_dataset if eval_dataset is None else eval_dataset _SCREAMING_SNAKE_CASE = self.get_eval_dataloader(A__ ) _SCREAMING_SNAKE_CASE = self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. 
_SCREAMING_SNAKE_CASE = self.compute_metrics _SCREAMING_SNAKE_CASE = None _SCREAMING_SNAKE_CASE = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: _SCREAMING_SNAKE_CASE = eval_loop( A__ , description="""Evaluation""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=A__ , ) finally: _SCREAMING_SNAKE_CASE = compute_metrics if self.post_process_function is not None and self.compute_metrics is not None: _SCREAMING_SNAKE_CASE = self.post_process_function(A__ , A__ , output.predictions ) _SCREAMING_SNAKE_CASE = self.compute_metrics(A__ ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(F"{metric_key_prefix}_" ): _SCREAMING_SNAKE_CASE = metrics.pop(A__ ) self.log(A__ ) else: _SCREAMING_SNAKE_CASE = {} if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) xm.master_print(met.metrics_report() ) _SCREAMING_SNAKE_CASE = self.callback_handler.on_evaluate(self.args , self.state , self.control , A__ ) return metrics def UpperCamelCase ( self , A__ , A__ , A__=None , A__ = "test" ) -> List[str]: _SCREAMING_SNAKE_CASE = self.get_test_dataloader(A__ ) # Temporarily disable metric computation, we will do it in the loop here. _SCREAMING_SNAKE_CASE = self.compute_metrics _SCREAMING_SNAKE_CASE = None _SCREAMING_SNAKE_CASE = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: _SCREAMING_SNAKE_CASE = eval_loop( A__ , description="""Prediction""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=A__ , ) finally: _SCREAMING_SNAKE_CASE = compute_metrics if self.post_process_function is None or self.compute_metrics is None: return output _SCREAMING_SNAKE_CASE = self.post_process_function(A__ , A__ , output.predictions , """predict""" ) _SCREAMING_SNAKE_CASE = self.compute_metrics(A__ ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(F"{metric_key_prefix}_" ): _SCREAMING_SNAKE_CASE = metrics.pop(A__ ) return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=A__ ) def UpperCamelCase ( self , A__="./" ) -> Tuple: _SCREAMING_SNAKE_CASE = self.eval_dataset _SCREAMING_SNAKE_CASE = self.get_eval_dataloader(A__ ) _SCREAMING_SNAKE_CASE = next(iter(A__ ) ) # saving device - to make it consistent _SCREAMING_SNAKE_CASE = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" ) # convert to tuple _SCREAMING_SNAKE_CASE = tuple(v.to(A__ ) for k, v in batch.items() ) logger.info("""Converting model to be onnx compatible""" ) from pytorch_quantization.nn import TensorQuantizer _SCREAMING_SNAKE_CASE = True _SCREAMING_SNAKE_CASE = self.model.to(A__ ) model.eval() model.float() _SCREAMING_SNAKE_CASE = model.module if hasattr(A__ , """module""" ) else model quant_trainer.configure_model(A__ , self.quant_trainer_args ) _SCREAMING_SNAKE_CASE = os.path.join(A__ , """model.onnx""" ) logger.info(F"exporting model to {output_model_file}" ) _SCREAMING_SNAKE_CASE = {0: """batch_size""", 1: """seq_len"""} torch.onnx.export( A__ , A__ , A__ , export_params=A__ , opset_version=13 , do_constant_folding=A__ , input_names=["""input_ids""", """attention_mask""", """token_type_ids"""] , output_names=["""output_start_logits""", """output_end_logits"""] , dynamic_axes={ """input_ids""": axes, """attention_mask""": axes, """token_type_ids""": axes, 
"""output_start_logits""": axes, """output_end_logits""": axes, } , verbose=A__ , ) logger.info("""onnx export finished""" )
0
0
# This script creates a super tiny model that is useful inside tests, when we just want to test that # the machinery works, without needing to the check the quality of the outcomes. # # This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny - # all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and # emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files. # The latter is done by `fsmt-make-super-tiny-model.py`. # # It will be used then as "stas/tiny-wmt19-en-ru" from pathlib import Path import json import tempfile from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES UpperCamelCase__ : Dict = "tiny-wmt19-en-ru" # Build # borrowed from a test UpperCamelCase__ : str = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>", "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>", ] UpperCamelCase__ : Optional[Any] = dict(zip(vocab, range(len(vocab)))) UpperCamelCase__ : Optional[Any] = ["l o 123", "lo w 1456", "e r</w> 1789", ""] with tempfile.TemporaryDirectory() as tmpdirname: UpperCamelCase__ : int = Path(tmpdirname) UpperCamelCase__ : Union[str, Any] = build_dir / VOCAB_FILES_NAMES["src_vocab_file"] UpperCamelCase__ : List[str] = build_dir / VOCAB_FILES_NAMES["tgt_vocab_file"] UpperCamelCase__ : Union[str, Any] = build_dir / VOCAB_FILES_NAMES["merges_file"] with open(src_vocab_file, "w") as fp: fp.write(json.dumps(vocab_tokens)) with open(tgt_vocab_file, "w") as fp: fp.write(json.dumps(vocab_tokens)) with open(merges_file, "w") as fp: fp.write("\n".join(merges)) UpperCamelCase__ : str = FSMTTokenizer( langs=["en", "ru"], src_vocab_size=len(vocab), tgt_vocab_size=len(vocab), src_vocab_file=src_vocab_file, tgt_vocab_file=tgt_vocab_file, merges_file=merges_file, ) UpperCamelCase__ : Dict = FSMTConfig( langs=["ru", "en"], src_vocab_size=1_000, tgt_vocab_size=1_000, d_model=4, encoder_layers=1, decoder_layers=1, encoder_ffn_dim=4, decoder_ffn_dim=4, encoder_attention_heads=1, decoder_attention_heads=1, ) UpperCamelCase__ : int = FSMTForConditionalGeneration(config) print(f"""num of params {tiny_model.num_parameters()}""") # Test UpperCamelCase__ : Optional[Any] = tokenizer(["Making tiny model"], return_tensors="pt") UpperCamelCase__ : Tuple = tiny_model(**batch) print("test output:", len(outputs.logits[0])) # Save tiny_model.half() # makes it smaller tiny_model.save_pretrained(mname_tiny) tokenizer.save_pretrained(mname_tiny) print(f"""Generated {mname_tiny}""") # Upload # transformers-cli upload tiny-wmt19-en-ru
714
'''simple docstring''' def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> str: """simple docstring""" return "".join([hex(SCREAMING_SNAKE_CASE_ )[2:].zfill(2 ).upper() for byte in list(SCREAMING_SNAKE_CASE_ )] ) def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> bytes: """simple docstring""" # Check data validity, following RFC3548 # https://www.ietf.org/rfc/rfc3548.txt if (len(SCREAMING_SNAKE_CASE_ ) % 2) != 0: raise ValueError( """Base16 encoded data is invalid: Data does not have an even number of hex digits.""" ) # Check the character set - the standard base16 alphabet # is uppercase according to RFC3548 section 6 if not set(SCREAMING_SNAKE_CASE_ ) <= set("""0123456789ABCDEF""" ): raise ValueError( """Base16 encoded data is invalid: Data is not uppercase hex or it contains invalid characters.""" ) # For every two hexadecimal digits (= a byte), turn it into an integer. # Then, string the result together into bytes, and return it. return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(SCREAMING_SNAKE_CASE_ ) , 2 ) ) if __name__ == "__main__": import doctest doctest.testmod()
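# A minimal round-trip sketch for the two helpers above; the names base16_encode
# and base16_decode are assumed from the original module, since both defs appear
# here as lowerCAmelCase_.
encoded = base16_encode(b"Hello")  # -> "48656C6C6F"
assert base16_decode(encoded) == b"Hello"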
0
0
'''simple docstring''' from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline else: from .camera import create_pan_cameras from .pipeline_shap_e import ShapEPipeline from .pipeline_shap_e_img2img import ShapEImgaImgPipeline from .renderer import ( BoundingBoxVolume, ImportanceRaySampler, MLPNeRFModelOutput, MLPNeRSTFModel, ShapEParamsProjModel, ShapERenderer, StratifiedRaySampler, VoidNeRFModel, )
715
'''simple docstring''' import pytest import requests from datasets.utils.file_utils import http_head from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline @pytest.mark.integration def lowerCAmelCase_ ( ) -> List[Any]: """simple docstring""" with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ): with pytest.raises(RequestWouldHangIndefinitelyError ): requests.request("""GET""" , """https://huggingface.co""" ) with pytest.raises(requests.exceptions.ConnectTimeout ): requests.request("""GET""" , """https://huggingface.co""" , timeout=1.0 ) @pytest.mark.integration def lowerCAmelCase_ ( ) -> int: """simple docstring""" with offline(OfflineSimulationMode.CONNECTION_FAILS ): with pytest.raises(requests.exceptions.ConnectionError ): requests.request("""GET""" , """https://huggingface.co""" ) def lowerCAmelCase_ ( ) -> Optional[Any]: """simple docstring""" with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ): with pytest.raises(ConnectionError ): http_head("""https://huggingface.co""" )
0
0
import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class _a (__UpperCAmelCase): """simple docstring""" SCREAMING_SNAKE_CASE = ['image_processor', 'tokenizer'] SCREAMING_SNAKE_CASE = 'ChineseCLIPImageProcessor' SCREAMING_SNAKE_CASE = ('BertTokenizer', 'BertTokenizerFast') def __init__( self , A__=None , A__=None , **A__ ) -> List[Any]: _SCREAMING_SNAKE_CASE = None if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""" , A__ , ) _SCREAMING_SNAKE_CASE = kwargs.pop("""feature_extractor""" ) _SCREAMING_SNAKE_CASE = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""" ) if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""" ) super().__init__(A__ , A__ ) _SCREAMING_SNAKE_CASE = self.image_processor def __call__( self , A__=None , A__=None , A__=None , **A__ ) -> List[str]: if text is None and images is None: raise ValueError("""You have to specify either text or images. Both cannot be none.""" ) if text is not None: _SCREAMING_SNAKE_CASE = self.tokenizer(A__ , return_tensors=A__ , **A__ ) if images is not None: _SCREAMING_SNAKE_CASE = self.image_processor(A__ , return_tensors=A__ , **A__ ) if text is not None and images is not None: _SCREAMING_SNAKE_CASE = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**A__ ) , tensor_type=A__ ) def UpperCamelCase ( self , *A__ , **A__ ) -> int: return self.tokenizer.batch_decode(*A__ , **A__ ) def UpperCamelCase ( self , *A__ , **A__ ) -> Any: return self.tokenizer.decode(*A__ , **A__ ) @property def UpperCamelCase ( self ) -> Optional[int]: _SCREAMING_SNAKE_CASE = self.tokenizer.model_input_names _SCREAMING_SNAKE_CASE = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) @property def UpperCamelCase ( self ) -> List[Any]: warnings.warn( """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , A__ , ) return self.image_processor_class
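# A hedged usage sketch for the processor above; the checkpoint name is an
# assumption (OFA-Sys/chinese-clip-vit-base-patch16 is a public ChineseCLIP
# checkpoint, not something this file pins down).
from PIL import Image
from transformers import ChineseCLIPProcessor

processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
inputs = processor(text=["一只猫"], images=Image.new("RGB", (224, 224)), return_tensors="pt")
# inputs now carries input_ids/attention_mask from the tokenizer plus
# pixel_values from the image processor, matching the __call__ merge above.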
716
'''simple docstring''' import math from collections.abc import Iterator from itertools import takewhile def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> bool: """simple docstring""" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(SCREAMING_SNAKE_CASE_ ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def lowerCAmelCase_ ( ) -> Iterator[int]: """simple docstring""" _SCREAMING_SNAKE_CASE = 2 while True: if is_prime(SCREAMING_SNAKE_CASE_ ): yield num num += 1 def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ = 2_00_00_00 ) -> int: """simple docstring""" return sum(takewhile(lambda SCREAMING_SNAKE_CASE_ : x < n , prime_generator() ) ) if __name__ == "__main__": print(f"""{solution() = }""")
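# A quick sanity check, assuming the original names of the mangled defs above
# (is_prime / prime_generator / solution): the primes below 10 are 2, 3, 5 and 7,
# so the Euler-style sum is 17.
assert solution(10) == 17
assert [p for p in [2, 3, 4, 5, 6, 7, 8, 9] if is_prime(p)] == [2, 3, 5, 7]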
0
0
'''simple docstring''' import fire from utils import calculate_rouge, save_json def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , **SCREAMING_SNAKE_CASE_ ) -> List[Any]: """simple docstring""" _SCREAMING_SNAKE_CASE = [x.strip() for x in open(SCREAMING_SNAKE_CASE_ ).readlines()] _SCREAMING_SNAKE_CASE = [x.strip() for x in open(SCREAMING_SNAKE_CASE_ ).readlines()][: len(SCREAMING_SNAKE_CASE_ )] _SCREAMING_SNAKE_CASE = calculate_rouge(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) if save_path is not None: save_json(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , indent=SCREAMING_SNAKE_CASE_ ) return metrics # these print nicely if __name__ == "__main__": fire.Fire(calculate_rouge_path)
717
'''simple docstring''' import unittest import numpy as np from transformers.testing_utils import require_flax, require_tf, require_torch from transformers.utils import ( expand_dims, flatten_dict, is_flax_available, is_tf_available, is_torch_available, reshape, squeeze, transpose, ) if is_flax_available(): import jax.numpy as jnp if is_tf_available(): import tensorflow as tf if is_torch_available(): import torch class _a (unittest.TestCase): """simple docstring""" def UpperCamelCase ( self ) -> Optional[Any]: _SCREAMING_SNAKE_CASE = { """task_specific_params""": { """summarization""": {"""length_penalty""": 1.0, """max_length""": 1_28, """min_length""": 12, """num_beams""": 4}, """summarization_cnn""": {"""length_penalty""": 2.0, """max_length""": 1_42, """min_length""": 56, """num_beams""": 4}, """summarization_xsum""": {"""length_penalty""": 1.0, """max_length""": 62, """min_length""": 11, """num_beams""": 6}, } } _SCREAMING_SNAKE_CASE = { """task_specific_params.summarization.length_penalty""": 1.0, """task_specific_params.summarization.max_length""": 1_28, """task_specific_params.summarization.min_length""": 12, """task_specific_params.summarization.num_beams""": 4, """task_specific_params.summarization_cnn.length_penalty""": 2.0, """task_specific_params.summarization_cnn.max_length""": 1_42, """task_specific_params.summarization_cnn.min_length""": 56, """task_specific_params.summarization_cnn.num_beams""": 4, """task_specific_params.summarization_xsum.length_penalty""": 1.0, """task_specific_params.summarization_xsum.max_length""": 62, """task_specific_params.summarization_xsum.min_length""": 11, """task_specific_params.summarization_xsum.num_beams""": 6, } self.assertEqual(flatten_dict(A__ ) , A__ ) def UpperCamelCase ( self ) -> int: _SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 ) self.assertTrue(np.allclose(transpose(A__ ) , x.transpose() ) ) _SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 , 5 ) self.assertTrue(np.allclose(transpose(A__ , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) ) @require_torch def UpperCamelCase ( self ) -> Optional[Any]: _SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 ) _SCREAMING_SNAKE_CASE = torch.tensor(A__ ) self.assertTrue(np.allclose(transpose(A__ ) , transpose(A__ ).numpy() ) ) _SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 , 5 ) _SCREAMING_SNAKE_CASE = torch.tensor(A__ ) self.assertTrue(np.allclose(transpose(A__ , axes=(1, 2, 0) ) , transpose(A__ , axes=(1, 2, 0) ).numpy() ) ) @require_tf def UpperCamelCase ( self ) -> Optional[int]: _SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 ) _SCREAMING_SNAKE_CASE = tf.constant(A__ ) self.assertTrue(np.allclose(transpose(A__ ) , transpose(A__ ).numpy() ) ) _SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 , 5 ) _SCREAMING_SNAKE_CASE = tf.constant(A__ ) self.assertTrue(np.allclose(transpose(A__ , axes=(1, 2, 0) ) , transpose(A__ , axes=(1, 2, 0) ).numpy() ) ) @require_flax def UpperCamelCase ( self ) -> List[str]: _SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 ) _SCREAMING_SNAKE_CASE = jnp.array(A__ ) self.assertTrue(np.allclose(transpose(A__ ) , np.asarray(transpose(A__ ) ) ) ) _SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 , 5 ) _SCREAMING_SNAKE_CASE = jnp.array(A__ ) self.assertTrue(np.allclose(transpose(A__ , axes=(1, 2, 0) ) , np.asarray(transpose(A__ , axes=(1, 2, 0) ) ) ) ) def UpperCamelCase ( self ) -> Optional[int]: _SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 ) self.assertTrue(np.allclose(reshape(A__ , (4, 3) ) , np.reshape(A__ , (4, 3) ) ) ) _SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 , 5 ) 
self.assertTrue(np.allclose(reshape(A__ , (12, 5) ) , np.reshape(A__ , (12, 5) ) ) ) @require_torch def UpperCamelCase ( self ) -> Optional[int]: _SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 ) _SCREAMING_SNAKE_CASE = torch.tensor(A__ ) self.assertTrue(np.allclose(reshape(A__ , (4, 3) ) , reshape(A__ , (4, 3) ).numpy() ) ) _SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 , 5 ) _SCREAMING_SNAKE_CASE = torch.tensor(A__ ) self.assertTrue(np.allclose(reshape(A__ , (12, 5) ) , reshape(A__ , (12, 5) ).numpy() ) ) @require_tf def UpperCamelCase ( self ) -> Tuple: _SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 ) _SCREAMING_SNAKE_CASE = tf.constant(A__ ) self.assertTrue(np.allclose(reshape(A__ , (4, 3) ) , reshape(A__ , (4, 3) ).numpy() ) ) _SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 , 5 ) _SCREAMING_SNAKE_CASE = tf.constant(A__ ) self.assertTrue(np.allclose(reshape(A__ , (12, 5) ) , reshape(A__ , (12, 5) ).numpy() ) ) @require_flax def UpperCamelCase ( self ) -> List[Any]: _SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 ) _SCREAMING_SNAKE_CASE = jnp.array(A__ ) self.assertTrue(np.allclose(reshape(A__ , (4, 3) ) , np.asarray(reshape(A__ , (4, 3) ) ) ) ) _SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 , 5 ) _SCREAMING_SNAKE_CASE = jnp.array(A__ ) self.assertTrue(np.allclose(reshape(A__ , (12, 5) ) , np.asarray(reshape(A__ , (12, 5) ) ) ) ) def UpperCamelCase ( self ) -> Any: _SCREAMING_SNAKE_CASE = np.random.randn(1 , 3 , 4 ) self.assertTrue(np.allclose(squeeze(A__ ) , np.squeeze(A__ ) ) ) _SCREAMING_SNAKE_CASE = np.random.randn(1 , 4 , 1 , 5 ) self.assertTrue(np.allclose(squeeze(A__ , axis=2 ) , np.squeeze(A__ , axis=2 ) ) ) @require_torch def UpperCamelCase ( self ) -> Any: _SCREAMING_SNAKE_CASE = np.random.randn(1 , 3 , 4 ) _SCREAMING_SNAKE_CASE = torch.tensor(A__ ) self.assertTrue(np.allclose(squeeze(A__ ) , squeeze(A__ ).numpy() ) ) _SCREAMING_SNAKE_CASE = np.random.randn(1 , 4 , 1 , 5 ) _SCREAMING_SNAKE_CASE = torch.tensor(A__ ) self.assertTrue(np.allclose(squeeze(A__ , axis=2 ) , squeeze(A__ , axis=2 ).numpy() ) ) @require_tf def UpperCamelCase ( self ) -> List[str]: _SCREAMING_SNAKE_CASE = np.random.randn(1 , 3 , 4 ) _SCREAMING_SNAKE_CASE = tf.constant(A__ ) self.assertTrue(np.allclose(squeeze(A__ ) , squeeze(A__ ).numpy() ) ) _SCREAMING_SNAKE_CASE = np.random.randn(1 , 4 , 1 , 5 ) _SCREAMING_SNAKE_CASE = tf.constant(A__ ) self.assertTrue(np.allclose(squeeze(A__ , axis=2 ) , squeeze(A__ , axis=2 ).numpy() ) ) @require_flax def UpperCamelCase ( self ) -> Optional[int]: _SCREAMING_SNAKE_CASE = np.random.randn(1 , 3 , 4 ) _SCREAMING_SNAKE_CASE = jnp.array(A__ ) self.assertTrue(np.allclose(squeeze(A__ ) , np.asarray(squeeze(A__ ) ) ) ) _SCREAMING_SNAKE_CASE = np.random.randn(1 , 4 , 1 , 5 ) _SCREAMING_SNAKE_CASE = jnp.array(A__ ) self.assertTrue(np.allclose(squeeze(A__ , axis=2 ) , np.asarray(squeeze(A__ , axis=2 ) ) ) ) def UpperCamelCase ( self ) -> Optional[Any]: _SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 ) self.assertTrue(np.allclose(expand_dims(A__ , axis=1 ) , np.expand_dims(A__ , axis=1 ) ) ) @require_torch def UpperCamelCase ( self ) -> Optional[int]: _SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 ) _SCREAMING_SNAKE_CASE = torch.tensor(A__ ) self.assertTrue(np.allclose(expand_dims(A__ , axis=1 ) , expand_dims(A__ , axis=1 ).numpy() ) ) @require_tf def UpperCamelCase ( self ) -> str: _SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 ) _SCREAMING_SNAKE_CASE = tf.constant(A__ ) self.assertTrue(np.allclose(expand_dims(A__ , axis=1 ) , expand_dims(A__ , axis=1 ).numpy() ) ) @require_flax def 
UpperCamelCase ( self ) -> Any: _SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 ) _SCREAMING_SNAKE_CASE = jnp.array(A__ ) self.assertTrue(np.allclose(expand_dims(A__ , axis=1 ) , np.asarray(expand_dims(A__ , axis=1 ) ) ) )
0
0
'''simple docstring''' import os try: from .build_directory_md import good_file_paths except ImportError: from build_directory_md import good_file_paths # type: ignore UpperCamelCase__ : Optional[Any] = list(good_file_paths()) assert filepaths, "good_file_paths() failed!" UpperCamelCase__ : Optional[Any] = [file for file in filepaths if file != file.lower()] if upper_files: print(f"""{len(upper_files)} files contain uppercase characters:""") print("\n".join(upper_files) + "\n") UpperCamelCase__ : int = [file for file in filepaths if ' ' in file] if space_files: print(f"""{len(space_files)} files contain space characters:""") print("\n".join(space_files) + "\n") UpperCamelCase__ : Any = [file for file in filepaths if '-' in file] if hyphen_files: print(f"""{len(hyphen_files)} files contain hyphen characters:""") print("\n".join(hyphen_files) + "\n") UpperCamelCase__ : Optional[int] = [file for file in filepaths if os.sep not in file] if nodir_files: print(f"""{len(nodir_files)} files are not in a directory:""") print("\n".join(nodir_files) + "\n") UpperCamelCase__ : List[str] = len(upper_files + space_files + hyphen_files + nodir_files) if bad_files: import sys sys.exit(bad_files)
718
'''simple docstring''' from pathlib import PurePosixPath from typing import Optional import fsspec from fsspec import AbstractFileSystem from huggingface_hub.hf_api import DatasetInfo from ..utils.file_utils import get_authentication_headers_for_url from ..utils.hub import hf_hub_url class _a (_lowerCamelCase): """simple docstring""" SCREAMING_SNAKE_CASE = '' SCREAMING_SNAKE_CASE = 'hf-legacy' # "hf://"" is reserved for hffs def __init__( self , A__ = None , A__ = None , **A__ , ) -> Optional[int]: super().__init__(self , **A__ ) _SCREAMING_SNAKE_CASE = repo_info _SCREAMING_SNAKE_CASE = token _SCREAMING_SNAKE_CASE = None def UpperCamelCase ( self ) -> Tuple: if self.dir_cache is None: _SCREAMING_SNAKE_CASE = {} for hf_file in self.repo_info.siblings: # TODO(QL): add sizes _SCREAMING_SNAKE_CASE = { """name""": hf_file.rfilename, """size""": None, """type""": """file""", } self.dir_cache.update( { str(A__ ): {"""name""": str(A__ ), """size""": None, """type""": """directory"""} for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1] } ) def UpperCamelCase ( self , A__ , A__ = "rb" , **A__ , ) -> Optional[int]: if not isinstance(self.repo_info , A__ ): raise NotImplementedError(F"Open is only implemented for dataset repositories, but got {self.repo_info}" ) _SCREAMING_SNAKE_CASE = hf_hub_url(self.repo_info.id , A__ , revision=self.repo_info.sha ) return fsspec.open( A__ , mode=A__ , headers=get_authentication_headers_for_url(A__ , use_auth_token=self.token ) , client_kwargs={"""trust_env""": True} , ).open() def UpperCamelCase ( self , A__ , **A__ ) -> str: self._get_dirs() _SCREAMING_SNAKE_CASE = self._strip_protocol(A__ ) if path in self.dir_cache: return self.dir_cache[path] else: raise FileNotFoundError(A__ ) def UpperCamelCase ( self , A__ , A__=False , **A__ ) -> List[Any]: self._get_dirs() _SCREAMING_SNAKE_CASE = PurePosixPath(path.strip("""/""" ) ) _SCREAMING_SNAKE_CASE = {} for p, f in self.dir_cache.items(): _SCREAMING_SNAKE_CASE = PurePosixPath(p.strip("""/""" ) ) _SCREAMING_SNAKE_CASE = p.parent if root == path: _SCREAMING_SNAKE_CASE = f _SCREAMING_SNAKE_CASE = list(paths.values() ) if detail: return out else: return sorted(f["""name"""] for f in out )
0
0
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/ import gc import random import tempfile import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, ControlNetModel, DDIMScheduler, StableDiffusionControlNetImgaImgPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, ) enable_full_determinism() class _a (lowercase_ , lowercase_ , lowercase_ , unittest.TestCase): """simple docstring""" SCREAMING_SNAKE_CASE = StableDiffusionControlNetImgaImgPipeline SCREAMING_SNAKE_CASE = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''} SCREAMING_SNAKE_CASE = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS SCREAMING_SNAKE_CASE = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({'control_image'}) SCREAMING_SNAKE_CASE = IMAGE_TO_IMAGE_IMAGE_PARAMS def UpperCamelCase ( self ) -> Union[str, Any]: torch.manual_seed(0 ) _SCREAMING_SNAKE_CASE = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , ) torch.manual_seed(0 ) _SCREAMING_SNAKE_CASE = ControlNetModel( block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , ) torch.manual_seed(0 ) _SCREAMING_SNAKE_CASE = DDIMScheduler( beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=A__ , set_alpha_to_one=A__ , ) torch.manual_seed(0 ) _SCREAMING_SNAKE_CASE = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , ) torch.manual_seed(0 ) _SCREAMING_SNAKE_CASE = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) _SCREAMING_SNAKE_CASE = CLIPTextModel(A__ ) _SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) _SCREAMING_SNAKE_CASE = { """unet""": unet, """controlnet""": controlnet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def UpperCamelCase ( self , A__ , A__=0 ) -> List[str]: if str(A__ ).startswith("""mps""" ): _SCREAMING_SNAKE_CASE = torch.manual_seed(A__ ) else: _SCREAMING_SNAKE_CASE = torch.Generator(device=A__ ).manual_seed(A__ ) _SCREAMING_SNAKE_CASE = 2 _SCREAMING_SNAKE_CASE = randn_tensor( (1, 3, 32 * 
controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=A__ , device=torch.device(A__ ) , ) _SCREAMING_SNAKE_CASE = floats_tensor(control_image.shape , rng=random.Random(A__ ) ).to(A__ ) _SCREAMING_SNAKE_CASE = image.cpu().permute(0 , 2 , 3 , 1 )[0] _SCREAMING_SNAKE_CASE = Image.fromarray(np.uinta(A__ ) ).convert("""RGB""" ).resize((64, 64) ) _SCREAMING_SNAKE_CASE = { """prompt""": """A painting of a squirrel eating a burger""", """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 6.0, """output_type""": """numpy""", """image""": image, """control_image""": control_image, } return inputs def UpperCamelCase ( self ) -> str: return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 ) @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , ) def UpperCamelCase ( self ) -> Tuple: self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 ) def UpperCamelCase ( self ) -> List[str]: self._test_inference_batch_single_identical(expected_max_diff=2E-3 ) class _a (lowercase_ , lowercase_ , unittest.TestCase): """simple docstring""" SCREAMING_SNAKE_CASE = StableDiffusionControlNetImgaImgPipeline SCREAMING_SNAKE_CASE = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''} SCREAMING_SNAKE_CASE = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS SCREAMING_SNAKE_CASE = frozenset([]) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess def UpperCamelCase ( self ) -> Optional[Any]: torch.manual_seed(0 ) _SCREAMING_SNAKE_CASE = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , ) torch.manual_seed(0 ) def init_weights(A__ ): if isinstance(A__ , torch.nn.Convad ): torch.nn.init.normal(m.weight ) m.bias.data.fill_(1.0 ) _SCREAMING_SNAKE_CASE = ControlNetModel( block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , ) controlneta.controlnet_down_blocks.apply(A__ ) torch.manual_seed(0 ) _SCREAMING_SNAKE_CASE = ControlNetModel( block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , ) controlneta.controlnet_down_blocks.apply(A__ ) torch.manual_seed(0 ) _SCREAMING_SNAKE_CASE = DDIMScheduler( beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=A__ , set_alpha_to_one=A__ , ) torch.manual_seed(0 ) _SCREAMING_SNAKE_CASE = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , ) torch.manual_seed(0 ) _SCREAMING_SNAKE_CASE = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) _SCREAMING_SNAKE_CASE = CLIPTextModel(A__ ) _SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) _SCREAMING_SNAKE_CASE = 
MultiControlNetModel([controlneta, controlneta] ) _SCREAMING_SNAKE_CASE = { """unet""": unet, """controlnet""": controlnet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def UpperCamelCase ( self , A__ , A__=0 ) -> List[str]: if str(A__ ).startswith("""mps""" ): _SCREAMING_SNAKE_CASE = torch.manual_seed(A__ ) else: _SCREAMING_SNAKE_CASE = torch.Generator(device=A__ ).manual_seed(A__ ) _SCREAMING_SNAKE_CASE = 2 _SCREAMING_SNAKE_CASE = [ randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=A__ , device=torch.device(A__ ) , ), randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=A__ , device=torch.device(A__ ) , ), ] _SCREAMING_SNAKE_CASE = floats_tensor(control_image[0].shape , rng=random.Random(A__ ) ).to(A__ ) _SCREAMING_SNAKE_CASE = image.cpu().permute(0 , 2 , 3 , 1 )[0] _SCREAMING_SNAKE_CASE = Image.fromarray(np.uinta(A__ ) ).convert("""RGB""" ).resize((64, 64) ) _SCREAMING_SNAKE_CASE = { """prompt""": """A painting of a squirrel eating a burger""", """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 6.0, """output_type""": """numpy""", """image""": image, """control_image""": control_image, } return inputs def UpperCamelCase ( self ) -> str: _SCREAMING_SNAKE_CASE = self.get_dummy_components() _SCREAMING_SNAKE_CASE = self.pipeline_class(**A__ ) pipe.to(A__ ) _SCREAMING_SNAKE_CASE = 10.0 _SCREAMING_SNAKE_CASE = 4 _SCREAMING_SNAKE_CASE = self.get_dummy_inputs(A__ ) _SCREAMING_SNAKE_CASE = steps _SCREAMING_SNAKE_CASE = scale _SCREAMING_SNAKE_CASE = pipe(**A__ )[0] _SCREAMING_SNAKE_CASE = self.get_dummy_inputs(A__ ) _SCREAMING_SNAKE_CASE = steps _SCREAMING_SNAKE_CASE = scale _SCREAMING_SNAKE_CASE = pipe(**A__ , control_guidance_start=0.1 , control_guidance_end=0.2 )[0] _SCREAMING_SNAKE_CASE = self.get_dummy_inputs(A__ ) _SCREAMING_SNAKE_CASE = steps _SCREAMING_SNAKE_CASE = scale _SCREAMING_SNAKE_CASE = pipe(**A__ , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0] _SCREAMING_SNAKE_CASE = self.get_dummy_inputs(A__ ) _SCREAMING_SNAKE_CASE = steps _SCREAMING_SNAKE_CASE = scale _SCREAMING_SNAKE_CASE = pipe(**A__ , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0] # make sure that all outputs are different assert np.sum(np.abs(output_a - output_a ) ) > 1E-3 assert np.sum(np.abs(output_a - output_a ) ) > 1E-3 assert np.sum(np.abs(output_a - output_a ) ) > 1E-3 def UpperCamelCase ( self ) -> List[Any]: return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 ) @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , ) def UpperCamelCase ( self ) -> Optional[Any]: self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 ) def UpperCamelCase ( self ) -> Union[str, Any]: self._test_inference_batch_single_identical(expected_max_diff=2E-3 ) def UpperCamelCase ( self ) -> Union[str, Any]: _SCREAMING_SNAKE_CASE = self.get_dummy_components() _SCREAMING_SNAKE_CASE = self.pipeline_class(**A__ ) pipe.to(A__ ) pipe.set_progress_bar_config(disable=A__ ) with tempfile.TemporaryDirectory() as tmpdir: try: # save_pretrained is not implemented for Multi-ControlNet pipe.save_pretrained(A__ ) except NotImplementedError: pass @slow @require_torch_gpu class _a 
(unittest.TestCase): """simple docstring""" def UpperCamelCase ( self ) -> str: super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCamelCase ( self ) -> List[str]: _SCREAMING_SNAKE_CASE = ControlNetModel.from_pretrained("""lllyasviel/sd-controlnet-canny""" ) _SCREAMING_SNAKE_CASE = StableDiffusionControlNetImgaImgPipeline.from_pretrained( """runwayml/stable-diffusion-v1-5""" , safety_checker=A__ , controlnet=A__ ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=A__ ) _SCREAMING_SNAKE_CASE = torch.Generator(device="""cpu""" ).manual_seed(0 ) _SCREAMING_SNAKE_CASE = """evil space-punk bird""" _SCREAMING_SNAKE_CASE = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png""" ).resize((5_12, 5_12) ) _SCREAMING_SNAKE_CASE = load_image( """https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png""" ).resize((5_12, 5_12) ) _SCREAMING_SNAKE_CASE = pipe( A__ , A__ , control_image=A__ , generator=A__ , output_type="""np""" , num_inference_steps=50 , strength=0.6 , ) _SCREAMING_SNAKE_CASE = output.images[0] assert image.shape == (5_12, 5_12, 3) _SCREAMING_SNAKE_CASE = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy""" ) assert np.abs(expected_image - image ).max() < 9E-2
719
'''simple docstring''' import pyarrow.parquet as pq import pytest from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config from datasets.features.image import Image from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]: """simple docstring""" assert isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""keep_in_memory""" , [False, True] ) def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[Any]: """simple docstring""" _SCREAMING_SNAKE_CASE = tmp_path / """cache""" _SCREAMING_SNAKE_CASE = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): _SCREAMING_SNAKE_CASE = ParquetDatasetReader(SCREAMING_SNAKE_CASE_ , cache_dir=SCREAMING_SNAKE_CASE_ , keep_in_memory=SCREAMING_SNAKE_CASE_ ).read() _check_parquet_dataset(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) @pytest.mark.parametrize( """features""" , [ None, {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}, {"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""}, {"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""}, {"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""}, ] , ) def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]: """simple docstring""" _SCREAMING_SNAKE_CASE = tmp_path / """cache""" _SCREAMING_SNAKE_CASE = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _SCREAMING_SNAKE_CASE = features.copy() if features else default_expected_features _SCREAMING_SNAKE_CASE = ( Features({feature: Value(SCREAMING_SNAKE_CASE_ ) for feature, dtype in features.items()} ) if features is not None else None ) _SCREAMING_SNAKE_CASE = ParquetDatasetReader(SCREAMING_SNAKE_CASE_ , features=SCREAMING_SNAKE_CASE_ , cache_dir=SCREAMING_SNAKE_CASE_ ).read() _check_parquet_dataset(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) @pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] ) def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[str]: """simple docstring""" _SCREAMING_SNAKE_CASE = tmp_path / """cache""" _SCREAMING_SNAKE_CASE = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _SCREAMING_SNAKE_CASE = ParquetDatasetReader(SCREAMING_SNAKE_CASE_ , cache_dir=SCREAMING_SNAKE_CASE_ , split=SCREAMING_SNAKE_CASE_ ).read() _check_parquet_dataset(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) assert dataset.split == split if split else "train" @pytest.mark.parametrize("""path_type""" , [str, list] ) def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]: """simple docstring""" if issubclass(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): _SCREAMING_SNAKE_CASE = parquet_path elif 
issubclass(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): _SCREAMING_SNAKE_CASE = [parquet_path] _SCREAMING_SNAKE_CASE = tmp_path / """cache""" _SCREAMING_SNAKE_CASE = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _SCREAMING_SNAKE_CASE = ParquetDatasetReader(SCREAMING_SNAKE_CASE_ , cache_dir=SCREAMING_SNAKE_CASE_ ).read() _check_parquet_dataset(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=("train",) ) -> List[str]: """simple docstring""" assert isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for split in splits: _SCREAMING_SNAKE_CASE = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""keep_in_memory""" , [False, True] ) def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[Any]: """simple docstring""" _SCREAMING_SNAKE_CASE = tmp_path / """cache""" _SCREAMING_SNAKE_CASE = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): _SCREAMING_SNAKE_CASE = ParquetDatasetReader( {"""train""": parquet_path} , cache_dir=SCREAMING_SNAKE_CASE_ , keep_in_memory=SCREAMING_SNAKE_CASE_ ).read() _check_parquet_datasetdict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) @pytest.mark.parametrize( """features""" , [ None, {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}, {"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""}, {"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""}, {"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""}, ] , ) def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[Any]: """simple docstring""" _SCREAMING_SNAKE_CASE = tmp_path / """cache""" _SCREAMING_SNAKE_CASE = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _SCREAMING_SNAKE_CASE = features.copy() if features else default_expected_features _SCREAMING_SNAKE_CASE = ( Features({feature: Value(SCREAMING_SNAKE_CASE_ ) for feature, dtype in features.items()} ) if features is not None else None ) _SCREAMING_SNAKE_CASE = ParquetDatasetReader({"""train""": parquet_path} , features=SCREAMING_SNAKE_CASE_ , cache_dir=SCREAMING_SNAKE_CASE_ ).read() _check_parquet_datasetdict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) @pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] ) def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[Any]: """simple docstring""" if split: _SCREAMING_SNAKE_CASE = {split: parquet_path} else: _SCREAMING_SNAKE_CASE = """train""" _SCREAMING_SNAKE_CASE = {"""train""": parquet_path, """test""": parquet_path} _SCREAMING_SNAKE_CASE = tmp_path / """cache""" _SCREAMING_SNAKE_CASE = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _SCREAMING_SNAKE_CASE = ParquetDatasetReader(SCREAMING_SNAKE_CASE_ , cache_dir=SCREAMING_SNAKE_CASE_ ).read() _check_parquet_datasetdict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , splits=list(path.keys() ) ) assert 
all(dataset[split].split == split for split in path.keys() ) def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Dict: """simple docstring""" _SCREAMING_SNAKE_CASE = ParquetDatasetWriter(SCREAMING_SNAKE_CASE_ , tmp_path / """foo.parquet""" ) assert writer.write() > 0 _SCREAMING_SNAKE_CASE = pq.ParquetFile(tmp_path / """foo.parquet""" ) _SCREAMING_SNAKE_CASE = pf.read() assert dataset.data.table == output_table def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[Any]: """simple docstring""" _SCREAMING_SNAKE_CASE = str(shared_datadir / """test_image_rgb.jpg""" ) _SCREAMING_SNAKE_CASE = {"""image""": [image_path]} _SCREAMING_SNAKE_CASE = Features({"""image""": Image()} ) _SCREAMING_SNAKE_CASE = Dataset.from_dict(SCREAMING_SNAKE_CASE_ , features=SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = ParquetDatasetWriter(SCREAMING_SNAKE_CASE_ , tmp_path / """foo.parquet""" ) assert writer.write() > 0 _SCREAMING_SNAKE_CASE = Dataset.from_parquet(str(tmp_path / """foo.parquet""" ) ) assert dataset.features == reloaded_dataset.features _SCREAMING_SNAKE_CASE = ParquetDatasetReader(str(tmp_path / """foo.parquet""" ) , streaming=SCREAMING_SNAKE_CASE_ ).read() assert dataset.features == reloaded_iterable_dataset.features @pytest.mark.parametrize( """feature, expected""" , [ (Features({"""foo""": Value("""int32""" )} ), None), (Features({"""image""": Image(), """foo""": Value("""int32""" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS), (Features({"""nested""": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS), ] , ) def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> str: """simple docstring""" assert get_writer_batch_size(SCREAMING_SNAKE_CASE_ ) == expected
0
0
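# --- editor's example (hedged) ----------------------------------------------
# A minimal sketch of the Parquet round trip the tests above exercise: write a
# Dataset with ParquetDatasetWriter, then read it back with ParquetDatasetReader.
# The file name "demo.parquet" and the toy columns are illustrative assumptions,
# not values taken from the test suite.
from datasets import Dataset
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter

def _parquet_round_trip_sketch(tmp_dir: str) -> Dataset:
    dataset = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [0, 1]})
    path = f"{tmp_dir}/demo.parquet"
    # write() returns a positive count on success, which the tests assert on
    assert ParquetDatasetWriter(dataset, path).write() > 0
    reloaded = ParquetDatasetReader(path, cache_dir=f"{tmp_dir}/cache").read()
    assert reloaded.column_names == dataset.column_names
    return reloaded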
'''simple docstring''' def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False ) -> Optional[Any]: """simple docstring""" if isinstance(lowerCamelCase__ , lowerCamelCase__ ) and isinstance(lowerCamelCase__ , lowerCamelCase__ ): _SCREAMING_SNAKE_CASE = len(set_a.intersection(lowerCamelCase__ ) ) if alternative_union: _SCREAMING_SNAKE_CASE = len(lowerCamelCase__ ) + len(lowerCamelCase__ ) else: _SCREAMING_SNAKE_CASE = len(set_a.union(lowerCamelCase__ ) ) return intersection / union if isinstance(lowerCamelCase__ , (list, tuple) ) and isinstance(lowerCamelCase__ , (list, tuple) ): _SCREAMING_SNAKE_CASE = [element for element in set_a if element in set_b] if alternative_union: _SCREAMING_SNAKE_CASE = len(lowerCamelCase__ ) + len(lowerCamelCase__ ) return len(lowerCamelCase__ ) / union else: _SCREAMING_SNAKE_CASE = set_a + [element for element in set_b if element not in set_a] return len(lowerCamelCase__ ) / len(lowerCamelCase__ ) return len(lowerCamelCase__ ) / len(lowerCamelCase__ ) return None if __name__ == "__main__": UpperCamelCase__ : List[str] = {"a", "b", "c", "d", "e"} UpperCamelCase__ : List[Any] = {"c", "d", "e", "f", "h", "i"} print(jaccard_similarity(set_a, set_b))
720
'''simple docstring''' def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> int: """simple docstring""" if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): raise ValueError("""multiplicative_persistence() only accepts integral values""" ) if num < 0: raise ValueError("""multiplicative_persistence() does not accept negative values""" ) _SCREAMING_SNAKE_CASE = 0 _SCREAMING_SNAKE_CASE = str(SCREAMING_SNAKE_CASE_ ) while len(SCREAMING_SNAKE_CASE_ ) != 1: _SCREAMING_SNAKE_CASE = [int(SCREAMING_SNAKE_CASE_ ) for i in num_string] _SCREAMING_SNAKE_CASE = 1 for i in range(0 , len(SCREAMING_SNAKE_CASE_ ) ): total *= numbers[i] _SCREAMING_SNAKE_CASE = str(SCREAMING_SNAKE_CASE_ ) steps += 1 return steps def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> int: """simple docstring""" if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): raise ValueError("""additive_persistence() only accepts integral values""" ) if num < 0: raise ValueError("""additive_persistence() does not accept negative values""" ) _SCREAMING_SNAKE_CASE = 0 _SCREAMING_SNAKE_CASE = str(SCREAMING_SNAKE_CASE_ ) while len(SCREAMING_SNAKE_CASE_ ) != 1: _SCREAMING_SNAKE_CASE = [int(SCREAMING_SNAKE_CASE_ ) for i in num_string] _SCREAMING_SNAKE_CASE = 0 for i in range(0 , len(SCREAMING_SNAKE_CASE_ ) ): total += numbers[i] _SCREAMING_SNAKE_CASE = str(SCREAMING_SNAKE_CASE_ ) steps += 1 return steps if __name__ == "__main__": import doctest doctest.testmod()
0
0
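# --- editor's example (hedged) ----------------------------------------------
# A hand-checked trace of the persistence functions above: multiplicative
# persistence counts how many times a number is replaced by the product of its
# digits before a single digit remains (additive persistence uses the digit
# sum). This reference version uses plain variable names for readability.
from functools import reduce

def _digit_product_steps(num: int) -> int:
    steps = 0
    while num >= 10:
        num = reduce(lambda a, b: a * b, (int(d) for d in str(num)))
        steps += 1
    return steps

assert _digit_product_steps(39) == 3  # 39 -> 27 -> 14 -> 4
assert _digit_product_steps(7) == 0   # already a single digit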
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCamelCase__ : List[str] = { "configuration_clap": [ "CLAP_PRETRAINED_MODEL_ARCHIVE_LIST", "ClapAudioConfig", "ClapConfig", "ClapTextConfig", ], "processing_clap": ["ClapProcessor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ : List[Any] = [ "CLAP_PRETRAINED_MODEL_ARCHIVE_LIST", "ClapModel", "ClapPreTrainedModel", "ClapTextModel", "ClapTextModelWithProjection", "ClapAudioModel", "ClapAudioModelWithProjection", ] UpperCamelCase__ : Dict = ["ClapFeatureExtractor"] if TYPE_CHECKING: from .configuration_clap import ( CLAP_PRETRAINED_MODEL_ARCHIVE_LIST, ClapAudioConfig, ClapConfig, ClapTextConfig, ) from .processing_clap import ClapProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_clap import ClapFeatureExtractor from .modeling_clap import ( CLAP_PRETRAINED_MODEL_ARCHIVE_LIST, ClapAudioModel, ClapAudioModelWithProjection, ClapModel, ClapPreTrainedModel, ClapTextModel, ClapTextModelWithProjection, ) else: import sys UpperCamelCase__ : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
721
'''simple docstring''' import math import os import re import sys import unittest from pathlib import Path from typing import Tuple from unittest.mock import patch from parameterized import parameterized from transformers.testing_utils import ( CaptureStderr, ExtendSysPath, TestCasePlus, execute_subprocess_async, get_gpu_count, get_torch_dist_unique_port, require_apex, require_bitsandbytes, require_fairscale, require_torch, require_torch_gpu, require_torch_multi_gpu, require_torch_non_multi_gpu, slow, ) from transformers.trainer_callback import TrainerState from transformers.trainer_utils import set_seed UpperCamelCase__ : Tuple = os.path.abspath(os.path.dirname(__file__)) with ExtendSysPath(f"""{bindir}/../../examples/pytorch/translation"""): from run_translation import main # noqa set_seed(42) UpperCamelCase__ : Union[str, Any] = "sshleifer/student_marian_en_ro_6_1" UpperCamelCase__ : str = "sshleifer/tiny-mbart" @require_torch class _a (_lowerCamelCase): """simple docstring""" def UpperCamelCase ( self , A__=False , A__=None , A__=True , A__=True , A__=True , A__=True , ) -> Optional[Any]: _SCREAMING_SNAKE_CASE = self.run_trainer( eval_steps=1 , max_len=12 , model_name=A__ , num_train_epochs=1 , distributed=A__ , extra_args_str=A__ , predict_with_generate=A__ , do_train=A__ , do_eval=A__ , do_predict=A__ , ) _SCREAMING_SNAKE_CASE = TrainerState.load_from_json(os.path.join(A__ , """trainer_state.json""" ) ).log_history if not do_eval: return _SCREAMING_SNAKE_CASE = [log for log in logs if """eval_loss""" in log.keys()] _SCREAMING_SNAKE_CASE = eval_metrics[0] if predict_with_generate: assert "eval_bleu" in first_step_stats _SCREAMING_SNAKE_CASE = eval_metrics[-1] assert isinstance(last_step_stats["""eval_bleu"""] , A__ ) assert not math.isnan(float(last_step_stats["""eval_loss"""] ) ), "eval_loss must not be `nan`" @require_torch_non_multi_gpu def UpperCamelCase ( self ) -> Optional[int]: self.run_seqaseq_quick() @require_torch_multi_gpu def UpperCamelCase ( self ) -> Optional[Any]: self.run_seqaseq_quick(distributed=A__ ) @require_torch_multi_gpu def UpperCamelCase ( self ) -> Union[str, Any]: self.run_seqaseq_quick(distributed=A__ ) @unittest.skip("""Requires an update of the env running those tests""" ) @require_torch_multi_gpu @require_fairscale def UpperCamelCase ( self ) -> Any: self.run_seqaseq_quick(distributed=A__ , extra_args_str="""--sharded_ddp simple""" ) @unittest.skip("""Requires an update of the env running those tests""" ) @require_torch_multi_gpu @require_fairscale def UpperCamelCase ( self ) -> Tuple: self.run_seqaseq_quick(distributed=A__ , extra_args_str="""--sharded_ddp simple --fp16""" ) @unittest.skip("""Requires an update of the env running those tests""" ) @require_torch_multi_gpu @require_fairscale def UpperCamelCase ( self ) -> str: self.run_seqaseq_quick(distributed=A__ , extra_args_str="""--sharded_ddp zero_dp_2""" , predict_with_generate=A__ ) @unittest.skip("""Requires an update of the env running those tests""" ) @require_torch_multi_gpu @require_fairscale def UpperCamelCase ( self ) -> List[str]: self.run_seqaseq_quick( distributed=A__ , extra_args_str="""--sharded_ddp zero_dp_2 --fp16""" , predict_with_generate=A__ ) @require_apex @require_torch_gpu def UpperCamelCase ( self ) -> Optional[Any]: # XXX: apex breaks the trainer if it's run twice e.g. 
run_seq2seq.main() from the same # program and it breaks other tests that run from the same pytest worker, therefore until this is # sorted out it must be run only in an external program, that is distributed=True in this # test and only under one or more gpus - if we want cpu will need to make a special test # # specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via # 2nd main() call it botches the future eval. # self.run_seqaseq_quick(distributed=A__ , extra_args_str="""--fp16 --fp16_backend=apex""" ) # test 2nd time - was getting eval_loss': nan' # to reproduce the problem set distributed=False self.run_seqaseq_quick(distributed=A__ , extra_args_str="""--fp16 --fp16_backend=apex""" ) @parameterized.expand(["""base""", """low""", """high""", """mixed"""] ) @require_torch_multi_gpu def UpperCamelCase ( self , A__ ) -> List[Any]: # as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout _SCREAMING_SNAKE_CASE = { # test with the default log_level - should be info and thus log info once """base""": {"""extra_args_str""": """""", """n_matches""": 1}, # test with low log_level and log_level_replica - should be noisy on all processes # now the info string should appear twice on 2 processes """low""": {"""extra_args_str""": """--log_level debug --log_level_replica debug""", """n_matches""": 2}, # test with high log_level and low log_level_replica # now the info string should appear once only on the replica """high""": {"""extra_args_str""": """--log_level error --log_level_replica debug""", """n_matches""": 1}, # test with high log_level and log_level_replica - should be quiet on all processes """mixed""": {"""extra_args_str""": """--log_level error --log_level_replica error""", """n_matches""": 0}, } _SCREAMING_SNAKE_CASE = experiments[experiment_id] _SCREAMING_SNAKE_CASE = {"""distributed""": True, """predict_with_generate""": False, """do_eval""": False, """do_predict""": False} _SCREAMING_SNAKE_CASE = """Running training""" with CaptureStderr() as cl: self.run_seqaseq_quick(**A__ , extra_args_str=data["""extra_args_str"""] ) _SCREAMING_SNAKE_CASE = len(re.findall(A__ , cl.err ) ) self.assertEqual(A__ , data["""n_matches"""] ) @slow def UpperCamelCase ( self ) -> Any: _SCREAMING_SNAKE_CASE = self.run_trainer( eval_steps=2 , max_len=1_28 , model_name=A__ , learning_rate=3E-4 , num_train_epochs=10 , distributed=A__ , ) # Check metrics _SCREAMING_SNAKE_CASE = TrainerState.load_from_json(os.path.join(A__ , """trainer_state.json""" ) ).log_history _SCREAMING_SNAKE_CASE = [log for log in logs if """eval_loss""" in log.keys()] _SCREAMING_SNAKE_CASE = eval_metrics[0] _SCREAMING_SNAKE_CASE = eval_metrics[-1] assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing" assert isinstance(last_step_stats["""eval_bleu"""] , A__ ) # test if do_predict saves generations and metrics _SCREAMING_SNAKE_CASE = os.listdir(A__ ) _SCREAMING_SNAKE_CASE = {os.path.basename(A__ ) for p in contents} assert "generated_predictions.txt" in contents assert "predict_results.json" in contents @slow @require_bitsandbytes def UpperCamelCase ( self ) -> Dict: from transformers.training_args import OptimizerNames def train_and_return_metrics(A__ ) -> Tuple[int, float]: _SCREAMING_SNAKE_CASE = """--skip_memory_metrics 0""" _SCREAMING_SNAKE_CASE = self.run_trainer( max_len=1_28 , model_name=A__ , learning_rate=3E-4 , num_train_epochs=1 , optim=A__ , distributed=A__ , extra_args_str=A__ , do_eval=A__ , do_predict=A__ , n_gpus_to_use=1 , ) # 
Check metrics _SCREAMING_SNAKE_CASE = TrainerState.load_from_json(Path(A__ , """trainer_state.json""" ) ).log_history _SCREAMING_SNAKE_CASE = int(logs[0]["""train_mem_gpu_peaked_delta"""] / 2**20 ) _SCREAMING_SNAKE_CASE = int(logs[0]["""train_mem_gpu_alloc_delta"""] / 2**20 ) _SCREAMING_SNAKE_CASE = logs[0]["""train_loss"""] return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value ) _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value ) _SCREAMING_SNAKE_CASE = gpu_alloc_mem_orig - gpu_alloc_mem_bnb _SCREAMING_SNAKE_CASE = gpu_peak_mem_orig + gpu_alloc_mem_orig _SCREAMING_SNAKE_CASE = gpu_peak_mem_bnb + gpu_alloc_mem_bnb _SCREAMING_SNAKE_CASE = gpu_total_mem_orig - gpu_total_mem_bnb # sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which # doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized # in 2 bytes and the diff in optim memory usage is derived as so: # # - normal 25*8=~200MB (8 bytes per param) # - bnb 25*2= ~50MB (2 bytes per param) # # Thus we should expect ~150MB total memory saved. # # Peak memory should be the same - the total should be different by about that same margin # # After leaving a small margin to accommodate for differences between gpus let's check # that we have at least 120MB in savings _SCREAMING_SNAKE_CASE = 1_20 # uncomment the following if this test starts failing - requires py38 for a new print feature # gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb # print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB") # print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB") # print(f"{gpu_alloc_mem_diff=}MB") # print(f"{gpu_peak_mem_diff=}MB") # print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB") # print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB") self.assertGreater( A__ , A__ , """should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got""" F" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and" F" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB" , ) self.assertGreater( A__ , A__ , """should use ~150MB less total gpu memory with BNB, compared to without it for this model but got""" F" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and" F" gpu_total_mem_bnb={gpu_total_mem_bnb}MB" , ) self.assertEqual( A__ , A__ , F"loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}" ) def UpperCamelCase ( self , A__ , A__ , A__ , A__ = 3E-3 , A__ = "adafactor" , A__ = False , A__ = None , A__ = 0 , A__ = True , A__ = True , A__ = True , A__ = True , A__ = None , ) -> Dict: _SCREAMING_SNAKE_CASE = self.test_file_dir / """../fixtures/tests_samples/wmt_en_ro""" _SCREAMING_SNAKE_CASE = self.get_auto_remove_tmp_dir() _SCREAMING_SNAKE_CASE = F"\n --model_name_or_path {model_name}\n --train_file {data_dir}/train.json\n --validation_file {data_dir}/val.json\n --test_file {data_dir}/test.json\n --output_dir {output_dir}\n --overwrite_output_dir\n --max_train_samples 8\n --max_source_length {max_len}\n --max_target_length {max_len}\n --do_train\n --num_train_epochs {str(A__ )}\n --per_device_train_batch_size 4\n --learning_rate {learning_rate}\n --warmup_steps 8\n --logging_steps 0\n 
--logging_strategy no\n --save_steps {str(A__ )}\n --group_by_length\n --label_smoothing_factor 0.1\n --target_lang ro_RO\n --source_lang en_XX\n ".split() _SCREAMING_SNAKE_CASE = F"\n --do_eval\n --per_device_eval_batch_size 4\n --max_eval_samples 8\n --val_max_target_length {max_len}\n --evaluation_strategy steps\n --eval_steps {str(A__ )}\n ".split() _SCREAMING_SNAKE_CASE = """ --do_predict """.split() _SCREAMING_SNAKE_CASE = [] if do_train: args += args_train if do_eval: args += args_eval if do_predict: args += args_predict if predict_with_generate: args += "--predict_with_generate".split() if do_train: if optim == "adafactor": args += "--adafactor".split() else: args += F"--optim {optim}".split() if extra_args_str is not None: args += extra_args_str.split() if distributed: if n_gpus_to_use is None: _SCREAMING_SNAKE_CASE = get_gpu_count() _SCREAMING_SNAKE_CASE = get_torch_dist_unique_port() _SCREAMING_SNAKE_CASE = F"\n -m torch.distributed.run\n --nproc_per_node={n_gpus_to_use}\n --master_port={master_port}\n {self.examples_dir_str}/pytorch/translation/run_translation.py\n ".split() _SCREAMING_SNAKE_CASE = [sys.executable] + distributed_args + args # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die execute_subprocess_async(A__ , env=self.get_env() ) else: _SCREAMING_SNAKE_CASE = ["""run_translation.py"""] + args with patch.object(A__ , """argv""" , A__ ): main() return output_dir
0
0
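# --- editor's example (hedged) ----------------------------------------------
# Sanity-checking the memory arithmetic from the bnb comparison above: ~25M
# quantizable parameters at 8 bytes/param (fp32 Adam states) versus 2 bytes/param
# (8-bit bnb) yields the ~150MB expected saving, against which the test allows a
# 120MB margin. The figures simply restate the comment in the test.
params_quantized = 25_000_000
adamw_bytes_per_param, bnb_bytes_per_param = 8, 2
saving_mb = params_quantized * (adamw_bytes_per_param - bnb_bytes_per_param) / 2**20
assert saving_mb > 120  # ~143MB, comfortably above the asserted margin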
'''simple docstring''' import math from dataclasses import dataclass from typing import List, Optional, Tuple, Union import numpy as np import torch from diffusers.configuration_utils import ConfigMixin, register_to_config from diffusers.schedulers.scheduling_utils import SchedulerMixin from diffusers.utils import BaseOutput, deprecate @dataclass # Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM class _a (_lowerCamelCase): SCREAMING_SNAKE_CASE = 42 SCREAMING_SNAKE_CASE = None def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=0.999 , SCREAMING_SNAKE_CASE_="cosine" , ) -> List[Any]: """simple docstring""" if alpha_transform_type == "cosine": def alpha_bar_fn(SCREAMING_SNAKE_CASE_ ): return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(SCREAMING_SNAKE_CASE_ ): return math.exp(t * -12.0 ) else: raise ValueError(F"Unsupported alpha_tranform_type: {alpha_transform_type}" ) _SCREAMING_SNAKE_CASE = [] for i in range(SCREAMING_SNAKE_CASE_ ): _SCREAMING_SNAKE_CASE = i / num_diffusion_timesteps _SCREAMING_SNAKE_CASE = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(SCREAMING_SNAKE_CASE_ ) / alpha_bar_fn(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) ) return torch.tensor(SCREAMING_SNAKE_CASE_ , dtype=torch.floataa ) class _a (_lowerCamelCase , _lowerCamelCase): SCREAMING_SNAKE_CASE = 1 @register_to_config def __init__( self , A__ = 10_00 , A__ = 0.0001 , A__ = 0.02 , A__ = "linear" , A__ = None , A__ = True , A__ = True , A__ = 0 , A__ = "epsilon" , A__ = 1.0 , **A__ , ) -> Any: if kwargs.get("""set_alpha_to_one""" , A__ ) is not None: _SCREAMING_SNAKE_CASE = ( """The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead.""" ) deprecate("""set_alpha_to_one""" , """1.0.0""" , A__ , standard_warn=A__ ) _SCREAMING_SNAKE_CASE = kwargs["""set_alpha_to_one"""] if trained_betas is not None: _SCREAMING_SNAKE_CASE = torch.tensor(A__ , dtype=torch.floataa ) elif beta_schedule == "linear": _SCREAMING_SNAKE_CASE = torch.linspace(A__ , A__ , A__ , dtype=torch.floataa ) elif beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. _SCREAMING_SNAKE_CASE = ( torch.linspace(beta_start**0.5 , beta_end**0.5 , A__ , dtype=torch.floataa ) ** 2 ) elif beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule _SCREAMING_SNAKE_CASE = betas_for_alpha_bar(A__ ) else: raise NotImplementedError(F"{beta_schedule} does is not implemented for {self.__class__}" ) _SCREAMING_SNAKE_CASE = 1.0 - self.betas _SCREAMING_SNAKE_CASE = torch.cumprod(self.alphas , dim=0 ) # At every step in inverted ddim, we are looking into the next alphas_cumprod # For the final step, there is no next alphas_cumprod, and the index is out of bounds # `set_alpha_to_zero` decides whether we set this parameter simply to zero # in this case, self.step() just output the predicted noise # or whether we use the final alpha of the "non-previous" one. 
_SCREAMING_SNAKE_CASE = torch.tensor(0.0 ) if set_alpha_to_zero else self.alphas_cumprod[-1] # standard deviation of the initial noise distribution _SCREAMING_SNAKE_CASE = 1.0 # setable values _SCREAMING_SNAKE_CASE = None _SCREAMING_SNAKE_CASE = torch.from_numpy(np.arange(0 , A__ ).copy().astype(np.intaa ) ) def UpperCamelCase ( self , A__ , A__ = None ) -> torch.FloatTensor: return sample def UpperCamelCase ( self , A__ , A__ = None ) -> List[Any]: if num_inference_steps > self.config.num_train_timesteps: raise ValueError( F"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:" F" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle" F" maximal {self.config.num_train_timesteps} timesteps." ) _SCREAMING_SNAKE_CASE = num_inference_steps _SCREAMING_SNAKE_CASE = self.config.num_train_timesteps // self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 _SCREAMING_SNAKE_CASE = (np.arange(0 , A__ ) * step_ratio).round().copy().astype(np.intaa ) _SCREAMING_SNAKE_CASE = torch.from_numpy(A__ ).to(A__ ) self.timesteps += self.config.steps_offset def UpperCamelCase ( self , A__ , A__ , A__ , A__ = 0.0 , A__ = False , A__ = None , A__ = True , ) -> Union[DDIMSchedulerOutput, Tuple]: # 1. get previous step value (=t+1) _SCREAMING_SNAKE_CASE = timestep + self.config.num_train_timesteps // self.num_inference_steps # 2. compute alphas, betas # change original implementation to exactly match noise levels for analogous forward process _SCREAMING_SNAKE_CASE = self.alphas_cumprod[timestep] _SCREAMING_SNAKE_CASE = ( self.alphas_cumprod[prev_timestep] if prev_timestep < self.config.num_train_timesteps else self.final_alpha_cumprod ) _SCREAMING_SNAKE_CASE = 1 - alpha_prod_t # 3. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf if self.config.prediction_type == "epsilon": _SCREAMING_SNAKE_CASE = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 _SCREAMING_SNAKE_CASE = model_output elif self.config.prediction_type == "sample": _SCREAMING_SNAKE_CASE = model_output _SCREAMING_SNAKE_CASE = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5 elif self.config.prediction_type == "v_prediction": _SCREAMING_SNAKE_CASE = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output _SCREAMING_SNAKE_CASE = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample else: raise ValueError( F"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or" """ `v_prediction`""" ) # 4. Clip or threshold "predicted x_0" if self.config.clip_sample: _SCREAMING_SNAKE_CASE = pred_original_sample.clamp( -self.config.clip_sample_range , self.config.clip_sample_range ) # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf _SCREAMING_SNAKE_CASE = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon # 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf _SCREAMING_SNAKE_CASE = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction if not return_dict: return (prev_sample, pred_original_sample) return DDIMSchedulerOutput(prev_sample=A__ , pred_original_sample=A__ ) def __len__( self ) -> List[Any]: return self.config.num_train_timesteps
700
'''simple docstring''' import sys UpperCamelCase__ : int = ( "73167176531330624919225119674426574742355349194934" "96983520312774506326239578318016984801869478851843" "85861560789112949495459501737958331952853208805511" "12540698747158523863050715693290963295227443043557" "66896648950445244523161731856403098711121722383113" "62229893423380308135336276614282806444486645238749" "30358907296290491560440772390713810515859307960866" "70172427121883998797908792274921901699720888093776" "65727333001053367881220235421809751254540594752243" "52584907711670556013604839586446706324415722155397" "53697817977846174064955149290862569321978468622482" "83972241375657056057490261407972968652414535100474" "82166370484403199890008895243450658541227588666881" "16427171479924442928230863465674813919123162824586" "17866458359124566529476545682848912883142607690042" "24219022671055626321111109370544217506941658960408" "07198403850962455444362981230987879927244284909188" "84580156166097919133875499200524063689912560717606" "05886116467109405077541002256983155200055935729725" "71636269561882670428252483600823257530420752963450" ) def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ = N ) -> int: """simple docstring""" _SCREAMING_SNAKE_CASE = -sys.maxsize - 1 for i in range(len(SCREAMING_SNAKE_CASE_ ) - 12 ): _SCREAMING_SNAKE_CASE = 1 for j in range(13 ): product *= int(n[i + j] ) if product > largest_product: _SCREAMING_SNAKE_CASE = product return largest_product if __name__ == "__main__": print(f"""{solution() = }""")
0
0
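# --- editor's example (hedged) ----------------------------------------------
# A miniature of the sliding-window digit product used in the Project Euler #8
# solution above, with a window of 3 instead of 13 so the result is easy to
# verify by hand: over "123454321" the best window is "454" -> 4 * 5 * 4 = 80.
from functools import reduce

def _largest_window_product(digits: str, k: int) -> int:
    return max(
        reduce(lambda a, b: a * b, (int(d) for d in digits[i : i + k]))
        for i in range(len(digits) - k + 1)
    )

assert _largest_window_product("123454321", 3) == 80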
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase__ : Optional[Any] = logging.get_logger(__name__) UpperCamelCase__ : List[str] = { "studio-ousia/luke-base": "https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json", "studio-ousia/luke-large": "https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json", } class _a (_lowerCamelCase): """simple docstring""" SCREAMING_SNAKE_CASE = 'luke' def __init__( self , A__=5_02_67 , A__=50_00_00 , A__=7_68 , A__=2_56 , A__=12 , A__=12 , A__=30_72 , A__="gelu" , A__=0.1 , A__=0.1 , A__=5_12 , A__=2 , A__=0.02 , A__=1E-12 , A__=True , A__=None , A__=1 , A__=0 , A__=2 , **A__ , ) -> int: super().__init__(pad_token_id=A__ , bos_token_id=A__ , eos_token_id=A__ , **A__ ) _SCREAMING_SNAKE_CASE = vocab_size _SCREAMING_SNAKE_CASE = entity_vocab_size _SCREAMING_SNAKE_CASE = hidden_size _SCREAMING_SNAKE_CASE = entity_emb_size _SCREAMING_SNAKE_CASE = num_hidden_layers _SCREAMING_SNAKE_CASE = num_attention_heads _SCREAMING_SNAKE_CASE = hidden_act _SCREAMING_SNAKE_CASE = intermediate_size _SCREAMING_SNAKE_CASE = hidden_dropout_prob _SCREAMING_SNAKE_CASE = attention_probs_dropout_prob _SCREAMING_SNAKE_CASE = max_position_embeddings _SCREAMING_SNAKE_CASE = type_vocab_size _SCREAMING_SNAKE_CASE = initializer_range _SCREAMING_SNAKE_CASE = layer_norm_eps _SCREAMING_SNAKE_CASE = use_entity_aware_attention _SCREAMING_SNAKE_CASE = classifier_dropout
701
'''simple docstring''' UpperCamelCase__ : Dict = { "a": "AAAAA", "b": "AAAAB", "c": "AAABA", "d": "AAABB", "e": "AABAA", "f": "AABAB", "g": "AABBA", "h": "AABBB", "i": "ABAAA", "j": "BBBAA", "k": "ABAAB", "l": "ABABA", "m": "ABABB", "n": "ABBAA", "o": "ABBAB", "p": "ABBBA", "q": "ABBBB", "r": "BAAAA", "s": "BAAAB", "t": "BAABA", "u": "BAABB", "v": "BBBAB", "w": "BABAA", "x": "BABAB", "y": "BABBA", "z": "BABBB", " ": " ", } UpperCamelCase__ : str = {value: key for key, value in encode_dict.items()} def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> str: """simple docstring""" _SCREAMING_SNAKE_CASE = """""" for letter in word.lower(): if letter.isalpha() or letter == " ": encoded += encode_dict[letter] else: raise Exception("""encode() accepts only letters of the alphabet and spaces""" ) return encoded def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> str: """simple docstring""" if set(SCREAMING_SNAKE_CASE_ ) - {"A", "B", " "} != set(): raise Exception("""decode() accepts only 'A', 'B' and spaces""" ) _SCREAMING_SNAKE_CASE = """""" for word in coded.split(): while len(SCREAMING_SNAKE_CASE_ ) != 0: decoded += decode_dict[word[:5]] _SCREAMING_SNAKE_CASE = word[5:] decoded += " " return decoded.strip() if __name__ == "__main__": from doctest import testmod testmod()
0
0
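# --- editor's example (hedged) ----------------------------------------------
# A hand-checked trace of the Baconian cipher above, using the encode_dict
# shown: every letter maps to a fixed 5-character A/B code, which is why
# decode() can peel the coded string off five characters at a time.
coded = "AAAAA" + "AAAAB"  # encode("ab"): a -> AAAAA, b -> AAAAB
chunks = [coded[i : i + 5] for i in range(0, len(coded), 5)]
assert chunks == ["AAAAA", "AAAAB"]  # decodes back to "a", "b"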
'''simple docstring''' def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = False ) -> bool: """simple docstring""" if n == 2: return True if not n % 2 or n < 2: return False if n > 5 and n % 10 not in (1, 3, 7, 9): # can quickly check last digit return False if n > 3_31_70_44_06_46_79_88_73_85_96_19_81 and not allow_probable: raise ValueError( """Warning: upper bound of deterministic test is exceeded. """ """Pass allow_probable=True to allow probabilistic test. """ """A return value of True indicates a probable prime.""" ) # array bounds provided by analysis _SCREAMING_SNAKE_CASE = [ 20_47, 1_37_36_53, 25_32_60_01, 32_15_03_17_51, 2_15_23_02_89_87_47, 3_47_47_49_66_03_83, 3_41_55_00_71_72_83_21, 1, 3_82_51_23_05_65_46_41_30_51, 1, 1, 31_86_65_85_78_34_03_11_51_16_74_61, 3_31_70_44_06_46_79_88_73_85_96_19_81, ] _SCREAMING_SNAKE_CASE = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41] for idx, _p in enumerate(SCREAMING_SNAKE_CASE_ , 1 ): if n < _p: # then we have our last prime to check _SCREAMING_SNAKE_CASE = primes[:idx] break _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = n - 1, 0 # break up n -1 into a power of 2 (s) and # remaining odd component # essentially, solve for d * 2 ** s == n - 1 while d % 2 == 0: d //= 2 s += 1 for prime in plist: _SCREAMING_SNAKE_CASE = False for r in range(SCREAMING_SNAKE_CASE_ ): _SCREAMING_SNAKE_CASE = pow(SCREAMING_SNAKE_CASE_ , d * 2**r , SCREAMING_SNAKE_CASE_ ) # see article for analysis explanation for m if (r == 0 and m == 1) or ((m + 1) % n == 0): _SCREAMING_SNAKE_CASE = True # this loop will not determine compositeness break if pr: continue # if pr is False, then the above loop never evaluated to true, # and the n MUST be composite return False return True def lowerCAmelCase_ ( ) -> None: """simple docstring""" assert not miller_rabin(5_61 ) assert miller_rabin(5_63 ) # 2047 assert not miller_rabin(83_82_01 ) assert miller_rabin(83_82_07 ) # 1_373_653 assert not miller_rabin(17_31_60_01 ) assert miller_rabin(17_31_60_17 ) # 25_326_001 assert not miller_rabin(30_78_38_66_41 ) assert miller_rabin(30_78_38_66_53 ) # 3_215_031_751 assert not miller_rabin(1_71_30_45_57_48_01 ) assert miller_rabin(1_71_30_45_57_48_19 ) # 2_152_302_898_747 assert not miller_rabin(2_77_97_99_72_83_07 ) assert miller_rabin(2_77_97_99_72_83_27 ) # 3_474_749_660_383 assert not miller_rabin(1_13_85_00_23_90_94_41 ) assert miller_rabin(1_13_85_00_23_90_95_27 ) # 341_550_071_728_321 assert not miller_rabin(1_27_50_41_01_88_48_80_43_51 ) assert miller_rabin(1_27_50_41_01_88_48_80_43_91 ) # 3_825_123_056_546_413_051 assert not miller_rabin(7_96_66_46_44_58_50_77_87_79_18_67 ) assert miller_rabin(7_96_66_46_44_58_50_77_87_79_19_51 ) # 318_665_857_834_031_151_167_461 assert not miller_rabin(55_28_40_67_74_46_64_78_97_66_03_33 ) assert miller_rabin(55_28_40_67_74_46_64_78_97_66_03_59 ) # 3_317_044_064_679_887_385_961_981 # upper limit for probabilistic test if __name__ == "__main__": test_miller_rabin()
702
'''simple docstring''' import argparse import torch from torch import nn from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> Any: """simple docstring""" _SCREAMING_SNAKE_CASE = [ """encoder.version""", """decoder.version""", """model.encoder.version""", """model.decoder.version""", """decoder.output_projection.weight""", """_float_tensor""", """encoder.embed_positions._float_tensor""", """decoder.embed_positions._float_tensor""", ] for k in ignore_keys: state_dict.pop(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> str: """simple docstring""" _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = emb.weight.shape _SCREAMING_SNAKE_CASE = nn.Linear(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , bias=SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = emb.weight.data return lin_layer def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> List[Any]: """simple docstring""" _SCREAMING_SNAKE_CASE = torch.load(SCREAMING_SNAKE_CASE_ , map_location="""cpu""" ) _SCREAMING_SNAKE_CASE = mam_aaa["""args"""] or mam_aaa["""cfg"""]["""model"""] _SCREAMING_SNAKE_CASE = mam_aaa["""model"""] remove_ignore_keys_(SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = state_dict["""encoder.embed_tokens.weight"""].shape[0] _SCREAMING_SNAKE_CASE = MaMaaaConfig( vocab_size=SCREAMING_SNAKE_CASE_ , max_position_embeddings=10_24 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="""relu""" , ) _SCREAMING_SNAKE_CASE = state_dict["""decoder.embed_tokens.weight"""] _SCREAMING_SNAKE_CASE = MaMaaaForConditionalGeneration(SCREAMING_SNAKE_CASE_ ) model.model.load_state_dict(SCREAMING_SNAKE_CASE_ , strict=SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = make_linear_from_emb(model.model.shared ) return model if __name__ == "__main__": UpperCamelCase__ : Dict = argparse.ArgumentParser() # Required parameters parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.") parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") UpperCamelCase__ : List[str] = parser.parse_args() UpperCamelCase__ : Any = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path) model.save_pretrained(args.pytorch_dump_folder_path)
0
0
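# --- editor's example (hedged) ----------------------------------------------
# Illustrating the first step of the Miller-Rabin test above: factor n - 1 into
# d * 2**s with d odd. For n = 561 (a Carmichael number used in the self-test),
# 560 == 35 * 2**4, so d = 35 and s = 4.
def _decompose(n: int) -> tuple:
    d, s = n - 1, 0
    while d % 2 == 0:
        d //= 2
        s += 1
    return d, s

assert _decompose(561) == (35, 4)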
'''simple docstring''' import copy from dataclasses import dataclass from pathlib import Path from typing import Dict, Optional, Union @dataclass class _a : """simple docstring""" SCREAMING_SNAKE_CASE = None SCREAMING_SNAKE_CASE = False SCREAMING_SNAKE_CASE = False SCREAMING_SNAKE_CASE = False SCREAMING_SNAKE_CASE = None SCREAMING_SNAKE_CASE = None SCREAMING_SNAKE_CASE = False SCREAMING_SNAKE_CASE = False SCREAMING_SNAKE_CASE = False SCREAMING_SNAKE_CASE = True SCREAMING_SNAKE_CASE = None SCREAMING_SNAKE_CASE = 1 SCREAMING_SNAKE_CASE = None SCREAMING_SNAKE_CASE = False SCREAMING_SNAKE_CASE = None SCREAMING_SNAKE_CASE = None def UpperCamelCase ( self ) -> "DownloadConfig": return self.__class__(**{k: copy.deepcopy(A__ ) for k, v in self.__dict__.items()} )
703
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available UpperCamelCase__ : str = { "configuration_canine": ["CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP", "CanineConfig"], "tokenization_canine": ["CanineTokenizer"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ : List[Any] = [ "CANINE_PRETRAINED_MODEL_ARCHIVE_LIST", "CanineForMultipleChoice", "CanineForQuestionAnswering", "CanineForSequenceClassification", "CanineForTokenClassification", "CanineLayer", "CanineModel", "CaninePreTrainedModel", "load_tf_weights_in_canine", ] if TYPE_CHECKING: from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig from .tokenization_canine import CanineTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_canine import ( CANINE_PRETRAINED_MODEL_ARCHIVE_LIST, CanineForMultipleChoice, CanineForQuestionAnswering, CanineForSequenceClassification, CanineForTokenClassification, CanineLayer, CanineModel, CaninePreTrainedModel, load_tf_weights_in_canine, ) else: import sys UpperCamelCase__ : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
0
0
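# --- editor's note (hedged) --------------------------------------------------
# The copy helper on the download-config dataclass above rebuilds the instance
# from a deep copy of every field, so mutating the copy cannot leak back into
# the original. A stand-in sketch of that pattern (_Cfg and deep_clone are
# illustrative names, not the real class or method):
import copy
from dataclasses import dataclass, field

@dataclass
class _Cfg:
    headers: dict = field(default_factory=dict)

    def deep_clone(self) -> "_Cfg":
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})

_a_cfg = _Cfg()
_b_cfg = _a_cfg.deep_clone()
_b_cfg.headers["x"] = 1
assert _a_cfg.headers == {}  # the original is untouched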
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> int: """simple docstring""" if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): raise ValueError("""Input must be an integer""" ) if input_num <= 0: raise ValueError("""Input must be positive""" ) return sum( divisor for divisor in range(1 , input_num // 2 + 1 ) if input_num % divisor == 0 ) if __name__ == "__main__": import doctest doctest.testmod()
704
'''simple docstring''' import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class _a (_lowerCamelCase): """simple docstring""" SCREAMING_SNAKE_CASE = ['image_processor', 'tokenizer'] SCREAMING_SNAKE_CASE = 'ChineseCLIPImageProcessor' SCREAMING_SNAKE_CASE = ('BertTokenizer', 'BertTokenizerFast') def __init__( self , A__=None , A__=None , **A__ ) -> int: _SCREAMING_SNAKE_CASE = None if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""" , A__ , ) _SCREAMING_SNAKE_CASE = kwargs.pop("""feature_extractor""" ) _SCREAMING_SNAKE_CASE = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""" ) if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""" ) super().__init__(A__ , A__ ) _SCREAMING_SNAKE_CASE = self.image_processor def __call__( self , A__=None , A__=None , A__=None , **A__ ) -> Optional[int]: if text is None and images is None: raise ValueError("""You have to specify either text or images. Both cannot be none.""" ) if text is not None: _SCREAMING_SNAKE_CASE = self.tokenizer(A__ , return_tensors=A__ , **A__ ) if images is not None: _SCREAMING_SNAKE_CASE = self.image_processor(A__ , return_tensors=A__ , **A__ ) if text is not None and images is not None: _SCREAMING_SNAKE_CASE = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**A__ ) , tensor_type=A__ ) def UpperCamelCase ( self , *A__ , **A__ ) -> Dict: return self.tokenizer.batch_decode(*A__ , **A__ ) def UpperCamelCase ( self , *A__ , **A__ ) -> Optional[Any]: return self.tokenizer.decode(*A__ , **A__ ) @property def UpperCamelCase ( self ) -> List[Any]: _SCREAMING_SNAKE_CASE = self.tokenizer.model_input_names _SCREAMING_SNAKE_CASE = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) @property def UpperCamelCase ( self ) -> Optional[int]: warnings.warn( """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , A__ , ) return self.image_processor_class
0
0
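# --- editor's example (hedged) ----------------------------------------------
# Hand-checked values for the proper-divisor sum above: a proper divisor of a
# positive integer never exceeds n // 2, which is why the function only scans
# range(1, input_num // 2 + 1). For 6: 1 + 2 + 3 == 6 (the smallest perfect
# number); for 8: 1 + 2 + 4 == 7.
assert sum(d for d in range(1, 6 // 2 + 1) if 6 % d == 0) == 6
assert sum(d for d in range(1, 8 // 2 + 1) if 8 % d == 0) == 7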
import argparse import torch from transformers import YosoConfig, YosoForMaskedLM def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> List[Any]: """simple docstring""" if "model" in orig_key: _SCREAMING_SNAKE_CASE = orig_key.replace("""model.""" , """""" ) if "norm1" in orig_key: _SCREAMING_SNAKE_CASE = orig_key.replace("""norm1""" , """attention.output.LayerNorm""" ) if "norm2" in orig_key: _SCREAMING_SNAKE_CASE = orig_key.replace("""norm2""" , """output.LayerNorm""" ) if "norm" in orig_key: _SCREAMING_SNAKE_CASE = orig_key.replace("""norm""" , """LayerNorm""" ) if "transformer" in orig_key: _SCREAMING_SNAKE_CASE = orig_key.split(""".""" )[0].split("""_""" )[-1] _SCREAMING_SNAKE_CASE = orig_key.replace(F"transformer_{layer_num}" , F"encoder.layer.{layer_num}" ) if "mha.attn" in orig_key: _SCREAMING_SNAKE_CASE = orig_key.replace("""mha.attn""" , """attention.self""" ) if "mha" in orig_key: _SCREAMING_SNAKE_CASE = orig_key.replace("""mha""" , """attention""" ) if "W_q" in orig_key: _SCREAMING_SNAKE_CASE = orig_key.replace("""W_q""" , """self.query""" ) if "W_k" in orig_key: _SCREAMING_SNAKE_CASE = orig_key.replace("""W_k""" , """self.key""" ) if "W_v" in orig_key: _SCREAMING_SNAKE_CASE = orig_key.replace("""W_v""" , """self.value""" ) if "ff1" in orig_key: _SCREAMING_SNAKE_CASE = orig_key.replace("""ff1""" , """intermediate.dense""" ) if "ff2" in orig_key: _SCREAMING_SNAKE_CASE = orig_key.replace("""ff2""" , """output.dense""" ) if "ff" in orig_key: _SCREAMING_SNAKE_CASE = orig_key.replace("""ff""" , """output.dense""" ) if "mlm_class" in orig_key: _SCREAMING_SNAKE_CASE = orig_key.replace("""mlm.mlm_class""" , """cls.predictions.decoder""" ) if "mlm" in orig_key: _SCREAMING_SNAKE_CASE = orig_key.replace("""mlm""" , """cls.predictions.transform""" ) if "cls" not in orig_key: _SCREAMING_SNAKE_CASE = """yoso.""" + orig_key return orig_key def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]: """simple docstring""" for key in orig_state_dict.copy().keys(): _SCREAMING_SNAKE_CASE = orig_state_dict.pop(SCREAMING_SNAKE_CASE_ ) if ("pooler" in key) or ("sen_class" in key): continue else: _SCREAMING_SNAKE_CASE = val _SCREAMING_SNAKE_CASE = orig_state_dict["""cls.predictions.decoder.bias"""] _SCREAMING_SNAKE_CASE = torch.arange(SCREAMING_SNAKE_CASE_ ).expand((1, -1) ) + 2 return orig_state_dict def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]: """simple docstring""" _SCREAMING_SNAKE_CASE = torch.load(SCREAMING_SNAKE_CASE_ , map_location="""cpu""" )["""model_state_dict"""] _SCREAMING_SNAKE_CASE = YosoConfig.from_json_file(SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = YosoForMaskedLM(SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = convert_checkpoint_helper(config.max_position_embeddings , SCREAMING_SNAKE_CASE_ ) print(model.load_state_dict(SCREAMING_SNAKE_CASE_ ) ) model.eval() model.save_pretrained(SCREAMING_SNAKE_CASE_ ) print(F"Checkpoint successfully converted. Model saved at {pytorch_dump_path}" ) if __name__ == "__main__": UpperCamelCase__ : int = argparse.ArgumentParser() # Required parameters parser.add_argument( "--pytorch_model_path", default=None, type=str, required=True, help="Path to YOSO pytorch checkpoint." ) parser.add_argument( "--config_file", default=None, type=str, required=True, help="The json file for YOSO model config.", ) parser.add_argument( "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." 
) UpperCamelCase__ : int = parser.parse_args() convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
705
'''simple docstring''' from sklearn.metrics import matthews_corrcoef import datasets UpperCamelCase__ : List[str] = "\nCompute the Matthews correlation coefficient (MCC)\n\nThe Matthews correlation coefficient is used in machine learning as a\nmeasure of the quality of binary and multiclass classifications. It takes\ninto account true and false positives and negatives and is generally\nregarded as a balanced measure which can be used even if the classes are of\nvery different sizes. The MCC is in essence a correlation coefficient value\nbetween -1 and +1. A coefficient of +1 represents a perfect prediction, 0\nan average random prediction and -1 an inverse prediction. The statistic\nis also known as the phi coefficient. [source: Wikipedia]\n" UpperCamelCase__ : List[Any] = "\nArgs:\n predictions (list of int): Predicted labels, as returned by a model.\n references (list of int): Ground truth labels.\n sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.\nReturns:\n matthews_correlation (dict containing float): Matthews correlation.\nExamples:\n Example 1, a basic example with only predictions and references as inputs:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3])\n >>> print(round(results['matthews_correlation'], 2))\n 0.54\n\n Example 2, the same example as above, but also including sample weights:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 3, 1, 1, 1, 2])\n >>> print(round(results['matthews_correlation'], 2))\n 0.1\n\n Example 3, the same example as above, but with sample weights that cause a negative correlation:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 1, 0, 0, 0, 1])\n >>> print(round(results['matthews_correlation'], 2))\n -0.25\n" UpperCamelCase__ : Any = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION) class _a (datasets.Metric): """simple docstring""" def UpperCamelCase ( self ) -> Optional[Any]: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Value("""int32""" ), """references""": datasets.Value("""int32""" ), } ) , reference_urls=[ """https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html""" ] , ) def UpperCamelCase ( self , A__ , A__ , A__=None ) -> List[str]: return { "matthews_correlation": float(matthews_corrcoef(A__ , A__ , sample_weight=A__ ) ), }
0
0
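# --- editor's example (hedged) ----------------------------------------------
# The metric above is a thin wrapper over scikit-learn; reproducing its first
# docstring example directly (the expected value 0.54 is taken from that
# docstring, not recomputed independently):
from sklearn.metrics import matthews_corrcoef
assert round(matthews_corrcoef([1, 3, 2, 0, 3, 2], [1, 2, 2, 0, 3, 3]), 2) == 0.54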
import fire from utils import calculate_rouge, save_json def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , **SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]: """simple docstring""" _SCREAMING_SNAKE_CASE = [x.strip() for x in open(SCREAMING_SNAKE_CASE_ ).readlines()] _SCREAMING_SNAKE_CASE = [x.strip() for x in open(SCREAMING_SNAKE_CASE_ ).readlines()][: len(SCREAMING_SNAKE_CASE_ )] _SCREAMING_SNAKE_CASE = calculate_rouge(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) if save_path is not None: save_json(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , indent=SCREAMING_SNAKE_CASE_ ) return metrics # these print nicely if __name__ == "__main__": fire.Fire(calculate_rouge_path)
706
'''simple docstring''' from __future__ import annotations def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Tuple: """simple docstring""" print(F"Vertex\tShortest Distance from vertex {src}" ) for i, d in enumerate(SCREAMING_SNAKE_CASE_ ): print(F"{i}\t\t{d}" ) def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[Any]: """simple docstring""" for j in range(SCREAMING_SNAKE_CASE_ ): _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = (graph[j][k] for k in ["""src""", """dst""", """weight"""]) if distance[u] != float("""inf""" ) and distance[u] + w < distance[v]: return True return False def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> list[float]: """simple docstring""" _SCREAMING_SNAKE_CASE = [float("""inf""" )] * vertex_count _SCREAMING_SNAKE_CASE = 0.0 for _ in range(vertex_count - 1 ): for j in range(SCREAMING_SNAKE_CASE_ ): _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = (graph[j][k] for k in ["""src""", """dst""", """weight"""]) if distance[u] != float("""inf""" ) and distance[u] + w < distance[v]: _SCREAMING_SNAKE_CASE = distance[u] + w _SCREAMING_SNAKE_CASE = check_negative_cycle(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if negative_cycle_exists: raise Exception("""Negative cycle found""" ) return distance if __name__ == "__main__": import doctest doctest.testmod() UpperCamelCase__ : int = int(input("Enter number of vertices: ").strip()) UpperCamelCase__ : int = int(input("Enter number of edges: ").strip()) UpperCamelCase__ : list[dict[str, int]] = [{} for _ in range(E)] for i in range(E): print("Edge ", i + 1) UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ : Dict = ( int(x) for x in input("Enter source, destination, weight: ").strip().split(" ") ) UpperCamelCase__ : Optional[Any] = {"src": src, "dst": dest, "weight": weight} UpperCamelCase__ : Optional[Any] = int(input("\nEnter shortest path source:").strip()) UpperCamelCase__ : Any = bellman_ford(graph, V, E, source) print_distance(shortest_distance, 0)
0
0
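# --- editor's example (hedged) ----------------------------------------------
# A self-contained miniature of the Bellman-Ford relaxation above, using the
# same edge-dict format the interactive section builds ({"src", "dst",
# "weight"}). Distances from vertex 0 are hand-checked:
# 0 -> 1 costs 4, 0 -> 2 costs min(9, 4 + 3) = 7.
_edges = [
    {"src": 0, "dst": 1, "weight": 4},
    {"src": 0, "dst": 2, "weight": 9},
    {"src": 1, "dst": 2, "weight": 3},
]
_distance = [0.0, float("inf"), float("inf")]
for _ in range(2):  # vertex_count - 1 relaxation passes
    for e in _edges:
        if _distance[e["src"]] + e["weight"] < _distance[e["dst"]]:
            _distance[e["dst"]] = _distance[e["src"]] + e["weight"]
assert _distance == [0.0, 4.0, 7.0]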
'''simple docstring''' import argparse from transformers import ( TapasConfig, TapasForMaskedLM, TapasForQuestionAnswering, TapasForSequenceClassification, TapasModel, TapasTokenizer, load_tf_weights_in_tapas, ) from transformers.utils import logging logging.set_verbosity_info() def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[Any]: """simple docstring""" _SCREAMING_SNAKE_CASE = TapasConfig.from_json_file(SCREAMING_SNAKE_CASE_ ) # set absolute/relative position embeddings parameter _SCREAMING_SNAKE_CASE = reset_position_index_per_cell # set remaining parameters of TapasConfig as well as the model based on the task if task == "SQA": _SCREAMING_SNAKE_CASE = TapasForQuestionAnswering(config=SCREAMING_SNAKE_CASE_ ) elif task == "WTQ": # run_task_main.py hparams _SCREAMING_SNAKE_CASE = 4 _SCREAMING_SNAKE_CASE = True # hparam_utils.py hparams _SCREAMING_SNAKE_CASE = 0.66_4694 _SCREAMING_SNAKE_CASE = 0.20_7951 _SCREAMING_SNAKE_CASE = 0.12_1194 _SCREAMING_SNAKE_CASE = True _SCREAMING_SNAKE_CASE = True _SCREAMING_SNAKE_CASE = False _SCREAMING_SNAKE_CASE = 0.035_2513 _SCREAMING_SNAKE_CASE = TapasForQuestionAnswering(config=SCREAMING_SNAKE_CASE_ ) elif task == "WIKISQL_SUPERVISED": # run_task_main.py hparams _SCREAMING_SNAKE_CASE = 4 _SCREAMING_SNAKE_CASE = False # hparam_utils.py hparams _SCREAMING_SNAKE_CASE = 36.4519 _SCREAMING_SNAKE_CASE = 0.90_3421 _SCREAMING_SNAKE_CASE = 222.088 _SCREAMING_SNAKE_CASE = True _SCREAMING_SNAKE_CASE = True _SCREAMING_SNAKE_CASE = True _SCREAMING_SNAKE_CASE = 0.76_3141 _SCREAMING_SNAKE_CASE = TapasForQuestionAnswering(config=SCREAMING_SNAKE_CASE_ ) elif task == "TABFACT": _SCREAMING_SNAKE_CASE = TapasForSequenceClassification(config=SCREAMING_SNAKE_CASE_ ) elif task == "MLM": _SCREAMING_SNAKE_CASE = TapasForMaskedLM(config=SCREAMING_SNAKE_CASE_ ) elif task == "INTERMEDIATE_PRETRAINING": _SCREAMING_SNAKE_CASE = TapasModel(config=SCREAMING_SNAKE_CASE_ ) else: raise ValueError(F"Task {task} not supported." ) print(F"Building PyTorch model from configuration: {config}" ) # Load weights from tf checkpoint load_tf_weights_in_tapas(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Save pytorch-model (weights and configuration) print(F"Save PyTorch model to {pytorch_dump_path}" ) model.save_pretrained(SCREAMING_SNAKE_CASE_ ) # Save tokenizer files print(F"Save tokenizer files to {pytorch_dump_path}" ) _SCREAMING_SNAKE_CASE = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + """vocab.txt""" , model_max_length=5_12 ) tokenizer.save_pretrained(SCREAMING_SNAKE_CASE_ ) print("""Used relative position embeddings:""" , model.config.reset_position_index_per_cell ) if __name__ == "__main__": UpperCamelCase__ : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--task", default="SQA", type=str, help="Model task for which to convert a checkpoint. Defaults to SQA." ) parser.add_argument( "--reset_position_index_per_cell", default=False, action="store_true", help="Whether to use relative position embeddings or not. Defaults to True.", ) parser.add_argument( "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path." ) parser.add_argument( "--tapas_config_file", default=None, type=str, required=True, help=( "The config json file corresponding to the pre-trained TAPAS model. \n" "This specifies the model architecture." 
), ) parser.add_argument( "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) UpperCamelCase__ : Optional[int] = parser.parse_args() convert_tf_checkpoint_to_pytorch( args.task, args.reset_position_index_per_cell, args.tf_checkpoint_path, args.tapas_config_file, args.pytorch_dump_path, )
707
'''simple docstring''' from __future__ import annotations import unittest from transformers import RoFormerConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForMultipleChoice, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerModel, ) from transformers.models.roformer.modeling_tf_roformer import ( TFRoFormerSelfAttention, TFRoFormerSinusoidalPositionalEmbedding, ) class _a : """simple docstring""" def __init__( self , A__ , A__=13 , A__=7 , A__=True , A__=True , A__=True , A__=True , A__=99 , A__=32 , A__=2 , A__=4 , A__=37 , A__="gelu" , A__=0.1 , A__=0.1 , A__=5_12 , A__=16 , A__=2 , A__=0.02 , A__=3 , A__=4 , A__=None , ) -> int: _SCREAMING_SNAKE_CASE = parent _SCREAMING_SNAKE_CASE = 13 _SCREAMING_SNAKE_CASE = 7 _SCREAMING_SNAKE_CASE = True _SCREAMING_SNAKE_CASE = True _SCREAMING_SNAKE_CASE = True _SCREAMING_SNAKE_CASE = True _SCREAMING_SNAKE_CASE = 99 _SCREAMING_SNAKE_CASE = 32 _SCREAMING_SNAKE_CASE = 2 _SCREAMING_SNAKE_CASE = 4 _SCREAMING_SNAKE_CASE = 37 _SCREAMING_SNAKE_CASE = """gelu""" _SCREAMING_SNAKE_CASE = 0.1 _SCREAMING_SNAKE_CASE = 0.1 _SCREAMING_SNAKE_CASE = 5_12 _SCREAMING_SNAKE_CASE = 16 _SCREAMING_SNAKE_CASE = 2 _SCREAMING_SNAKE_CASE = 0.02 _SCREAMING_SNAKE_CASE = 3 _SCREAMING_SNAKE_CASE = 4 _SCREAMING_SNAKE_CASE = None def UpperCamelCase ( self ) -> Tuple: _SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _SCREAMING_SNAKE_CASE = None if self.use_input_mask: _SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] ) _SCREAMING_SNAKE_CASE = None if self.use_token_type_ids: _SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _SCREAMING_SNAKE_CASE = None _SCREAMING_SNAKE_CASE = None _SCREAMING_SNAKE_CASE = None if self.use_labels: _SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices ) _SCREAMING_SNAKE_CASE = RoFormerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=A__ , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ) -> Union[str, Any]: _SCREAMING_SNAKE_CASE = TFRoFormerModel(config=A__ ) _SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} _SCREAMING_SNAKE_CASE = [input_ids, input_mask] _SCREAMING_SNAKE_CASE = model(A__ ) _SCREAMING_SNAKE_CASE = model(A__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 
self.seq_length, self.hidden_size) ) def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ) -> str: _SCREAMING_SNAKE_CASE = True _SCREAMING_SNAKE_CASE = TFRoFormerForCausalLM(config=A__ ) _SCREAMING_SNAKE_CASE = { """input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids, } _SCREAMING_SNAKE_CASE = model(A__ )["""logits"""] self.parent.assertListEqual( list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] ) def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ) -> Dict: _SCREAMING_SNAKE_CASE = TFRoFormerForMaskedLM(config=A__ ) _SCREAMING_SNAKE_CASE = { """input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids, } _SCREAMING_SNAKE_CASE = model(A__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ) -> List[str]: _SCREAMING_SNAKE_CASE = self.num_labels _SCREAMING_SNAKE_CASE = TFRoFormerForSequenceClassification(config=A__ ) _SCREAMING_SNAKE_CASE = { """input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids, } _SCREAMING_SNAKE_CASE = model(A__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ) -> Any: _SCREAMING_SNAKE_CASE = self.num_choices _SCREAMING_SNAKE_CASE = TFRoFormerForMultipleChoice(config=A__ ) _SCREAMING_SNAKE_CASE = tf.tile(tf.expand_dims(A__ , 1 ) , (1, self.num_choices, 1) ) _SCREAMING_SNAKE_CASE = tf.tile(tf.expand_dims(A__ , 1 ) , (1, self.num_choices, 1) ) _SCREAMING_SNAKE_CASE = tf.tile(tf.expand_dims(A__ , 1 ) , (1, self.num_choices, 1) ) _SCREAMING_SNAKE_CASE = { """input_ids""": multiple_choice_inputs_ids, """attention_mask""": multiple_choice_input_mask, """token_type_ids""": multiple_choice_token_type_ids, } _SCREAMING_SNAKE_CASE = model(A__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ) -> List[str]: _SCREAMING_SNAKE_CASE = self.num_labels _SCREAMING_SNAKE_CASE = TFRoFormerForTokenClassification(config=A__ ) _SCREAMING_SNAKE_CASE = { """input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids, } _SCREAMING_SNAKE_CASE = model(A__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ) -> Tuple: _SCREAMING_SNAKE_CASE = TFRoFormerForQuestionAnswering(config=A__ ) _SCREAMING_SNAKE_CASE = { """input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids, } _SCREAMING_SNAKE_CASE = model(A__ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCamelCase ( self ) -> List[str]: _SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() ( ( _SCREAMING_SNAKE_CASE ) , ( _SCREAMING_SNAKE_CASE ) , ( _SCREAMING_SNAKE_CASE ) , ( _SCREAMING_SNAKE_CASE ) , ( _SCREAMING_SNAKE_CASE ) , ( _SCREAMING_SNAKE_CASE ) , ( _SCREAMING_SNAKE_CASE ) , ) = config_and_inputs _SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_tf class _a (_lowerCamelCase , 
_lowerCamelCase , unittest.TestCase): """simple docstring""" SCREAMING_SNAKE_CASE = ( ( TFRoFormerModel, TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerForMultipleChoice, ) if is_tf_available() else () ) SCREAMING_SNAKE_CASE = ( { 'feature-extraction': TFRoFormerModel, 'fill-mask': TFRoFormerForMaskedLM, 'question-answering': TFRoFormerForQuestionAnswering, 'text-classification': TFRoFormerForSequenceClassification, 'text-generation': TFRoFormerForCausalLM, 'token-classification': TFRoFormerForTokenClassification, 'zero-shot': TFRoFormerForSequenceClassification, } if is_tf_available() else {} ) SCREAMING_SNAKE_CASE = False SCREAMING_SNAKE_CASE = False def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ ) -> str: if pipeline_test_casse_name == "TextGenerationPipelineTests": return True return False def UpperCamelCase ( self ) -> Union[str, Any]: _SCREAMING_SNAKE_CASE = TFRoFormerModelTester(self ) _SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=A__ , hidden_size=37 ) def UpperCamelCase ( self ) -> Optional[Any]: self.config_tester.run_common_tests() def UpperCamelCase ( self ) -> List[Any]: _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A__ ) def UpperCamelCase ( self ) -> str: _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*A__ ) def UpperCamelCase ( self ) -> int: _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head(*A__ ) def UpperCamelCase ( self ) -> Dict: _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*A__ ) def UpperCamelCase ( self ) -> Union[str, Any]: _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*A__ ) def UpperCamelCase ( self ) -> Tuple: _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*A__ ) def UpperCamelCase ( self ) -> List[Any]: _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*A__ ) @slow def UpperCamelCase ( self ) -> str: _SCREAMING_SNAKE_CASE = TFRoFormerModel.from_pretrained("""junnyu/roformer_chinese_base""" ) self.assertIsNotNone(A__ ) @require_tf class _a (unittest.TestCase): """simple docstring""" @slow def UpperCamelCase ( self ) -> Optional[Any]: _SCREAMING_SNAKE_CASE = TFRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""" ) _SCREAMING_SNAKE_CASE = tf.constant([[0, 1, 2, 3, 4, 5]] ) _SCREAMING_SNAKE_CASE = model(A__ )[0] # TODO Replace vocab size _SCREAMING_SNAKE_CASE = 5_00_00 _SCREAMING_SNAKE_CASE = [1, 6, vocab_size] self.assertEqual(output.shape , A__ ) print(output[:, :3, :3] ) # TODO Replace values below with what was printed above. 
_SCREAMING_SNAKE_CASE = tf.constant( [ [ [-0.1205_3341, -1.026_4901, 0.2922_1946], [-1.513_3783, 0.19_7433, 0.1519_0607], [-5.013_5403, -3.90_0256, -0.8403_8764], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , A__ , atol=1E-4 ) @require_tf class _a (unittest.TestCase): """simple docstring""" SCREAMING_SNAKE_CASE = 1E-4 def UpperCamelCase ( self ) -> List[Any]: _SCREAMING_SNAKE_CASE = tf.constant([[4, 10]] ) _SCREAMING_SNAKE_CASE = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 ) _SCREAMING_SNAKE_CASE = emba(input_ids.shape ) _SCREAMING_SNAKE_CASE = tf.constant( [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]] ) tf.debugging.assert_near(A__ , A__ , atol=self.tolerance ) def UpperCamelCase ( self ) -> List[Any]: _SCREAMING_SNAKE_CASE = tf.constant( [ [0.0000, 0.0000, 0.0000, 0.0000, 0.0000], [0.8415, 0.8219, 0.8020, 0.7819, 0.7617], [0.9093, 0.9364, 0.9581, 0.9749, 0.9870], ] ) _SCREAMING_SNAKE_CASE = TFRoFormerSinusoidalPositionalEmbedding(num_positions=5_12 , embedding_dim=5_12 ) emba([2, 16, 5_12] ) _SCREAMING_SNAKE_CASE = emba.weight[:3, :5] tf.debugging.assert_near(A__ , A__ , atol=self.tolerance ) @require_tf class _a (unittest.TestCase): """simple docstring""" SCREAMING_SNAKE_CASE = 1E-4 def UpperCamelCase ( self ) -> int: # 2,12,16,64 _SCREAMING_SNAKE_CASE = tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 1_00 _SCREAMING_SNAKE_CASE = -tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 1_00 _SCREAMING_SNAKE_CASE = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 ) _SCREAMING_SNAKE_CASE = embed_positions([2, 16, 7_68] )[None, None, :, :] _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = TFRoFormerSelfAttention.apply_rotary_position_embeddings( A__ , A__ , A__ ) _SCREAMING_SNAKE_CASE = tf.constant( [ [0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700], [-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343], [-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985], [-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871], [0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980], [3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253], ] ) _SCREAMING_SNAKE_CASE = tf.constant( [ [0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700], [0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343], [1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985], [2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871], [-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980], [-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253], ] ) tf.debugging.assert_near(query_layer[0, 0, :6, :8] , A__ , atol=self.tolerance ) tf.debugging.assert_near(key_layer[0, 0, :6, :8] , A__ , atol=self.tolerance )
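The sinusoidal expected values in the embedding tests above can be reproduced with a short NumPy sketch. This assumes the layout used by TFRoFormerSinusoidalPositionalEmbedding is sin for the first half of the dimensions and cos for the second half, which is what the [0, 0, 0, 1, 1, 1] row for position 0 implies:

import numpy as np

def sinusoidal_positions(num_positions: int, dim: int) -> np.ndarray:
    # angle table: position / 10000 ** (2i / dim), one column per frequency
    positions = np.arange(num_positions, dtype=np.float64)[:, None]
    inv_freq = 1.0 / (10_000 ** (np.arange(0, dim, 2, dtype=np.float64) / dim))
    angles = positions * inv_freq[None, :]
    # first half sin, second half cos, so position 0 yields [0, 0, 0, 1, 1, 1]
    return np.concatenate([np.sin(angles), np.cos(angles)], axis=-1)

print(np.round(sinusoidal_positions(2, 6), 4))
# [[0.     0.     0.     1.     1.     1.    ]
#  [0.8415 0.0464 0.0022 0.5403 0.9989 1.    ]]  <- matches the first embedding test above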
0
0
'''simple docstring''' from typing import Optional, Tuple, Union import torch from diffusers import DiffusionPipeline, ImagePipelineOutput class _a (_lowerCamelCase): """simple docstring""" def __init__( self , A__ , A__ ) -> Union[str, Any]: super().__init__() self.register_modules(unet=A__ , scheduler=A__ ) @torch.no_grad() def __call__( self , A__ = 1 , A__ = None , A__ = 50 , A__ = "pil" , A__ = True , **A__ , ) -> Union[ImagePipelineOutput, Tuple]: _SCREAMING_SNAKE_CASE = torch.randn( (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=A__ , ) _SCREAMING_SNAKE_CASE = image.to(self.device ) # set step values self.scheduler.set_timesteps(A__ ) for t in self.progress_bar(self.scheduler.timesteps ): # 1. predict noise model_output _SCREAMING_SNAKE_CASE = self.unet(A__ , A__ ).sample # 2. predict previous mean of image x_t-1 and add variance depending on eta # eta corresponds to η in paper and should be between [0, 1] # do x_t -> x_t-1 _SCREAMING_SNAKE_CASE = self.scheduler.step(A__ , A__ , A__ ).prev_sample _SCREAMING_SNAKE_CASE = (image / 2 + 0.5).clamp(0 , 1 ) _SCREAMING_SNAKE_CASE = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": _SCREAMING_SNAKE_CASE = self.numpy_to_pil(A__ ) if not return_dict: return (image,) return ImagePipelineOutput(images=A__ )
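A minimal usage sketch for a pipeline of this shape. The class above mirrors diffusers' unconditional image pipelines (register unet + scheduler, denoise, decode to PIL); the pipeline class and checkpoint name below are illustrative stand-ins, not taken from this file:

import torch
from diffusers import DDPMPipeline  # same unet/scheduler interface as the class above

pipe = DDPMPipeline.from_pretrained("google/ddpm-cifar10-32")  # illustrative checkpoint
generator = torch.Generator().manual_seed(0)
# batch_size, generator, num_inference_steps mirror the __call__ signature above
result = pipe(batch_size=1, generator=generator, num_inference_steps=50)
result.images[0].save("sample.png")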
708
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available UpperCamelCase__ : int = {"tokenization_herbert": ["HerbertTokenizer"]} try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ : Tuple = ["HerbertTokenizerFast"] if TYPE_CHECKING: from .tokenization_herbert import HerbertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_herbert_fast import HerbertTokenizerFast else: import sys UpperCamelCase__ : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
0
0
'''simple docstring''' import logging import os import sys from dataclasses import dataclass, field from typing import Optional from seqaseq_trainer import SeqaSeqTrainer from seqaseq_training_args import SeqaSeqTrainingArguments import transformers from transformers import ( AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer, HfArgumentParser, MBartTokenizer, MBartTokenizerFast, set_seed, ) from transformers.trainer_utils import EvaluationStrategy, is_main_process from transformers.training_args import ParallelMode from utils import ( SeqaSeqDataCollator, SeqaSeqDataset, assert_all_frozen, build_compute_metrics_fn, check_output_dir, freeze_embeds, freeze_params, lmap, save_json, use_task_specific_params, write_txt_file, ) UpperCamelCase__ : str = logging.getLogger(__name__) @dataclass class _a : """simple docstring""" SCREAMING_SNAKE_CASE = field( metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'}) SCREAMING_SNAKE_CASE = field( default=_lowerCamelCase , metadata={'help': 'Pretrained config name or path if not the same as model_name'}) SCREAMING_SNAKE_CASE = field( default=_lowerCamelCase , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'}) SCREAMING_SNAKE_CASE = field( default=_lowerCamelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , ) SCREAMING_SNAKE_CASE = field(default=_lowerCamelCase , metadata={'help': 'Whether to freeze the encoder.'}) SCREAMING_SNAKE_CASE = field(default=_lowerCamelCase , metadata={'help': 'Whether to freeze the embeddings.'}) @dataclass class _a : """simple docstring""" SCREAMING_SNAKE_CASE = field( metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'}) SCREAMING_SNAKE_CASE = field( default='summarization' , metadata={'help': 'Task name, summarization (or summarization_{dataset} for pegasus) or translation'} , ) SCREAMING_SNAKE_CASE = field( default=10_24 , metadata={ 'help': ( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) } , ) SCREAMING_SNAKE_CASE = field( default=1_28 , metadata={ 'help': ( 'The maximum total sequence length for target text after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) } , ) SCREAMING_SNAKE_CASE = field( default=1_42 , metadata={ 'help': ( 'The maximum total sequence length for validation target text after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded. ' 'This argument is also used to override the ``max_length`` param of ``model.generate``, which is used ' 'during ``evaluate`` and ``predict``.' ) } , ) SCREAMING_SNAKE_CASE = field( default=1_42 , metadata={ 'help': ( 'The maximum total sequence length for test target text after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) } , ) SCREAMING_SNAKE_CASE = field(default=-1 , metadata={'help': '# training examples. -1 means use all.'}) SCREAMING_SNAKE_CASE = field(default=-1 , metadata={'help': '# validation examples. -1 means use all.'}) SCREAMING_SNAKE_CASE = field(default=-1 , metadata={'help': '# test examples. 
-1 means use all.'}) SCREAMING_SNAKE_CASE = field(default=_lowerCamelCase , metadata={'help': 'Source language id for translation.'}) SCREAMING_SNAKE_CASE = field(default=_lowerCamelCase , metadata={'help': 'Target language id for translation.'}) SCREAMING_SNAKE_CASE = field(default=_lowerCamelCase , metadata={'help': '# num_beams to use for evaluation.'}) SCREAMING_SNAKE_CASE = field( default=_lowerCamelCase , metadata={'help': 'If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined.'} , ) def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]: """simple docstring""" logger.info(F"***** {split} metrics *****" ) for key in sorted(metrics.keys() ): logger.info(F" {key} = {metrics[key]}" ) save_json(SCREAMING_SNAKE_CASE_ , os.path.join(SCREAMING_SNAKE_CASE_ , F"{split}_results.json" ) ) def lowerCAmelCase_ ( ) -> Optional[int]: """simple docstring""" _SCREAMING_SNAKE_CASE = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = parser.parse_args_into_dataclasses() check_output_dir(SCREAMING_SNAKE_CASE_ ) # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( """Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() logger.info("""Training/evaluation parameters %s""" , SCREAMING_SNAKE_CASE_ ) # Set seed set_seed(training_args.seed ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
_SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) _SCREAMING_SNAKE_CASE = ("""encoder_layerdrop""", """decoder_layerdrop""", """dropout""", """attention_dropout""") for p in extra_model_params: if getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): assert hasattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ), F"({config.__class__.__name__}) doesn't have a `{p}` attribute" setattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) _SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) _SCREAMING_SNAKE_CASE = AutoModelForSeqaSeqLM.from_pretrained( model_args.model_name_or_path , from_tf=""".ckpt""" in model_args.model_name_or_path , config=SCREAMING_SNAKE_CASE_ , cache_dir=model_args.cache_dir , ) # use task specific params use_task_specific_params(SCREAMING_SNAKE_CASE_ , data_args.task ) # set num_beams for evaluation if data_args.eval_beams is None: _SCREAMING_SNAKE_CASE = model.config.num_beams # set decoder_start_token_id for MBart if model.config.decoder_start_token_id is None and isinstance(SCREAMING_SNAKE_CASE_ , (MBartTokenizer, MBartTokenizerFast) ): assert ( data_args.tgt_lang is not None and data_args.src_lang is not None ), "mBart requires --tgt_lang and --src_lang" if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): _SCREAMING_SNAKE_CASE = tokenizer.lang_code_to_id[data_args.tgt_lang] else: _SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_ids(data_args.tgt_lang ) if model_args.freeze_embeds: freeze_embeds(SCREAMING_SNAKE_CASE_ ) if model_args.freeze_encoder: freeze_params(model.get_encoder() ) assert_all_frozen(model.get_encoder() ) _SCREAMING_SNAKE_CASE = SeqaSeqDataset # Get datasets _SCREAMING_SNAKE_CASE = ( dataset_class( SCREAMING_SNAKE_CASE_ , type_path="""train""" , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , ) if training_args.do_train else None ) _SCREAMING_SNAKE_CASE = ( dataset_class( SCREAMING_SNAKE_CASE_ , type_path="""val""" , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , ) if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO else None ) _SCREAMING_SNAKE_CASE = ( dataset_class( SCREAMING_SNAKE_CASE_ , type_path="""test""" , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , ) if training_args.do_predict else None ) # Initialize our Trainer _SCREAMING_SNAKE_CASE = ( build_compute_metrics_fn(data_args.task , SCREAMING_SNAKE_CASE_ ) if training_args.predict_with_generate else None ) _SCREAMING_SNAKE_CASE = SeqaSeqTrainer( model=SCREAMING_SNAKE_CASE_ , args=SCREAMING_SNAKE_CASE_ , data_args=SCREAMING_SNAKE_CASE_ , train_dataset=SCREAMING_SNAKE_CASE_ , eval_dataset=SCREAMING_SNAKE_CASE_ , data_collator=SeqaSeqDataCollator( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , 
compute_metrics=SCREAMING_SNAKE_CASE_ , tokenizer=SCREAMING_SNAKE_CASE_ , ) _SCREAMING_SNAKE_CASE = {} # Training if training_args.do_train: logger.info("""*** Train ***""" ) _SCREAMING_SNAKE_CASE = trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) _SCREAMING_SNAKE_CASE = train_result.metrics _SCREAMING_SNAKE_CASE = data_args.n_train trainer.save_model() # this also saves the tokenizer if trainer.is_world_process_zero(): handle_metrics("""train""" , SCREAMING_SNAKE_CASE_ , training_args.output_dir ) all_metrics.update(SCREAMING_SNAKE_CASE_ ) # Need to save the state, since Trainer.save_model saves only the tokenizer with the model trainer.state.save_to_json(os.path.join(training_args.output_dir , """trainer_state.json""" ) ) # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) tokenizer.save_pretrained(training_args.output_dir ) # Evaluation if training_args.do_eval: logger.info("""*** Evaluate ***""" ) _SCREAMING_SNAKE_CASE = trainer.evaluate(metric_key_prefix="""val""" ) _SCREAMING_SNAKE_CASE = data_args.n_val _SCREAMING_SNAKE_CASE = round(metrics["""val_loss"""] , 4 ) if trainer.is_world_process_zero(): handle_metrics("""val""" , SCREAMING_SNAKE_CASE_ , training_args.output_dir ) all_metrics.update(SCREAMING_SNAKE_CASE_ ) if training_args.do_predict: logger.info("""*** Predict ***""" ) _SCREAMING_SNAKE_CASE = trainer.predict(test_dataset=SCREAMING_SNAKE_CASE_ , metric_key_prefix="""test""" ) _SCREAMING_SNAKE_CASE = test_output.metrics _SCREAMING_SNAKE_CASE = data_args.n_test if trainer.is_world_process_zero(): _SCREAMING_SNAKE_CASE = round(metrics["""test_loss"""] , 4 ) handle_metrics("""test""" , SCREAMING_SNAKE_CASE_ , training_args.output_dir ) all_metrics.update(SCREAMING_SNAKE_CASE_ ) if training_args.predict_with_generate: _SCREAMING_SNAKE_CASE = tokenizer.batch_decode( test_output.predictions , skip_special_tokens=SCREAMING_SNAKE_CASE_ , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = lmap(str.strip , SCREAMING_SNAKE_CASE_ ) write_txt_file(SCREAMING_SNAKE_CASE_ , os.path.join(training_args.output_dir , """test_generations.txt""" ) ) if trainer.is_world_process_zero(): save_json(SCREAMING_SNAKE_CASE_ , os.path.join(training_args.output_dir , """all_results.json""" ) ) return all_metrics def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> List[str]: """simple docstring""" main() if __name__ == "__main__": main()
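A hedged launch example for this trainer script. The flag names follow the dataclass fields above (model_name_or_path, data_dir, task) plus standard HfArgumentParser/TrainingArguments flags; the script filename is an assumption:

python finetune_trainer.py \
    --model_name_or_path t5-small \
    --data_dir /path/to/cnn_dm \
    --output_dir /tmp/seq2seq-out \
    --task summarization \
    --do_train --do_eval \
    --predict_with_generate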
709
'''simple docstring''' import argparse import gdown import numpy as np import torch from huggingface_hub import hf_hub_download from transformers import ( CLIPTokenizer, CLIPTokenizerFast, VideoMAEImageProcessor, XCLIPConfig, XCLIPModel, XCLIPProcessor, XCLIPTextConfig, XCLIPVisionConfig, ) def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Dict: """simple docstring""" _SCREAMING_SNAKE_CASE = XCLIPTextConfig() # derive patch size from model name _SCREAMING_SNAKE_CASE = model_name.find("""patch""" ) _SCREAMING_SNAKE_CASE = int(model_name[start_idx + len("""patch""" ) : start_idx + len("""patch""" ) + 2] ) _SCREAMING_SNAKE_CASE = XCLIPVisionConfig(patch_size=SCREAMING_SNAKE_CASE_ , num_frames=SCREAMING_SNAKE_CASE_ ) if "large" in model_name: _SCREAMING_SNAKE_CASE = 7_68 _SCREAMING_SNAKE_CASE = 30_72 _SCREAMING_SNAKE_CASE = 12 _SCREAMING_SNAKE_CASE = 10_24 _SCREAMING_SNAKE_CASE = 40_96 _SCREAMING_SNAKE_CASE = 16 _SCREAMING_SNAKE_CASE = 24 _SCREAMING_SNAKE_CASE = 7_68 _SCREAMING_SNAKE_CASE = 30_72 if model_name == "xclip-large-patch14-16-frames": _SCREAMING_SNAKE_CASE = 3_36 _SCREAMING_SNAKE_CASE = XCLIPConfig.from_text_vision_configs(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if "large" in model_name: _SCREAMING_SNAKE_CASE = 7_68 return config def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> Dict: """simple docstring""" # text encoder if name == "token_embedding.weight": _SCREAMING_SNAKE_CASE = name.replace("""token_embedding.weight""" , """text_model.embeddings.token_embedding.weight""" ) if name == "positional_embedding": _SCREAMING_SNAKE_CASE = name.replace("""positional_embedding""" , """text_model.embeddings.position_embedding.weight""" ) if "ln_1" in name: _SCREAMING_SNAKE_CASE = name.replace("""ln_1""" , """layer_norm1""" ) if "ln_2" in name: _SCREAMING_SNAKE_CASE = name.replace("""ln_2""" , """layer_norm2""" ) if "c_fc" in name: _SCREAMING_SNAKE_CASE = name.replace("""c_fc""" , """fc1""" ) if "c_proj" in name: _SCREAMING_SNAKE_CASE = name.replace("""c_proj""" , """fc2""" ) if name.startswith("""transformer.resblocks""" ): _SCREAMING_SNAKE_CASE = name.replace("""transformer.resblocks""" , """text_model.encoder.layers""" ) if "attn.out_proj" in name and "message" not in name: _SCREAMING_SNAKE_CASE = name.replace("""attn.out_proj""" , """self_attn.out_proj""" ) if "ln_final" in name: _SCREAMING_SNAKE_CASE = name.replace("""ln_final""" , """text_model.final_layer_norm""" ) # visual encoder if name == "visual.class_embedding": _SCREAMING_SNAKE_CASE = name.replace("""visual.class_embedding""" , """vision_model.embeddings.class_embedding""" ) if name == "visual.positional_embedding": _SCREAMING_SNAKE_CASE = name.replace("""visual.positional_embedding""" , """vision_model.embeddings.position_embedding.weight""" ) if name.startswith("""visual.transformer.resblocks""" ): _SCREAMING_SNAKE_CASE = name.replace("""visual.transformer.resblocks""" , """vision_model.encoder.layers""" ) if "visual.conv1" in name: _SCREAMING_SNAKE_CASE = name.replace("""visual.conv1""" , """vision_model.embeddings.patch_embedding""" ) if "visual.ln_pre" in name: _SCREAMING_SNAKE_CASE = name.replace("""visual.ln_pre""" , """vision_model.pre_layernorm""" ) if "visual.ln_post" in name: _SCREAMING_SNAKE_CASE = name.replace("""visual.ln_post""" , """vision_model.post_layernorm""" ) if "visual.proj" in name: _SCREAMING_SNAKE_CASE = name.replace("""visual.proj""" , """visual_projection.weight""" ) if "text_projection" in name: _SCREAMING_SNAKE_CASE = name.replace("""text_projection""" , 
"""text_projection.weight""" ) # things on top if "prompts_visual_proj" in name: _SCREAMING_SNAKE_CASE = name.replace("""prompts_visual_proj""" , """prompts_visual_projection""" ) if "prompts_visual_ln" in name: _SCREAMING_SNAKE_CASE = name.replace("""prompts_visual_ln""" , """prompts_visual_layernorm""" ) # mit if name == "mit.positional_embedding": _SCREAMING_SNAKE_CASE = name.replace("""positional""" , """position""" ) if name.startswith("""mit.resblocks""" ): _SCREAMING_SNAKE_CASE = name.replace("""mit.resblocks""" , """mit.encoder.layers""" ) # prompts generator if name.startswith("""prompts_generator.norm""" ): _SCREAMING_SNAKE_CASE = name.replace("""prompts_generator.norm""" , """prompts_generator.layernorm""" ) return name def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]: """simple docstring""" for key in orig_state_dict.copy().keys(): _SCREAMING_SNAKE_CASE = orig_state_dict.pop(SCREAMING_SNAKE_CASE_ ) if "attn.in_proj" in key: _SCREAMING_SNAKE_CASE = key.split(""".""" ) if key.startswith("""visual""" ): _SCREAMING_SNAKE_CASE = key_split[3] _SCREAMING_SNAKE_CASE = config.vision_config.hidden_size if "message_attn" in key: if "weight" in key: _SCREAMING_SNAKE_CASE = val[ :dim, : ] _SCREAMING_SNAKE_CASE = val[ dim : dim * 2, : ] _SCREAMING_SNAKE_CASE = val[ -dim:, : ] else: _SCREAMING_SNAKE_CASE = val[ :dim ] _SCREAMING_SNAKE_CASE = val[ dim : dim * 2 ] _SCREAMING_SNAKE_CASE = val[ -dim: ] else: if "weight" in key: _SCREAMING_SNAKE_CASE = val[ :dim, : ] _SCREAMING_SNAKE_CASE = val[ dim : dim * 2, : ] _SCREAMING_SNAKE_CASE = val[ -dim:, : ] else: _SCREAMING_SNAKE_CASE = val[:dim] _SCREAMING_SNAKE_CASE = val[ dim : dim * 2 ] _SCREAMING_SNAKE_CASE = val[-dim:] elif key.startswith("""mit""" ): _SCREAMING_SNAKE_CASE = key_split[2] _SCREAMING_SNAKE_CASE = config.vision_config.mit_hidden_size if "weight" in key: _SCREAMING_SNAKE_CASE = val[:dim, :] _SCREAMING_SNAKE_CASE = val[dim : dim * 2, :] _SCREAMING_SNAKE_CASE = val[-dim:, :] else: _SCREAMING_SNAKE_CASE = val[:dim] _SCREAMING_SNAKE_CASE = val[dim : dim * 2] _SCREAMING_SNAKE_CASE = val[-dim:] else: _SCREAMING_SNAKE_CASE = key_split[2] _SCREAMING_SNAKE_CASE = config.text_config.hidden_size if "weight" in key: _SCREAMING_SNAKE_CASE = val[:dim, :] _SCREAMING_SNAKE_CASE = val[ dim : dim * 2, : ] _SCREAMING_SNAKE_CASE = val[-dim:, :] else: _SCREAMING_SNAKE_CASE = val[:dim] _SCREAMING_SNAKE_CASE = val[ dim : dim * 2 ] _SCREAMING_SNAKE_CASE = val[-dim:] else: _SCREAMING_SNAKE_CASE = rename_key(SCREAMING_SNAKE_CASE_ ) if new_key_name in ["visual_projection.weight", "text_projection.weight"]: _SCREAMING_SNAKE_CASE = val.T _SCREAMING_SNAKE_CASE = val return orig_state_dict def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> Optional[Any]: """simple docstring""" if num_frames == 8: _SCREAMING_SNAKE_CASE = """eating_spaghetti_8_frames.npy""" elif num_frames == 16: _SCREAMING_SNAKE_CASE = """eating_spaghetti.npy""" elif num_frames == 32: _SCREAMING_SNAKE_CASE = """eating_spaghetti_32_frames.npy""" _SCREAMING_SNAKE_CASE = hf_hub_download( repo_id="""hf-internal-testing/spaghetti-video""" , filename=SCREAMING_SNAKE_CASE_ , repo_type="""dataset""" , ) _SCREAMING_SNAKE_CASE = np.load(SCREAMING_SNAKE_CASE_ ) return list(SCREAMING_SNAKE_CASE_ ) def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=False ) -> Optional[int]: """simple docstring""" _SCREAMING_SNAKE_CASE = { # fully supervised kinetics-400 checkpoints """xclip-base-patch32""": 
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth""", """xclip-base-patch32-16-frames""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth""" ), """xclip-base-patch16""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth""", """xclip-base-patch16-16-frames""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth""" ), """xclip-large-patch14""": """https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&amp;export=download&amp;confirm=t&amp;uuid=b26caedc-88e2-473e-830a-9d158b653cdb""", """xclip-large-patch14-16-frames""": """https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&amp;export=download&amp;confirm=t&amp;uuid=538fa810-e671-4050-b385-9a623f89804f""", # fully supervised kinetics-600 checkpoints """xclip-base-patch16-kinetics-600""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth""" ), """xclip-base-patch16-kinetics-600-16-frames""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth""" ), """xclip-large-patch14-kinetics-600""": """https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&amp;export=download&amp;confirm=t&amp;uuid=141d4977-4a65-44ae-864f-4b0c19f838be""", # few shot """xclip-base-patch16-hmdb-2-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth""" ), """xclip-base-patch16-hmdb-4-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth""" ), """xclip-base-patch16-hmdb-8-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth""" ), """xclip-base-patch16-hmdb-16-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth""" ), """xclip-base-patch16-ucf-2-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth""" ), """xclip-base-patch16-ucf-4-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth""" ), """xclip-base-patch16-ucf-8-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth""" ), """xclip-base-patch16-ucf-16-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth""" ), # zero shot """xclip-base-patch16-zero-shot""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth""", } _SCREAMING_SNAKE_CASE = model_to_url[model_name] _SCREAMING_SNAKE_CASE = 8 if "16-frames" in model_name: _SCREAMING_SNAKE_CASE = 16 elif "shot" in model_name: _SCREAMING_SNAKE_CASE = 32 _SCREAMING_SNAKE_CASE = get_xclip_config(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = XCLIPModel(SCREAMING_SNAKE_CASE_ ) model.eval() if "drive" in checkpoint_url: _SCREAMING_SNAKE_CASE = """pytorch_model.bin""" gdown.cached_download(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , quiet=SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = torch.load(SCREAMING_SNAKE_CASE_ , map_location="""cpu""" )["""model"""] else: _SCREAMING_SNAKE_CASE = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE_ )["""model"""] _SCREAMING_SNAKE_CASE = convert_state_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = XCLIPModel(SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = model.load_state_dict(SCREAMING_SNAKE_CASE_ , strict=SCREAMING_SNAKE_CASE_ ) assert missing_keys == 
["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"] model.eval() _SCREAMING_SNAKE_CASE = 3_36 if model_name == """xclip-large-patch14-16-frames""" else 2_24 _SCREAMING_SNAKE_CASE = VideoMAEImageProcessor(size=SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained("""openai/clip-vit-base-patch32""" ) _SCREAMING_SNAKE_CASE = CLIPTokenizerFast.from_pretrained("""openai/clip-vit-base-patch32""" ) _SCREAMING_SNAKE_CASE = XCLIPProcessor(image_processor=SCREAMING_SNAKE_CASE_ , tokenizer=SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = prepare_video(SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = processor( text=["""playing sports""", """eating spaghetti""", """go shopping"""] , videos=SCREAMING_SNAKE_CASE_ , return_tensors="""pt""" , padding=SCREAMING_SNAKE_CASE_ ) print("""Shape of pixel values:""" , inputs.pixel_values.shape ) with torch.no_grad(): _SCREAMING_SNAKE_CASE = model(**SCREAMING_SNAKE_CASE_ ) # Verify outputs _SCREAMING_SNAKE_CASE = outputs.logits_per_video _SCREAMING_SNAKE_CASE = logits_per_video.softmax(dim=1 ) print("""Probs:""" , SCREAMING_SNAKE_CASE_ ) # kinetics-400 if model_name == "xclip-base-patch32": _SCREAMING_SNAKE_CASE = torch.tensor([[0.0019, 0.9951, 0.0030]] ) elif model_name == "xclip-base-patch32-16-frames": _SCREAMING_SNAKE_CASE = torch.tensor([[7.0999e-04, 9.9883e-01, 4.5580e-04]] ) elif model_name == "xclip-base-patch16": _SCREAMING_SNAKE_CASE = torch.tensor([[0.0083, 0.9681, 0.0236]] ) elif model_name == "xclip-base-patch16-16-frames": _SCREAMING_SNAKE_CASE = torch.tensor([[7.6937e-04, 9.9728e-01, 1.9473e-03]] ) elif model_name == "xclip-large-patch14": _SCREAMING_SNAKE_CASE = torch.tensor([[0.0062, 0.9864, 0.0075]] ) elif model_name == "xclip-large-patch14-16-frames": _SCREAMING_SNAKE_CASE = torch.tensor([[3.3877e-04, 9.9937e-01, 2.8888e-04]] ) # kinetics-600 elif model_name == "xclip-base-patch16-kinetics-600": _SCREAMING_SNAKE_CASE = torch.tensor([[0.0555, 0.8914, 0.0531]] ) elif model_name == "xclip-base-patch16-kinetics-600-16-frames": _SCREAMING_SNAKE_CASE = torch.tensor([[3.8554e-04, 9.9929e-01, 3.2754e-04]] ) elif model_name == "xclip-large-patch14-kinetics-600": _SCREAMING_SNAKE_CASE = torch.tensor([[0.0036, 0.9920, 0.0045]] ) # few shot elif model_name == "xclip-base-patch16-hmdb-2-shot": _SCREAMING_SNAKE_CASE = torch.tensor([[7.1890e-06, 9.9994e-01, 5.6559e-05]] ) elif model_name == "xclip-base-patch16-hmdb-4-shot": _SCREAMING_SNAKE_CASE = torch.tensor([[1.0320e-05, 9.9993e-01, 6.2435e-05]] ) elif model_name == "xclip-base-patch16-hmdb-8-shot": _SCREAMING_SNAKE_CASE = torch.tensor([[4.1377e-06, 9.9990e-01, 9.8386e-05]] ) elif model_name == "xclip-base-patch16-hmdb-16-shot": _SCREAMING_SNAKE_CASE = torch.tensor([[4.1347e-05, 9.9962e-01, 3.3411e-04]] ) elif model_name == "xclip-base-patch16-ucf-2-shot": _SCREAMING_SNAKE_CASE = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]] ) elif model_name == "xclip-base-patch16-ucf-4-shot": _SCREAMING_SNAKE_CASE = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]] ) elif model_name == "xclip-base-patch16-ucf-8-shot": _SCREAMING_SNAKE_CASE = torch.tensor([[0.0027, 0.9904, 0.0070]] ) elif model_name == "xclip-base-patch16-ucf-16-shot": _SCREAMING_SNAKE_CASE = torch.tensor([[9.8219e-04, 9.9593e-01, 3.0863e-03]] ) # zero shot elif model_name == "xclip-base-patch16-zero-shot": _SCREAMING_SNAKE_CASE = torch.tensor([[3.5082e-04, 9.9785e-01, 1.7966e-03]] ) else: raise ValueError(F"Model name {model_name} not supported" ) assert torch.allclose(SCREAMING_SNAKE_CASE_ , 
SCREAMING_SNAKE_CASE_ , atol=1e-3 ) print("""Looks ok!""" ) if pytorch_dump_folder_path is not None: print(F"Saving model {model_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(SCREAMING_SNAKE_CASE_ ) if push_to_hub: print("""Pushing model, processor and slow tokenizer files to the hub...""" ) model.push_to_hub(SCREAMING_SNAKE_CASE_ , organization="""nielsr""" ) processor.push_to_hub(SCREAMING_SNAKE_CASE_ , organization="""nielsr""" ) slow_tokenizer.push_to_hub(SCREAMING_SNAKE_CASE_ , organization="""nielsr""" ) if __name__ == "__main__": UpperCamelCase__ : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default="xclip-base-patch32", type=str, help="Name of the model.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) UpperCamelCase__ : str = parser.parse_args() convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
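For reference, the script can then be invoked as below; the filename is an assumption, but the three flags are exactly those defined in the parser above:

python convert_x_clip_original_pytorch_to_hf.py \
    --model_name xclip-base-patch32 \
    --pytorch_dump_folder_path ./xclip-base-patch32 \
    --push_to_hub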
0
0
'''simple docstring''' from .glue import GlueDataset, GlueDataTrainingArguments from .language_modeling import ( LineByLineTextDataset, LineByLineWithRefDataset, LineByLineWithSOPTextDataset, TextDataset, TextDatasetForNextSentencePrediction, ) from .squad import SquadDataset, SquadDataTrainingArguments
710
'''simple docstring''' import numpy as np import torch from torch.utils.data import Dataset from utils import logger class _a (_lowerCamelCase): """simple docstring""" def __init__( self , A__ , A__ ) -> Any: _SCREAMING_SNAKE_CASE = params _SCREAMING_SNAKE_CASE = np.array(A__ ) _SCREAMING_SNAKE_CASE = np.array([len(A__ ) for t in data] ) self.check() self.remove_long_sequences() self.remove_empty_sequences() self.remove_unknown_sequences() self.check() self.print_statistics() def __getitem__( self , A__ ) -> Dict: return (self.token_ids[index], self.lengths[index]) def __len__( self ) -> Tuple: return len(self.lengths ) def UpperCamelCase ( self ) -> Dict: assert len(self.token_ids ) == len(self.lengths ) assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) ) def UpperCamelCase ( self ) -> List[str]: _SCREAMING_SNAKE_CASE = self.params.max_model_input_size _SCREAMING_SNAKE_CASE = self.lengths > max_len logger.info(F"Splitting {sum(A__ )} too long sequences." ) def divide_chunks(A__ , A__ ): return [l[i : i + n] for i in range(0 , len(A__ ) , A__ )] _SCREAMING_SNAKE_CASE = [] _SCREAMING_SNAKE_CASE = [] if self.params.mlm: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.params.special_tok_ids["""cls_token"""], self.params.special_tok_ids["""sep_token"""] else: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.params.special_tok_ids["""bos_token"""], self.params.special_tok_ids["""eos_token"""] for seq_, len_ in zip(self.token_ids , self.lengths ): assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_ if len_ <= max_len: new_tok_ids.append(seq_ ) new_lengths.append(len_ ) else: _SCREAMING_SNAKE_CASE = [] for sub_s in divide_chunks(seq_ , max_len - 2 ): if sub_s[0] != cls_id: _SCREAMING_SNAKE_CASE = np.insert(A__ , 0 , A__ ) if sub_s[-1] != sep_id: _SCREAMING_SNAKE_CASE = np.insert(A__ , len(A__ ) , A__ ) assert len(A__ ) <= max_len assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s sub_seqs.append(A__ ) new_tok_ids.extend(A__ ) new_lengths.extend([len(A__ ) for l in sub_seqs] ) _SCREAMING_SNAKE_CASE = np.array(A__ ) _SCREAMING_SNAKE_CASE = np.array(A__ ) def UpperCamelCase ( self ) -> List[str]: _SCREAMING_SNAKE_CASE = len(self ) _SCREAMING_SNAKE_CASE = self.lengths > 11 _SCREAMING_SNAKE_CASE = self.token_ids[indices] _SCREAMING_SNAKE_CASE = self.lengths[indices] _SCREAMING_SNAKE_CASE = len(self ) logger.info(F"Remove {init_size - new_size} too short (<=11 tokens) sequences." ) def UpperCamelCase ( self ) -> int: if "unk_token" not in self.params.special_tok_ids: return else: _SCREAMING_SNAKE_CASE = self.params.special_tok_ids["""unk_token"""] _SCREAMING_SNAKE_CASE = len(self ) _SCREAMING_SNAKE_CASE = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] ) _SCREAMING_SNAKE_CASE = (unk_occs / self.lengths) < 0.5 _SCREAMING_SNAKE_CASE = self.token_ids[indices] _SCREAMING_SNAKE_CASE = self.lengths[indices] _SCREAMING_SNAKE_CASE = len(self ) logger.info(F"Remove {init_size - new_size} sequences with a high level of unknown tokens (50%)." 
) def UpperCamelCase ( self ) -> Optional[Any]: if not self.params.is_master: return logger.info(F"{len(self )} sequences" ) # data_len = sum(self.lengths) # nb_unique_tokens = len(Counter(list(chain(*self.token_ids)))) # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)') # unk_idx = self.params.special_tok_ids['unk_token'] # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids]) # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)') def UpperCamelCase ( self , A__ ) -> Any: _SCREAMING_SNAKE_CASE = [t[0] for t in batch] _SCREAMING_SNAKE_CASE = [t[1] for t in batch] assert len(A__ ) == len(A__ ) # Max for paddings _SCREAMING_SNAKE_CASE = max(A__ ) # Pad token ids if self.params.mlm: _SCREAMING_SNAKE_CASE = self.params.special_tok_ids["""pad_token"""] else: _SCREAMING_SNAKE_CASE = self.params.special_tok_ids["""unk_token"""] _SCREAMING_SNAKE_CASE = [list(t.astype(A__ ) ) + [pad_idx] * (max_seq_len_ - len(A__ )) for t in token_ids] assert len(tk_ ) == len(A__ ) assert all(len(A__ ) == max_seq_len_ for t in tk_ ) _SCREAMING_SNAKE_CASE = torch.tensor(tk_ ) # (bs, max_seq_len_) _SCREAMING_SNAKE_CASE = torch.tensor(A__ ) # (bs) return tk_t, lg_t
0
0
'''simple docstring''' from typing import List from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase__ : Union[str, Any] = logging.get_logger(__name__) UpperCamelCase__ : Optional[Any] = { "snap-research/efficientformer-l1-300": ( "https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json" ), } class _a (_lowerCamelCase): """simple docstring""" SCREAMING_SNAKE_CASE = 'efficientformer' def __init__( self , A__ = [3, 2, 6, 4] , A__ = [48, 96, 2_24, 4_48] , A__ = [True, True, True, True] , A__ = 4_48 , A__ = 32 , A__ = 4 , A__ = 7 , A__ = 5 , A__ = 8 , A__ = 4 , A__ = 0.0 , A__ = 16 , A__ = 3 , A__ = 3 , A__ = 3 , A__ = 2 , A__ = 1 , A__ = 0.0 , A__ = 1 , A__ = True , A__ = True , A__ = 1E-5 , A__ = "gelu" , A__ = 0.02 , A__ = 1E-12 , A__ = 2_24 , A__ = 1E-05 , **A__ , ) -> None: super().__init__(**A__ ) _SCREAMING_SNAKE_CASE = hidden_act _SCREAMING_SNAKE_CASE = hidden_dropout_prob _SCREAMING_SNAKE_CASE = hidden_sizes _SCREAMING_SNAKE_CASE = num_hidden_layers _SCREAMING_SNAKE_CASE = num_attention_heads _SCREAMING_SNAKE_CASE = initializer_range _SCREAMING_SNAKE_CASE = layer_norm_eps _SCREAMING_SNAKE_CASE = patch_size _SCREAMING_SNAKE_CASE = num_channels _SCREAMING_SNAKE_CASE = depths _SCREAMING_SNAKE_CASE = mlp_expansion_ratio _SCREAMING_SNAKE_CASE = downsamples _SCREAMING_SNAKE_CASE = dim _SCREAMING_SNAKE_CASE = key_dim _SCREAMING_SNAKE_CASE = attention_ratio _SCREAMING_SNAKE_CASE = resolution _SCREAMING_SNAKE_CASE = pool_size _SCREAMING_SNAKE_CASE = downsample_patch_size _SCREAMING_SNAKE_CASE = downsample_stride _SCREAMING_SNAKE_CASE = downsample_pad _SCREAMING_SNAKE_CASE = drop_path_rate _SCREAMING_SNAKE_CASE = num_metaad_blocks _SCREAMING_SNAKE_CASE = distillation _SCREAMING_SNAKE_CASE = use_layer_scale _SCREAMING_SNAKE_CASE = layer_scale_init_value _SCREAMING_SNAKE_CASE = image_size _SCREAMING_SNAKE_CASE = batch_norm_eps
711
'''simple docstring''' import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging UpperCamelCase__ : List[Any] = logging.get_logger(__name__) UpperCamelCase__ : Any = "▁" UpperCamelCase__ : Any = {"vocab_file": "spiece.model"} UpperCamelCase__ : int = { "vocab_file": { "google/reformer-crime-and-punishment": ( "https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model" ) } } UpperCamelCase__ : Optional[int] = { "google/reformer-crime-and-punishment": 524_288, } class _a (_lowerCamelCase): """simple docstring""" SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE = ['input_ids', 'attention_mask'] def __init__( self , A__ , A__="</s>" , A__="<unk>" , A__=[] , A__ = None , **A__ , ) -> None: _SCREAMING_SNAKE_CASE = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( eos_token=A__ , unk_token=A__ , additional_special_tokens=A__ , sp_model_kwargs=self.sp_model_kwargs , **A__ , ) _SCREAMING_SNAKE_CASE = vocab_file _SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(A__ ) @property def UpperCamelCase ( self ) -> Any: return self.sp_model.get_piece_size() def UpperCamelCase ( self ) -> Dict[str, int]: _SCREAMING_SNAKE_CASE = {self.convert_ids_to_tokens(A__ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ) -> int: _SCREAMING_SNAKE_CASE = self.__dict__.copy() _SCREAMING_SNAKE_CASE = None return state def __setstate__( self , A__ ) -> str: _SCREAMING_SNAKE_CASE = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): _SCREAMING_SNAKE_CASE = {} _SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def UpperCamelCase ( self , A__ ) -> List[str]: return self.sp_model.encode(A__ , out_type=A__ ) def UpperCamelCase ( self , A__ ) -> Union[str, Any]: return self.sp_model.piece_to_id(A__ ) def UpperCamelCase ( self , A__ ) -> List[Any]: if index < self.sp_model.get_piece_size(): _SCREAMING_SNAKE_CASE = self.sp_model.IdToPiece(A__ ) return token def UpperCamelCase ( self , A__ ) -> str: _SCREAMING_SNAKE_CASE = [] _SCREAMING_SNAKE_CASE = """""" for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(A__ ) + token _SCREAMING_SNAKE_CASE = [] else: current_sub_tokens.append(A__ ) out_string += self.sp_model.decode(A__ ) return out_string.strip() def UpperCamelCase ( self , A__ , A__ = None ) -> Tuple[str]: if not os.path.isdir(A__ ): logger.error(F"Vocabulary path ({save_directory}) should be a directory" ) return _SCREAMING_SNAKE_CASE = os.path.join( A__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(A__ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , A__ ) elif not os.path.isfile(self.vocab_file ): with open(A__ , """wb""" ) as fi: _SCREAMING_SNAKE_CASE = self.sp_model.serialized_model_proto() fi.write(A__ ) return (out_vocab_file,)
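A short usage sketch, assuming the obfuscated class above is transformers' ReformerTokenizer (its pretrained vocab map points at google/reformer-crime-and-punishment):

from transformers import ReformerTokenizer

tokenizer = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
tokens = tokenizer.tokenize("Crime and Punishment")  # SentencePiece pieces, e.g. "▁Crime", ...
ids = tokenizer.convert_tokens_to_ids(tokens)
print(tokenizer.decode(ids))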
0
0
'''simple docstring''' from __future__ import annotations from collections.abc import Callable from typing import Generic, TypeVar UpperCamelCase__ : Any = TypeVar("T") UpperCamelCase__ : Optional[int] = TypeVar("U") class _a (Generic[T, U]): """simple docstring""" def __init__( self , A__ , A__ ) -> Union[str, Any]: _SCREAMING_SNAKE_CASE = key _SCREAMING_SNAKE_CASE = val _SCREAMING_SNAKE_CASE = None _SCREAMING_SNAKE_CASE = None def __repr__( self ) -> str: return ( F"Node: key: {self.key}, val: {self.val}, " F"has next: {bool(self.next )}, has prev: {bool(self.prev )}" ) class _a (Generic[T, U]): """simple docstring""" def __init__( self ) -> None: _SCREAMING_SNAKE_CASE = DoubleLinkedListNode(A__ , A__ ) _SCREAMING_SNAKE_CASE = DoubleLinkedListNode(A__ , A__ ) _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.rear, self.head def __repr__( self ) -> str: _SCREAMING_SNAKE_CASE = ["""DoubleLinkedList"""] _SCREAMING_SNAKE_CASE = self.head while node.next is not None: rep.append(str(A__ ) ) _SCREAMING_SNAKE_CASE = node.next rep.append(str(self.rear ) ) return ",\n ".join(A__ ) def UpperCamelCase ( self , A__ ) -> None: _SCREAMING_SNAKE_CASE = self.rear.prev # All nodes other than self.head are guaranteed to have non-None previous assert previous is not None _SCREAMING_SNAKE_CASE = node _SCREAMING_SNAKE_CASE = previous _SCREAMING_SNAKE_CASE = node _SCREAMING_SNAKE_CASE = self.rear def UpperCamelCase ( self , A__ ) -> DoubleLinkedListNode[T, U] | None: if node.prev is None or node.next is None: return None _SCREAMING_SNAKE_CASE = node.next _SCREAMING_SNAKE_CASE = node.prev _SCREAMING_SNAKE_CASE = None _SCREAMING_SNAKE_CASE = None return node class _a (Generic[T, U]): """simple docstring""" SCREAMING_SNAKE_CASE = {} def __init__( self , A__ ) -> Optional[Any]: _SCREAMING_SNAKE_CASE = DoubleLinkedList() _SCREAMING_SNAKE_CASE = capacity _SCREAMING_SNAKE_CASE = 0 _SCREAMING_SNAKE_CASE = 0 _SCREAMING_SNAKE_CASE = 0 _SCREAMING_SNAKE_CASE = {} def __repr__( self ) -> str: return ( F"CacheInfo(hits={self.hits}, misses={self.miss}, " F"capacity={self.capacity}, current size={self.num_keys})" ) def __contains__( self , A__ ) -> bool: return key in self.cache def UpperCamelCase ( self , A__ ) -> U | None: # Note: pythonic interface would throw KeyError rather than return None if key in self.cache: self.hits += 1 _SCREAMING_SNAKE_CASE = self.cache[key] _SCREAMING_SNAKE_CASE = self.list.remove(self.cache[key] ) assert node == value_node # node is guaranteed not None because it is in self.cache assert node is not None self.list.add(A__ ) return node.val self.miss += 1 return None def UpperCamelCase ( self , A__ , A__ ) -> None: if key not in self.cache: if self.num_keys >= self.capacity: # delete first node (oldest) when over capacity _SCREAMING_SNAKE_CASE = self.list.head.next # guaranteed to have a non-None first node when num_keys > 0 # explain to type checker via assertions assert first_node is not None assert first_node.key is not None assert ( self.list.remove(A__ ) is not None ) # node guaranteed to be in list assert node.key is not None del self.cache[first_node.key] self.num_keys -= 1 _SCREAMING_SNAKE_CASE = DoubleLinkedListNode(A__ , A__ ) self.list.add(self.cache[key] ) self.num_keys += 1 else: # bump node to the end of the list, update value _SCREAMING_SNAKE_CASE = self.list.remove(self.cache[key] ) assert node is not None # node guaranteed to be in list _SCREAMING_SNAKE_CASE = value self.list.add(A__ ) @classmethod def UpperCamelCase ( cls , A__ = 1_28 ) -> Callable[[Callable[[T], U]], 
Callable[..., U]]: def cache_decorator_inner(A__ ) -> Callable[..., U]: def cache_decorator_wrapper(*A__ ) -> U: if func not in cls.decorator_function_to_instance_map: _SCREAMING_SNAKE_CASE = LRUCache(A__ ) _SCREAMING_SNAKE_CASE = cls.decorator_function_to_instance_map[func].get(args[0] ) if result is None: _SCREAMING_SNAKE_CASE = func(*A__ ) cls.decorator_function_to_instance_map[func].put(args[0] , A__ ) return result def cache_info() -> LRUCache[T, U]: return cls.decorator_function_to_instance_map[func] setattr(A__ , """cache_info""" , A__ ) # noqa: B010 return cache_decorator_wrapper return cache_decorator_inner if __name__ == "__main__": import doctest doctest.testmod()
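A quick usage sketch for the classmethod decorator defined at the end of the class, written with the de-obfuscated names (LRUCache, decorator) its original source uses; note the wrapper caches on args[0] only, so it suits single-argument functions:

@LRUCache.decorator(100)
def fib(num: int) -> int:
    # recursive calls go back through the cached wrapper
    return num if num < 2 else fib(num - 1) + fib(num - 2)

print(fib(30))           # 832040
print(fib.cache_info())  # CacheInfo(hits=..., misses=..., capacity=100, current size=...)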
712
'''simple docstring''' import os import unittest from transformers import MobileBertTokenizer, MobileBertTokenizerFast from transformers.models.bert.tokenization_bert import ( VOCAB_FILES_NAMES, BasicTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english @require_tokenizers class _a (_lowerCamelCase , unittest.TestCase): """simple docstring""" SCREAMING_SNAKE_CASE = MobileBertTokenizer SCREAMING_SNAKE_CASE = MobileBertTokenizerFast SCREAMING_SNAKE_CASE = True SCREAMING_SNAKE_CASE = True SCREAMING_SNAKE_CASE = filter_non_english SCREAMING_SNAKE_CASE = 'google/mobilebert-uncased' def UpperCamelCase ( self ) -> Any: super().setUp() _SCREAMING_SNAKE_CASE = [ """[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing""", """,""", """low""", """lowest""", ] _SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) ) _SCREAMING_SNAKE_CASE = [ (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped for tokenizer_def in self.tokenizers_list ] def UpperCamelCase ( self , A__ ) -> List[str]: _SCREAMING_SNAKE_CASE = """UNwant\u00E9d,running""" _SCREAMING_SNAKE_CASE = """unwanted, running""" return input_text, output_text def UpperCamelCase ( self ) -> Any: _SCREAMING_SNAKE_CASE = self.tokenizer_class(self.vocab_file ) _SCREAMING_SNAKE_CASE = tokenizer.tokenize("""UNwant\u00E9d,running""" ) self.assertListEqual(A__ , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(A__ ) , [9, 6, 7, 12, 10, 11] ) def UpperCamelCase ( self ) -> Optional[int]: if not self.test_rust_tokenizer: return _SCREAMING_SNAKE_CASE = self.get_tokenizer() _SCREAMING_SNAKE_CASE = self.get_rust_tokenizer() _SCREAMING_SNAKE_CASE = """UNwant\u00E9d,running""" _SCREAMING_SNAKE_CASE = tokenizer.tokenize(A__ ) _SCREAMING_SNAKE_CASE = rust_tokenizer.tokenize(A__ ) self.assertListEqual(A__ , A__ ) _SCREAMING_SNAKE_CASE = tokenizer.encode(A__ , add_special_tokens=A__ ) _SCREAMING_SNAKE_CASE = rust_tokenizer.encode(A__ , add_special_tokens=A__ ) self.assertListEqual(A__ , A__ ) _SCREAMING_SNAKE_CASE = self.get_rust_tokenizer() _SCREAMING_SNAKE_CASE = tokenizer.encode(A__ ) _SCREAMING_SNAKE_CASE = rust_tokenizer.encode(A__ ) self.assertListEqual(A__ , A__ ) # With lower casing _SCREAMING_SNAKE_CASE = self.get_tokenizer(do_lower_case=A__ ) _SCREAMING_SNAKE_CASE = self.get_rust_tokenizer(do_lower_case=A__ ) _SCREAMING_SNAKE_CASE = """UNwant\u00E9d,running""" _SCREAMING_SNAKE_CASE = tokenizer.tokenize(A__ ) _SCREAMING_SNAKE_CASE = rust_tokenizer.tokenize(A__ ) self.assertListEqual(A__ , A__ ) _SCREAMING_SNAKE_CASE = tokenizer.encode(A__ , add_special_tokens=A__ ) _SCREAMING_SNAKE_CASE = rust_tokenizer.encode(A__ , add_special_tokens=A__ ) self.assertListEqual(A__ , A__ ) _SCREAMING_SNAKE_CASE = self.get_rust_tokenizer() _SCREAMING_SNAKE_CASE = tokenizer.encode(A__ ) _SCREAMING_SNAKE_CASE = rust_tokenizer.encode(A__ ) self.assertListEqual(A__ , A__ ) def UpperCamelCase ( self ) -> Tuple: _SCREAMING_SNAKE_CASE = BasicTokenizer() self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""" ) , 
["""ah""", """\u535A""", """\u63A8""", """zz"""] ) def UpperCamelCase ( self ) -> List[Any]: _SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=A__ ) self.assertListEqual( tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""hello""", """!""", """how""", """are""", """you""", """?"""] ) self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] ) def UpperCamelCase ( self ) -> Optional[Any]: _SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=A__ , strip_accents=A__ ) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hällo""", """!""", """how""", """are""", """you""", """?"""] ) self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""h\u00E9llo"""] ) def UpperCamelCase ( self ) -> Any: _SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=A__ , strip_accents=A__ ) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] ) self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] ) def UpperCamelCase ( self ) -> Any: _SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=A__ ) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] ) self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] ) def UpperCamelCase ( self ) -> str: _SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=A__ ) self.assertListEqual( tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] ) def UpperCamelCase ( self ) -> Dict: _SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=A__ , strip_accents=A__ ) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""] ) def UpperCamelCase ( self ) -> Union[str, Any]: _SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=A__ , strip_accents=A__ ) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""] ) def UpperCamelCase ( self ) -> str: _SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=A__ , never_split=["""[UNK]"""] ) self.assertListEqual( tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? 
[UNK]""" ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""] ) def UpperCamelCase ( self ) -> Tuple: _SCREAMING_SNAKE_CASE = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""] _SCREAMING_SNAKE_CASE = {} for i, token in enumerate(A__ ): _SCREAMING_SNAKE_CASE = i _SCREAMING_SNAKE_CASE = WordpieceTokenizer(vocab=A__ , unk_token="""[UNK]""" ) self.assertListEqual(tokenizer.tokenize("""""" ) , [] ) self.assertListEqual(tokenizer.tokenize("""unwanted running""" ) , ["""un""", """##want""", """##ed""", """runn""", """##ing"""] ) self.assertListEqual(tokenizer.tokenize("""unwantedX running""" ) , ["""[UNK]""", """runn""", """##ing"""] ) def UpperCamelCase ( self ) -> str: self.assertTrue(_is_whitespace(""" """ ) ) self.assertTrue(_is_whitespace("""\t""" ) ) self.assertTrue(_is_whitespace("""\r""" ) ) self.assertTrue(_is_whitespace("""\n""" ) ) self.assertTrue(_is_whitespace("""\u00A0""" ) ) self.assertFalse(_is_whitespace("""A""" ) ) self.assertFalse(_is_whitespace("""-""" ) ) def UpperCamelCase ( self ) -> Union[str, Any]: self.assertTrue(_is_control("""\u0005""" ) ) self.assertFalse(_is_control("""A""" ) ) self.assertFalse(_is_control(""" """ ) ) self.assertFalse(_is_control("""\t""" ) ) self.assertFalse(_is_control("""\r""" ) ) def UpperCamelCase ( self ) -> Dict: self.assertTrue(_is_punctuation("""-""" ) ) self.assertTrue(_is_punctuation("""$""" ) ) self.assertTrue(_is_punctuation("""`""" ) ) self.assertTrue(_is_punctuation(""".""" ) ) self.assertFalse(_is_punctuation("""A""" ) ) self.assertFalse(_is_punctuation(""" """ ) ) def UpperCamelCase ( self ) -> str: _SCREAMING_SNAKE_CASE = self.get_tokenizer() _SCREAMING_SNAKE_CASE = self.get_rust_tokenizer() # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340 self.assertListEqual([tokenizer.tokenize(A__ ) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] ) self.assertListEqual( [rust_tokenizer.tokenize(A__ ) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] ) @slow def UpperCamelCase ( self ) -> Any: _SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained("""google/mobilebert-uncased""" ) _SCREAMING_SNAKE_CASE = tokenizer.encode("""sequence builders""" , add_special_tokens=A__ ) _SCREAMING_SNAKE_CASE = tokenizer.encode("""multi-sequence build""" , add_special_tokens=A__ ) _SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(A__ ) _SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(A__ , A__ ) assert encoded_sentence == [1_01] + text + [1_02] assert encoded_pair == [1_01] + text + [1_02] + text_a + [1_02] def UpperCamelCase ( self ) -> List[str]: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ): _SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(A__ , **A__ ) _SCREAMING_SNAKE_CASE = F"A, naïve {tokenizer_r.mask_token} AllenNLP sentence." 
_SCREAMING_SNAKE_CASE = tokenizer_r.encode_plus( A__ , return_attention_mask=A__ , return_token_type_ids=A__ , return_offsets_mapping=A__ , add_special_tokens=A__ , ) _SCREAMING_SNAKE_CASE = tokenizer_r.do_lower_case if hasattr(A__ , """do_lower_case""" ) else False _SCREAMING_SNAKE_CASE = ( [ ((0, 0), tokenizer_r.cls_token), ((0, 1), """A"""), ((1, 2), ""","""), ((3, 5), """na"""), ((5, 6), """##ï"""), ((6, 8), """##ve"""), ((9, 15), tokenizer_r.mask_token), ((16, 21), """Allen"""), ((21, 23), """##NL"""), ((23, 24), """##P"""), ((25, 33), """sentence"""), ((33, 34), """."""), ((0, 0), tokenizer_r.sep_token), ] if not do_lower_case else [ ((0, 0), tokenizer_r.cls_token), ((0, 1), """a"""), ((1, 2), ""","""), ((3, 8), """naive"""), ((9, 15), tokenizer_r.mask_token), ((16, 21), """allen"""), ((21, 23), """##nl"""), ((23, 24), """##p"""), ((25, 33), """sentence"""), ((33, 34), """."""), ((0, 0), tokenizer_r.sep_token), ] ) self.assertEqual( [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["""input_ids"""] ) ) self.assertEqual([e[0] for e in expected_results] , tokens["""offset_mapping"""] ) def UpperCamelCase ( self ) -> Any: _SCREAMING_SNAKE_CASE = ["""的""", """人""", """有"""] _SCREAMING_SNAKE_CASE = """""".join(A__ ) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ): _SCREAMING_SNAKE_CASE = True _SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained(A__ , **A__ ) _SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(A__ , **A__ ) _SCREAMING_SNAKE_CASE = tokenizer_p.encode(A__ , add_special_tokens=A__ ) _SCREAMING_SNAKE_CASE = tokenizer_r.encode(A__ , add_special_tokens=A__ ) _SCREAMING_SNAKE_CASE = tokenizer_r.convert_ids_to_tokens(A__ ) _SCREAMING_SNAKE_CASE = tokenizer_p.convert_ids_to_tokens(A__ ) # it is expected that each Chinese character is not preceded by "##" self.assertListEqual(A__ , A__ ) self.assertListEqual(A__ , A__ ) _SCREAMING_SNAKE_CASE = False _SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(A__ , **A__ ) _SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained(A__ , **A__ ) _SCREAMING_SNAKE_CASE = tokenizer_r.encode(A__ , add_special_tokens=A__ ) _SCREAMING_SNAKE_CASE = tokenizer_p.encode(A__ , add_special_tokens=A__ ) _SCREAMING_SNAKE_CASE = tokenizer_r.convert_ids_to_tokens(A__ ) _SCREAMING_SNAKE_CASE = tokenizer_p.convert_ids_to_tokens(A__ ) # it is expected that only the first Chinese character is not preceded by "##". _SCREAMING_SNAKE_CASE = [ F"##{token}" if idx != 0 else token for idx, token in enumerate(A__ ) ] self.assertListEqual(A__ , A__ ) self.assertListEqual(A__ , A__ )
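# --- Illustrative sketch (not part of the test suite above) ---
# The WordpieceTokenizer expectations above ("unwanted running" ->
# ["un", "##want", "##ed", "runn", "##ing"], and "unwantedX" -> ["[UNK]"])
# follow from greedy longest-match-first subword splitting. A minimal
# re-implementation of that algorithm against the same toy vocab might look
# like this; the function name is illustrative, not the transformers API.

def greedy_wordpiece(word, vocab, unk_token="[UNK]", prefix="##"):
    """Greedily split `word` into the longest matching vocab pieces, left to right."""
    pieces, start = [], 0
    while start < len(word):
        end = len(word)
        match = None
        while start < end:
            piece = word[start:end]
            if start > 0:
                piece = prefix + piece  # continuation pieces carry the ## prefix
            if piece in vocab:
                match = piece
                break
            end -= 1
        if match is None:
            return [unk_token]  # any unmatchable span makes the whole word UNK
        pieces.append(match)
        start = end
    return pieces

toy_vocab = {"[UNK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"}
assert greedy_wordpiece("unwanted", toy_vocab) == ["un", "##want", "##ed"]
assert greedy_wordpiece("running", toy_vocab) == ["runn", "##ing"]
assert greedy_wordpiece("unwantedX", toy_vocab) == ["[UNK]"]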
0
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available UpperCamelCase__ : str = { "configuration_longt5": ["LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongT5Config", "LongT5OnnxConfig"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ : Tuple = [ "LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST", "LongT5EncoderModel", "LongT5ForConditionalGeneration", "LongT5Model", "LongT5PreTrainedModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ : Dict = [ "FlaxLongT5ForConditionalGeneration", "FlaxLongT5Model", "FlaxLongT5PreTrainedModel", ] if TYPE_CHECKING: from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_longt5 import ( LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST, LongT5EncoderModel, LongT5ForConditionalGeneration, LongT5Model, LongT5PreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_longt5 import ( FlaxLongT5ForConditionalGeneration, FlaxLongT5Model, FlaxLongT5PreTrainedModel, ) else: import sys UpperCamelCase__ : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
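# --- Illustrative sketch (separate from the module above) ---
# The _LazyModule/_import_structure pattern defers the heavy torch/flax
# imports until an attribute is first accessed. The core mechanism can be
# sketched with module-level __getattr__ (PEP 562); this is a simplified
# stand-in for illustration, not the actual transformers implementation.

import importlib

_LAZY_ATTRS = {
    # attribute name -> submodule that defines it
    "LongT5Config": ".configuration_longt5",
    "LongT5Model": ".modeling_longt5",
}

def __getattr__(name):
    if name in _LAZY_ATTRS:
        submodule = importlib.import_module(_LAZY_ATTRS[name], __package__)
        return getattr(submodule, name)  # imported only on first access
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")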
713
'''simple docstring''' import logging import os import quant_trainer import torch from torch.utils.data import DataLoader from transformers import Trainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput UpperCamelCase__ : Tuple = logging.getLogger(__name__) if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class _a (_lowerCamelCase): """simple docstring""" def __init__( self , *A__ , A__=None , A__=None , A__=None , **A__ ) -> Optional[int]: super().__init__(*A__ , **A__ ) _SCREAMING_SNAKE_CASE = eval_examples _SCREAMING_SNAKE_CASE = post_process_function _SCREAMING_SNAKE_CASE = quant_trainer_args _SCREAMING_SNAKE_CASE = 1_28 # default number of calibration samples def UpperCamelCase ( self , A__=None ) -> Union[str, Any]: if calib_dataset is None and self.calib_dataset is None: raise ValueError("""Trainer: calibration requires a calib_dataset.""" ) _SCREAMING_SNAKE_CASE = calib_dataset if calib_dataset is not None else self.calib_dataset _SCREAMING_SNAKE_CASE = self._remove_unused_columns(A__ , description="""Calibration""" ) return DataLoader( A__ , batch_size=self.args.eval_batch_size , collate_fn=self.data_collator , drop_last=self.args.dataloader_drop_last , num_workers=self.args.dataloader_num_workers , pin_memory=self.args.dataloader_pin_memory , shuffle=A__ , ) def UpperCamelCase ( self , A__=None ) -> str: _SCREAMING_SNAKE_CASE = self.train_dataset if calib_dataset is None else calib_dataset _SCREAMING_SNAKE_CASE = self.get_calib_dataloader(A__ ) _SCREAMING_SNAKE_CASE = self.model quant_trainer.configure_model(A__ , self.quant_trainer_args , calib=A__ ) model.eval() quant_trainer.enable_calibration(A__ ) logger.info("""***** Running calibration *****""" ) logger.info(F" Num examples = {self.calib_num}" ) logger.info(F" Batch size = {calib_dataloader.batch_size}" ) for step, inputs in enumerate(A__ ): # Prediction step _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.prediction_step(A__ , A__ , prediction_loss_only=A__ ) if (step + 1) * calib_dataloader.batch_size >= self.calib_num: break quant_trainer.finish_calibration(A__ , self.quant_trainer_args ) _SCREAMING_SNAKE_CASE = model def UpperCamelCase ( self , A__=None , A__=None , A__=None , A__ = "eval" ) -> List[Any]: _SCREAMING_SNAKE_CASE = self.eval_dataset if eval_dataset is None else eval_dataset _SCREAMING_SNAKE_CASE = self.get_eval_dataloader(A__ ) _SCREAMING_SNAKE_CASE = self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here.
_SCREAMING_SNAKE_CASE = self.compute_metrics _SCREAMING_SNAKE_CASE = None _SCREAMING_SNAKE_CASE = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: _SCREAMING_SNAKE_CASE = eval_loop( A__ , description="""Evaluation""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=A__ , ) finally: _SCREAMING_SNAKE_CASE = compute_metrics if self.post_process_function is not None and self.compute_metrics is not None: _SCREAMING_SNAKE_CASE = self.post_process_function(A__ , A__ , output.predictions ) _SCREAMING_SNAKE_CASE = self.compute_metrics(A__ ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(F"{metric_key_prefix}_" ): _SCREAMING_SNAKE_CASE = metrics.pop(A__ ) self.log(A__ ) else: _SCREAMING_SNAKE_CASE = {} if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) xm.master_print(met.metrics_report() ) _SCREAMING_SNAKE_CASE = self.callback_handler.on_evaluate(self.args , self.state , self.control , A__ ) return metrics def UpperCamelCase ( self , A__ , A__ , A__=None , A__ = "test" ) -> List[str]: _SCREAMING_SNAKE_CASE = self.get_test_dataloader(A__ ) # Temporarily disable metric computation, we will do it in the loop here. _SCREAMING_SNAKE_CASE = self.compute_metrics _SCREAMING_SNAKE_CASE = None _SCREAMING_SNAKE_CASE = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: _SCREAMING_SNAKE_CASE = eval_loop( A__ , description="""Prediction""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=A__ , ) finally: _SCREAMING_SNAKE_CASE = compute_metrics if self.post_process_function is None or self.compute_metrics is None: return output _SCREAMING_SNAKE_CASE = self.post_process_function(A__ , A__ , output.predictions , """predict""" ) _SCREAMING_SNAKE_CASE = self.compute_metrics(A__ ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(F"{metric_key_prefix}_" ): _SCREAMING_SNAKE_CASE = metrics.pop(A__ ) return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=A__ ) def UpperCamelCase ( self , A__="./" ) -> Tuple: _SCREAMING_SNAKE_CASE = self.eval_dataset _SCREAMING_SNAKE_CASE = self.get_eval_dataloader(A__ ) _SCREAMING_SNAKE_CASE = next(iter(A__ ) ) # saving device - to make it consistent _SCREAMING_SNAKE_CASE = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" ) # convert to tuple _SCREAMING_SNAKE_CASE = tuple(v.to(A__ ) for k, v in batch.items() ) logger.info("""Converting model to be onnx compatible""" ) from pytorch_quantization.nn import TensorQuantizer _SCREAMING_SNAKE_CASE = True _SCREAMING_SNAKE_CASE = self.model.to(A__ ) model.eval() model.float() _SCREAMING_SNAKE_CASE = model.module if hasattr(A__ , """module""" ) else model quant_trainer.configure_model(A__ , self.quant_trainer_args ) _SCREAMING_SNAKE_CASE = os.path.join(A__ , """model.onnx""" ) logger.info(F"exporting model to {output_model_file}" ) _SCREAMING_SNAKE_CASE = {0: """batch_size""", 1: """seq_len"""} torch.onnx.export( A__ , A__ , A__ , export_params=A__ , opset_version=13 , do_constant_folding=A__ , input_names=["""input_ids""", """attention_mask""", """token_type_ids"""] , output_names=["""output_start_logits""", """output_end_logits"""] , dynamic_axes={ """input_ids""": axes, """attention_mask""": axes, """token_type_ids""": axes, 
"""output_start_logits""": axes, """output_end_logits""": axes, } , verbose=A__ , ) logger.info("""onnx export finished""" )
0
0
import argparse import collections import torch from flax import traverse_util from tax import checkpoints from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_="attention" ) -> Tuple: """simple docstring""" _SCREAMING_SNAKE_CASE = params[F"{prefix}/layers_{i}/{layer_name}/key/kernel"] _SCREAMING_SNAKE_CASE = params[F"{prefix}/layers_{i}/{layer_name}/out/kernel"] _SCREAMING_SNAKE_CASE = params[F"{prefix}/layers_{i}/{layer_name}/query/kernel"] _SCREAMING_SNAKE_CASE = params[F"{prefix}/layers_{i}/{layer_name}/value/kernel"] return k, o, q, v def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False ) -> Dict: """simple docstring""" if split_mlp_wi: _SCREAMING_SNAKE_CASE = params[F"{prefix}/layers_{i}/mlp/wi_0/kernel"] _SCREAMING_SNAKE_CASE = params[F"{prefix}/layers_{i}/mlp/wi_1/kernel"] _SCREAMING_SNAKE_CASE = (wi_a, wi_a) else: _SCREAMING_SNAKE_CASE = params[F"{prefix}/layers_{i}/mlp/wi/kernel"] _SCREAMING_SNAKE_CASE = params[F"{prefix}/layers_{i}/mlp/wo/kernel"] return wi, wo def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[Any]: """simple docstring""" return params[F"{prefix}/layers_{i}/{layer_name}/scale"] def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , *, SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> int: """simple docstring""" _SCREAMING_SNAKE_CASE = traverse_util.flatten_dict(variables["""target"""] ) _SCREAMING_SNAKE_CASE = {"""/""".join(SCREAMING_SNAKE_CASE_ ): v for k, v in old.items()} # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi _SCREAMING_SNAKE_CASE = """encoder/layers_0/mlp/wi_0/kernel""" in old print("""Split MLP:""" , SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = collections.OrderedDict() # Shared embeddings. _SCREAMING_SNAKE_CASE = old["""token_embedder/embedding"""] # Encoder. for i in range(SCREAMING_SNAKE_CASE_ ): # Block i, layer 0 (Self Attention). _SCREAMING_SNAKE_CASE = tax_layer_norm_lookup(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , """encoder""" , """pre_attention_layer_norm""" ) _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = tax_attention_lookup(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , """encoder""" , """attention""" ) _SCREAMING_SNAKE_CASE = layer_norm _SCREAMING_SNAKE_CASE = k.T _SCREAMING_SNAKE_CASE = o.T _SCREAMING_SNAKE_CASE = q.T _SCREAMING_SNAKE_CASE = v.T # Block i, layer 1 (MLP). _SCREAMING_SNAKE_CASE = tax_layer_norm_lookup(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , """encoder""" , """pre_mlp_layer_norm""" ) _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = tax_mlp_lookup(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , """encoder""" , SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = layer_norm if split_mlp_wi: _SCREAMING_SNAKE_CASE = wi[0].T _SCREAMING_SNAKE_CASE = wi[1].T else: _SCREAMING_SNAKE_CASE = wi.T _SCREAMING_SNAKE_CASE = wo.T _SCREAMING_SNAKE_CASE = old[ """encoder/relpos_bias/rel_embedding""" ].T _SCREAMING_SNAKE_CASE = old["""encoder/encoder_norm/scale"""] if not is_encoder_only: # Decoder. for i in range(SCREAMING_SNAKE_CASE_ ): # Block i, layer 0 (Self Attention). 
_SCREAMING_SNAKE_CASE = tax_layer_norm_lookup(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , """decoder""" , """pre_self_attention_layer_norm""" ) _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = tax_attention_lookup(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , """decoder""" , """self_attention""" ) _SCREAMING_SNAKE_CASE = layer_norm _SCREAMING_SNAKE_CASE = k.T _SCREAMING_SNAKE_CASE = o.T _SCREAMING_SNAKE_CASE = q.T _SCREAMING_SNAKE_CASE = v.T # Block i, layer 1 (Cross Attention). _SCREAMING_SNAKE_CASE = tax_layer_norm_lookup(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , """decoder""" , """pre_cross_attention_layer_norm""" ) _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = tax_attention_lookup(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , """decoder""" , """encoder_decoder_attention""" ) _SCREAMING_SNAKE_CASE = layer_norm _SCREAMING_SNAKE_CASE = k.T _SCREAMING_SNAKE_CASE = o.T _SCREAMING_SNAKE_CASE = q.T _SCREAMING_SNAKE_CASE = v.T # Block i, layer 2 (MLP). _SCREAMING_SNAKE_CASE = tax_layer_norm_lookup(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , """decoder""" , """pre_mlp_layer_norm""" ) _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = tax_mlp_lookup(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , """decoder""" , SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = layer_norm if split_mlp_wi: _SCREAMING_SNAKE_CASE = wi[0].T _SCREAMING_SNAKE_CASE = wi[1].T else: _SCREAMING_SNAKE_CASE = wi.T _SCREAMING_SNAKE_CASE = wo.T _SCREAMING_SNAKE_CASE = old["""decoder/decoder_norm/scale"""] _SCREAMING_SNAKE_CASE = old[ """decoder/relpos_bias/rel_embedding""" ].T # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead) if "decoder/logits_dense/kernel" in old: _SCREAMING_SNAKE_CASE = old["""decoder/logits_dense/kernel"""].T return new def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Dict: """simple docstring""" _SCREAMING_SNAKE_CASE = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] ) # Add what is missing. if "encoder.embed_tokens.weight" not in state_dict: _SCREAMING_SNAKE_CASE = state_dict["""shared.weight"""] if not is_encoder_only: if "decoder.embed_tokens.weight" not in state_dict: _SCREAMING_SNAKE_CASE = state_dict["""shared.weight"""] if "lm_head.weight" not in state_dict: # For old 1.0 models. print("""Using shared word embeddings as lm_head.""" ) _SCREAMING_SNAKE_CASE = state_dict["""shared.weight"""] return state_dict def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Dict: """simple docstring""" _SCREAMING_SNAKE_CASE = checkpoints.load_tax_checkpoint(SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = convert_tax_to_pytorch(SCREAMING_SNAKE_CASE_ , num_layers=config.num_layers , is_encoder_only=SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = make_state_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) model.load_state_dict(SCREAMING_SNAKE_CASE_ , strict=SCREAMING_SNAKE_CASE_ ) def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = False ) -> Optional[Any]: """simple docstring""" _SCREAMING_SNAKE_CASE = TaConfig.from_json_file(SCREAMING_SNAKE_CASE_ ) print(F"Building PyTorch model from configuration: {config}" ) # Non-v1.1 checkpoints could also use T5Model, but this works for all. # The v1.0 checkpoints will simply have an LM head that is the word embeddings. 
if is_encoder_only: _SCREAMING_SNAKE_CASE = TaEncoderModel(SCREAMING_SNAKE_CASE_ ) else: _SCREAMING_SNAKE_CASE = TaForConditionalGeneration(SCREAMING_SNAKE_CASE_ ) # Load weights from tf checkpoint load_tax_weights_in_ta(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Save pytorch-model print(F"Save PyTorch model to {pytorch_dump_path}" ) model.save_pretrained(SCREAMING_SNAKE_CASE_ ) # Verify that we can load the checkpoint. model.from_pretrained(SCREAMING_SNAKE_CASE_ ) print("""Done""" ) if __name__ == "__main__": UpperCamelCase__ : Optional[Any] = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.") # Required parameters parser.add_argument( "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint." ) parser.add_argument( "--config_file", default=None, type=str, required=True, help="The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.", ) parser.add_argument( "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) parser.add_argument( "--is_encoder_only", action="store_true", help="Whether the model is encoder-only (no decoder).", default=False ) UpperCamelCase__ : int = parser.parse_args() convert_tax_checkpoint_to_pytorch( args.t5x_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only )
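# --- Illustrative usage notes (not part of the script above) ---
# Example invocation; the flags mirror the argparse definition above, while
# the script name and paths are hypothetical:
#
#   python convert_t5x_checkpoint_to_pytorch.py \
#       --t5x_checkpoint_path /tmp/t5x/checkpoint_1000000 \
#       --config_file /tmp/t5/config.json \
#       --pytorch_dump_path /tmp/t5_pytorch
#
# The repeated `.T` transposes in the conversion reflect a layout difference:
# Flax/T5X stores dense kernels as (in_features, out_features), while
# torch.nn.Linear keeps weights as (out_features, in_features). A quick
# numpy check of that equivalence:

import numpy as np

x = np.random.randn(2, 4)        # a batch of activations
kernel = np.random.randn(4, 3)   # Flax-style (in, out) kernel
weight = kernel.T                # torch-style (out, in) weight
assert np.allclose(x @ kernel, x @ weight.T)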
714
'''simple docstring''' def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> str: """simple docstring""" return "".join([hex(SCREAMING_SNAKE_CASE_ )[2:].zfill(2 ).upper() for byte in list(SCREAMING_SNAKE_CASE_ )] ) def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> bytes: """simple docstring""" # Check data validity, following RFC3548 # https://www.ietf.org/rfc/rfc3548.txt if (len(SCREAMING_SNAKE_CASE_ ) % 2) != 0: raise ValueError( """Base16 encoded data is invalid: Data does not have an even number of hex digits.""" ) # Check the character set - the standard base16 alphabet # is uppercase according to RFC3548 section 6 if not set(SCREAMING_SNAKE_CASE_ ) <= set("""0123456789ABCDEF""" ): raise ValueError( """Base16 encoded data is invalid: Data is not uppercase hex or it contains invalid characters.""" ) # For every two hexadecimal digits (= a byte), turn it into an integer. # Then, string the result together into bytes, and return it. return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(SCREAMING_SNAKE_CASE_ ) , 2 ) ) if __name__ == "__main__": import doctest doctest.testmod()
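# --- Usage sketch ---
# The encode/decode pair above implements RFC 3548 Base16; the standard
# library's base64 module provides the same behavior, which makes for an
# easy cross-check:

import base64

data = b"Hello World!"
encoded = base64.b16encode(data)          # b'48656C6C6F20576F726C6421'
assert base64.b16decode(encoded) == data
# Lowercase input is rejected unless casefold=True is passed, matching the
# strict uppercase-alphabet check above:
try:
    base64.b16decode(encoded.lower())
except ValueError:
    pass  # non-uppercase hex is invalid Base16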
0
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) UpperCamelCase__ : Any = { "configuration_mobilevit": ["MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileViTConfig", "MobileViTOnnxConfig"], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ : Dict = ["MobileViTFeatureExtractor"] UpperCamelCase__ : int = ["MobileViTImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ : Any = [ "MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST", "MobileViTForImageClassification", "MobileViTForSemanticSegmentation", "MobileViTModel", "MobileViTPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ : Any = [ "TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST", "TFMobileViTForImageClassification", "TFMobileViTForSemanticSegmentation", "TFMobileViTModel", "TFMobileViTPreTrainedModel", ] if TYPE_CHECKING: from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_mobilevit import MobileViTFeatureExtractor from .image_processing_mobilevit import MobileViTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mobilevit import ( MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST, MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel, MobileViTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_mobilevit import ( TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFMobileViTForImageClassification, TFMobileViTForSemanticSegmentation, TFMobileViTModel, TFMobileViTPreTrainedModel, ) else: import sys UpperCamelCase__ : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
715
'''simple docstring''' import pytest import requests from datasets.utils.file_utils import http_head from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline @pytest.mark.integration def lowerCAmelCase_ ( ) -> List[Any]: """simple docstring""" with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ): with pytest.raises(SCREAMING_SNAKE_CASE_ ): requests.request("""GET""" , """https://huggingface.co""" ) with pytest.raises(requests.exceptions.ConnectTimeout ): requests.request("""GET""" , """https://huggingface.co""" , timeout=1.0 ) @pytest.mark.integration def lowerCAmelCase_ ( ) -> int: """simple docstring""" with offline(OfflineSimulationMode.CONNECTION_FAILS ): with pytest.raises(requests.exceptions.ConnectionError ): requests.request("""GET""" , """https://huggingface.co""" ) def lowerCAmelCase_ ( ) -> Optional[Any]: """simple docstring""" with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ): with pytest.raises(SCREAMING_SNAKE_CASE_ ): http_head("""https://huggingface.co""" )
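# --- Illustrative sketch (not the datasets implementation) ---
# The offline() context manager used above simulates network failure. A
# similar effect can be approximated by patching requests at the Session
# level; this is a simplified stand-in for the CONNECTION_FAILS mode only:

from contextlib import contextmanager
from unittest.mock import patch

import requests

@contextmanager
def fake_offline():
    def raise_connection_error(self, *args, **kwargs):
        raise requests.exceptions.ConnectionError("offline (simulated)")
    with patch("requests.Session.request", raise_connection_error):
        yield

with fake_offline():
    try:
        requests.get("https://huggingface.co")
    except requests.exceptions.ConnectionError:
        print("request failed as expected")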
0
0
716
'''simple docstring''' import math from collections.abc import Iterator from itertools import takewhile def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> bool: """simple docstring""" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(SCREAMING_SNAKE_CASE_ ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def lowerCAmelCase_ ( ) -> Iterator[int]: """simple docstring""" _SCREAMING_SNAKE_CASE = 2 while True: if is_prime(SCREAMING_SNAKE_CASE_ ): yield num num += 1 def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ = 2_00_00_00 ) -> int: """simple docstring""" return sum(takewhile(lambda SCREAMING_SNAKE_CASE_ : x < n , prime_generator() ) ) if __name__ == "__main__": print(f"""{solution() = }""")
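# --- Cross-check sketch ---
# The takewhile-over-generator solution above can be validated against a
# simple Sieve of Eratosthenes; both should give Project Euler problem 10's
# published result, 142913828922, for n = 2_000_000:

def sieve_sum(n: int) -> int:
    is_prime = bytearray([1]) * n
    is_prime[0:2] = b"\x00\x00"  # 0 and 1 are not prime
    for p in range(2, int(n**0.5) + 1):
        if is_prime[p]:
            # mark every multiple of p starting at p*p as composite
            is_prime[p * p :: p] = bytearray(len(is_prime[p * p :: p]))
    return sum(i for i in range(n) if is_prime[i])

assert sieve_sum(2_000_000) == 142913828922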
0
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available UpperCamelCase__ : int = { "configuration_poolformer": [ "POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "PoolFormerConfig", "PoolFormerOnnxConfig", ] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ : Optional[int] = ["PoolFormerFeatureExtractor"] UpperCamelCase__ : Dict = ["PoolFormerImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ : Tuple = [ "POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "PoolFormerForImageClassification", "PoolFormerModel", "PoolFormerPreTrainedModel", ] if TYPE_CHECKING: from .configuration_poolformer import ( POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, PoolFormerConfig, PoolFormerOnnxConfig, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_poolformer import PoolFormerFeatureExtractor from .image_processing_poolformer import PoolFormerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_poolformer import ( POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, PoolFormerForImageClassification, PoolFormerModel, PoolFormerPreTrainedModel, ) else: import sys UpperCamelCase__ : Any = _LazyModule(__name__, globals()["__file__"], _import_structure)
717
'''simple docstring''' import unittest import numpy as np from transformers.testing_utils import require_flax, require_tf, require_torch from transformers.utils import ( expand_dims, flatten_dict, is_flax_available, is_tf_available, is_torch_available, reshape, squeeze, transpose, ) if is_flax_available(): import jax.numpy as jnp if is_tf_available(): import tensorflow as tf if is_torch_available(): import torch class _a (unittest.TestCase): """simple docstring""" def UpperCamelCase ( self ) -> Optional[Any]: _SCREAMING_SNAKE_CASE = { """task_specific_params""": { """summarization""": {"""length_penalty""": 1.0, """max_length""": 1_28, """min_length""": 12, """num_beams""": 4}, """summarization_cnn""": {"""length_penalty""": 2.0, """max_length""": 1_42, """min_length""": 56, """num_beams""": 4}, """summarization_xsum""": {"""length_penalty""": 1.0, """max_length""": 62, """min_length""": 11, """num_beams""": 6}, } } _SCREAMING_SNAKE_CASE = { """task_specific_params.summarization.length_penalty""": 1.0, """task_specific_params.summarization.max_length""": 1_28, """task_specific_params.summarization.min_length""": 12, """task_specific_params.summarization.num_beams""": 4, """task_specific_params.summarization_cnn.length_penalty""": 2.0, """task_specific_params.summarization_cnn.max_length""": 1_42, """task_specific_params.summarization_cnn.min_length""": 56, """task_specific_params.summarization_cnn.num_beams""": 4, """task_specific_params.summarization_xsum.length_penalty""": 1.0, """task_specific_params.summarization_xsum.max_length""": 62, """task_specific_params.summarization_xsum.min_length""": 11, """task_specific_params.summarization_xsum.num_beams""": 6, } self.assertEqual(flatten_dict(A__ ) , A__ ) def UpperCamelCase ( self ) -> int: _SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 ) self.assertTrue(np.allclose(transpose(A__ ) , x.transpose() ) ) _SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 , 5 ) self.assertTrue(np.allclose(transpose(A__ , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) ) @require_torch def UpperCamelCase ( self ) -> Optional[Any]: _SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 ) _SCREAMING_SNAKE_CASE = torch.tensor(A__ ) self.assertTrue(np.allclose(transpose(A__ ) , transpose(A__ ).numpy() ) ) _SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 , 5 ) _SCREAMING_SNAKE_CASE = torch.tensor(A__ ) self.assertTrue(np.allclose(transpose(A__ , axes=(1, 2, 0) ) , transpose(A__ , axes=(1, 2, 0) ).numpy() ) ) @require_tf def UpperCamelCase ( self ) -> Optional[int]: _SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 ) _SCREAMING_SNAKE_CASE = tf.constant(A__ ) self.assertTrue(np.allclose(transpose(A__ ) , transpose(A__ ).numpy() ) ) _SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 , 5 ) _SCREAMING_SNAKE_CASE = tf.constant(A__ ) self.assertTrue(np.allclose(transpose(A__ , axes=(1, 2, 0) ) , transpose(A__ , axes=(1, 2, 0) ).numpy() ) ) @require_flax def UpperCamelCase ( self ) -> List[str]: _SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 ) _SCREAMING_SNAKE_CASE = jnp.array(A__ ) self.assertTrue(np.allclose(transpose(A__ ) , np.asarray(transpose(A__ ) ) ) ) _SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 , 5 ) _SCREAMING_SNAKE_CASE = jnp.array(A__ ) self.assertTrue(np.allclose(transpose(A__ , axes=(1, 2, 0) ) , np.asarray(transpose(A__ , axes=(1, 2, 0) ) ) ) ) def UpperCamelCase ( self ) -> Optional[int]: _SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 ) self.assertTrue(np.allclose(reshape(A__ , (4, 3) ) , np.reshape(A__ , (4, 3) ) ) ) _SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 , 5 ) 
self.assertTrue(np.allclose(reshape(A__ , (12, 5) ) , np.reshape(A__ , (12, 5) ) ) ) @require_torch def UpperCamelCase ( self ) -> Optional[int]: _SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 ) _SCREAMING_SNAKE_CASE = torch.tensor(A__ ) self.assertTrue(np.allclose(reshape(A__ , (4, 3) ) , reshape(A__ , (4, 3) ).numpy() ) ) _SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 , 5 ) _SCREAMING_SNAKE_CASE = torch.tensor(A__ ) self.assertTrue(np.allclose(reshape(A__ , (12, 5) ) , reshape(A__ , (12, 5) ).numpy() ) ) @require_tf def UpperCamelCase ( self ) -> Tuple: _SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 ) _SCREAMING_SNAKE_CASE = tf.constant(A__ ) self.assertTrue(np.allclose(reshape(A__ , (4, 3) ) , reshape(A__ , (4, 3) ).numpy() ) ) _SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 , 5 ) _SCREAMING_SNAKE_CASE = tf.constant(A__ ) self.assertTrue(np.allclose(reshape(A__ , (12, 5) ) , reshape(A__ , (12, 5) ).numpy() ) ) @require_flax def UpperCamelCase ( self ) -> List[Any]: _SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 ) _SCREAMING_SNAKE_CASE = jnp.array(A__ ) self.assertTrue(np.allclose(reshape(A__ , (4, 3) ) , np.asarray(reshape(A__ , (4, 3) ) ) ) ) _SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 , 5 ) _SCREAMING_SNAKE_CASE = jnp.array(A__ ) self.assertTrue(np.allclose(reshape(A__ , (12, 5) ) , np.asarray(reshape(A__ , (12, 5) ) ) ) ) def UpperCamelCase ( self ) -> Any: _SCREAMING_SNAKE_CASE = np.random.randn(1 , 3 , 4 ) self.assertTrue(np.allclose(squeeze(A__ ) , np.squeeze(A__ ) ) ) _SCREAMING_SNAKE_CASE = np.random.randn(1 , 4 , 1 , 5 ) self.assertTrue(np.allclose(squeeze(A__ , axis=2 ) , np.squeeze(A__ , axis=2 ) ) ) @require_torch def UpperCamelCase ( self ) -> Any: _SCREAMING_SNAKE_CASE = np.random.randn(1 , 3 , 4 ) _SCREAMING_SNAKE_CASE = torch.tensor(A__ ) self.assertTrue(np.allclose(squeeze(A__ ) , squeeze(A__ ).numpy() ) ) _SCREAMING_SNAKE_CASE = np.random.randn(1 , 4 , 1 , 5 ) _SCREAMING_SNAKE_CASE = torch.tensor(A__ ) self.assertTrue(np.allclose(squeeze(A__ , axis=2 ) , squeeze(A__ , axis=2 ).numpy() ) ) @require_tf def UpperCamelCase ( self ) -> List[str]: _SCREAMING_SNAKE_CASE = np.random.randn(1 , 3 , 4 ) _SCREAMING_SNAKE_CASE = tf.constant(A__ ) self.assertTrue(np.allclose(squeeze(A__ ) , squeeze(A__ ).numpy() ) ) _SCREAMING_SNAKE_CASE = np.random.randn(1 , 4 , 1 , 5 ) _SCREAMING_SNAKE_CASE = tf.constant(A__ ) self.assertTrue(np.allclose(squeeze(A__ , axis=2 ) , squeeze(A__ , axis=2 ).numpy() ) ) @require_flax def UpperCamelCase ( self ) -> Optional[int]: _SCREAMING_SNAKE_CASE = np.random.randn(1 , 3 , 4 ) _SCREAMING_SNAKE_CASE = jnp.array(A__ ) self.assertTrue(np.allclose(squeeze(A__ ) , np.asarray(squeeze(A__ ) ) ) ) _SCREAMING_SNAKE_CASE = np.random.randn(1 , 4 , 1 , 5 ) _SCREAMING_SNAKE_CASE = jnp.array(A__ ) self.assertTrue(np.allclose(squeeze(A__ , axis=2 ) , np.asarray(squeeze(A__ , axis=2 ) ) ) ) def UpperCamelCase ( self ) -> Optional[Any]: _SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 ) self.assertTrue(np.allclose(expand_dims(A__ , axis=1 ) , np.expand_dims(A__ , axis=1 ) ) ) @require_torch def UpperCamelCase ( self ) -> Optional[int]: _SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 ) _SCREAMING_SNAKE_CASE = torch.tensor(A__ ) self.assertTrue(np.allclose(expand_dims(A__ , axis=1 ) , expand_dims(A__ , axis=1 ).numpy() ) ) @require_tf def UpperCamelCase ( self ) -> str: _SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 ) _SCREAMING_SNAKE_CASE = tf.constant(A__ ) self.assertTrue(np.allclose(expand_dims(A__ , axis=1 ) , expand_dims(A__ , axis=1 ).numpy() ) ) @require_flax def 
UpperCamelCase ( self ) -> Any: _SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 ) _SCREAMING_SNAKE_CASE = jnp.array(A__ ) self.assertTrue(np.allclose(expand_dims(A__ , axis=1 ) , np.asarray(expand_dims(A__ , axis=1 ) ) ) )
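# --- Illustrative sketch (not the transformers source) ---
# The utilities exercised above (transpose, reshape, squeeze, expand_dims)
# dispatch on the array type so a single function accepts numpy, torch, tf,
# and jax inputs alike. The shape of that dispatch can be sketched with a
# type check; only the numpy branch is spelled out here:

import numpy as np

def transpose_like(array, axes=None):
    if isinstance(array, np.ndarray):
        return np.transpose(array, axes=axes)
    # torch tensors would use .permute for arbitrary axes, tf would use
    # tf.transpose, and jax mirrors the numpy API; those branches are
    # elided in this sketch.
    raise TypeError(f"unsupported array type: {type(array)}")

x = np.random.randn(3, 4, 5)
assert np.allclose(transpose_like(x, (1, 2, 0)), x.transpose((1, 2, 0)))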
0
0
'''simple docstring''' import os import tempfile import unittest import numpy as np from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import require_flax, slow if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline @require_flax class _a (unittest.TestCase): """simple docstring""" def UpperCamelCase ( self ) -> str: with tempfile.TemporaryDirectory() as tmpdirname: # pipeline has Flax weights _SCREAMING_SNAKE_CASE = FlaxDiffusionPipeline.from_pretrained( """hf-internal-testing/tiny-stable-diffusion-pipe""" , safety_checker=A__ , cache_dir=A__ ) _SCREAMING_SNAKE_CASE = [t[-1] for t in os.walk(os.path.join(A__ , os.listdir(A__ )[0] , """snapshots""" ) )] _SCREAMING_SNAKE_CASE = [item for sublist in all_root_files for item in sublist] # None of the downloaded files should be a PyTorch file even if we have some here: # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin assert not any(f.endswith(""".bin""" ) for f in files ) @slow @require_flax class _a (unittest.TestCase): """simple docstring""" def UpperCamelCase ( self ) -> Optional[Any]: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = FlaxStableDiffusionPipeline.from_pretrained( """hf-internal-testing/tiny-stable-diffusion-pipe""" , safety_checker=A__ ) _SCREAMING_SNAKE_CASE = ( """A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of""" """ field, close up, split lighting, cinematic""" ) _SCREAMING_SNAKE_CASE = jax.random.PRNGKey(0 ) _SCREAMING_SNAKE_CASE = 4 _SCREAMING_SNAKE_CASE = jax.device_count() _SCREAMING_SNAKE_CASE = num_samples * [prompt] _SCREAMING_SNAKE_CASE = pipeline.prepare_inputs(A__ ) # shard inputs and rng _SCREAMING_SNAKE_CASE = replicate(A__ ) _SCREAMING_SNAKE_CASE = jax.random.split(A__ , A__ ) _SCREAMING_SNAKE_CASE = shard(A__ ) _SCREAMING_SNAKE_CASE = pipeline(A__ , A__ , A__ , A__ , jit=A__ ).images assert images.shape == (num_samples, 1, 64, 64, 3) if jax.device_count() == 8: assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 4.151_4745 ) < 1E-3 assert np.abs(np.abs(A__ , dtype=np.floataa ).sum() - 4_99_47.8_75 ) < 5E-1 _SCREAMING_SNAKE_CASE = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) ) assert len(A__ ) == num_samples def UpperCamelCase ( self ) -> Optional[int]: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = FlaxStableDiffusionPipeline.from_pretrained( """CompVis/stable-diffusion-v1-4""" , revision="""flax""" , safety_checker=A__ ) _SCREAMING_SNAKE_CASE = ( """A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of""" """ field, close up, split lighting, cinematic""" ) _SCREAMING_SNAKE_CASE = jax.random.PRNGKey(0 ) _SCREAMING_SNAKE_CASE = 50 _SCREAMING_SNAKE_CASE = jax.device_count() _SCREAMING_SNAKE_CASE = num_samples * [prompt] _SCREAMING_SNAKE_CASE = pipeline.prepare_inputs(A__ ) # shard inputs and rng _SCREAMING_SNAKE_CASE = replicate(A__ ) _SCREAMING_SNAKE_CASE = jax.random.split(A__ , A__ ) _SCREAMING_SNAKE_CASE = shard(A__ ) _SCREAMING_SNAKE_CASE = pipeline(A__ , A__ , A__ , A__ , jit=A__ ).images assert images.shape == (num_samples, 1, 5_12, 5_12, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0565_2401) ) < 
1E-3 assert np.abs((np.abs(A__ , dtype=np.floataa ).sum() - 2_38_38_08.2) ) < 5E-1 def UpperCamelCase ( self ) -> Any: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = FlaxStableDiffusionPipeline.from_pretrained( """CompVis/stable-diffusion-v1-4""" , revision="""bf16""" , dtype=jnp.bfloataa , safety_checker=A__ ) _SCREAMING_SNAKE_CASE = ( """A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of""" """ field, close up, split lighting, cinematic""" ) _SCREAMING_SNAKE_CASE = jax.random.PRNGKey(0 ) _SCREAMING_SNAKE_CASE = 50 _SCREAMING_SNAKE_CASE = jax.device_count() _SCREAMING_SNAKE_CASE = num_samples * [prompt] _SCREAMING_SNAKE_CASE = pipeline.prepare_inputs(A__ ) # shard inputs and rng _SCREAMING_SNAKE_CASE = replicate(A__ ) _SCREAMING_SNAKE_CASE = jax.random.split(A__ , A__ ) _SCREAMING_SNAKE_CASE = shard(A__ ) _SCREAMING_SNAKE_CASE = pipeline(A__ , A__ , A__ , A__ , jit=A__ ).images assert images.shape == (num_samples, 1, 5_12, 5_12, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0400_3906) ) < 1E-3 assert np.abs((np.abs(A__ , dtype=np.floataa ).sum() - 2_37_35_16.75) ) < 5E-1 def UpperCamelCase ( self ) -> int: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = FlaxStableDiffusionPipeline.from_pretrained( """CompVis/stable-diffusion-v1-4""" , revision="""bf16""" , dtype=jnp.bfloataa ) _SCREAMING_SNAKE_CASE = ( """A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of""" """ field, close up, split lighting, cinematic""" ) _SCREAMING_SNAKE_CASE = jax.random.PRNGKey(0 ) _SCREAMING_SNAKE_CASE = 50 _SCREAMING_SNAKE_CASE = jax.device_count() _SCREAMING_SNAKE_CASE = num_samples * [prompt] _SCREAMING_SNAKE_CASE = pipeline.prepare_inputs(A__ ) # shard inputs and rng _SCREAMING_SNAKE_CASE = replicate(A__ ) _SCREAMING_SNAKE_CASE = jax.random.split(A__ , A__ ) _SCREAMING_SNAKE_CASE = shard(A__ ) _SCREAMING_SNAKE_CASE = pipeline(A__ , A__ , A__ , A__ , jit=A__ ).images assert images.shape == (num_samples, 1, 5_12, 5_12, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0400_3906) ) < 1E-3 assert np.abs((np.abs(A__ , dtype=np.floataa ).sum() - 2_37_35_16.75) ) < 5E-1 def UpperCamelCase ( self ) -> Tuple: _SCREAMING_SNAKE_CASE = FlaxDDIMScheduler( beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , set_alpha_to_one=A__ , steps_offset=1 , ) _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = FlaxStableDiffusionPipeline.from_pretrained( """CompVis/stable-diffusion-v1-4""" , revision="""bf16""" , dtype=jnp.bfloataa , scheduler=A__ , safety_checker=A__ , ) _SCREAMING_SNAKE_CASE = scheduler.create_state() _SCREAMING_SNAKE_CASE = scheduler_state _SCREAMING_SNAKE_CASE = ( """A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of""" """ field, close up, split lighting, cinematic""" ) _SCREAMING_SNAKE_CASE = jax.random.PRNGKey(0 ) _SCREAMING_SNAKE_CASE = 50 _SCREAMING_SNAKE_CASE = jax.device_count() _SCREAMING_SNAKE_CASE = num_samples * [prompt] _SCREAMING_SNAKE_CASE = pipeline.prepare_inputs(A__ ) # shard inputs and rng _SCREAMING_SNAKE_CASE = replicate(A__ ) _SCREAMING_SNAKE_CASE = jax.random.split(A__ , A__ ) _SCREAMING_SNAKE_CASE = shard(A__ ) _SCREAMING_SNAKE_CASE = pipeline(A__ , A__ , A__ , A__ , jit=A__ ).images assert images.shape == (num_samples, 1, 5_12, 5_12, 3) if jax.device_count() == 8: assert 
np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_4504_3945) ) < 1E-3 assert np.abs((np.abs(A__ , dtype=np.floataa ).sum() - 2_34_76_93.5) ) < 5E-1 def UpperCamelCase ( self ) -> Any: _SCREAMING_SNAKE_CASE = ( """A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of""" """ field, close up, split lighting, cinematic""" ) _SCREAMING_SNAKE_CASE = jax.device_count() _SCREAMING_SNAKE_CASE = num_samples * [prompt] _SCREAMING_SNAKE_CASE = jax.random.split(jax.random.PRNGKey(0 ) , A__ ) _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = FlaxStableDiffusionPipeline.from_pretrained( """CompVis/stable-diffusion-v1-4""" , revision="""bf16""" , dtype=jnp.bfloataa , safety_checker=A__ , ) _SCREAMING_SNAKE_CASE = replicate(A__ ) _SCREAMING_SNAKE_CASE = pipeline.prepare_inputs(A__ ) _SCREAMING_SNAKE_CASE = shard(A__ ) _SCREAMING_SNAKE_CASE = pipeline(A__ , A__ , A__ , jit=A__ ).images assert images.shape == (num_samples, 1, 5_12, 5_12, 3) _SCREAMING_SNAKE_CASE = images[2, 0, 2_56, 10:17, 1] # With memory efficient attention _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = FlaxStableDiffusionPipeline.from_pretrained( """CompVis/stable-diffusion-v1-4""" , revision="""bf16""" , dtype=jnp.bfloataa , safety_checker=A__ , use_memory_efficient_attention=A__ , ) _SCREAMING_SNAKE_CASE = replicate(A__ ) _SCREAMING_SNAKE_CASE = pipeline.prepare_inputs(A__ ) _SCREAMING_SNAKE_CASE = shard(A__ ) _SCREAMING_SNAKE_CASE = pipeline(A__ , A__ , A__ , jit=A__ ).images assert images_eff.shape == (num_samples, 1, 5_12, 5_12, 3) _SCREAMING_SNAKE_CASE = images[2, 0, 2_56, 10:17, 1] # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum` # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now. assert abs(slice_eff - slice ).max() < 1E-2
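# --- Illustrative sketch ---
# The prepare_inputs/replicate/shard calls above follow the standard pmap
# data layout: shard() reshapes the leading batch axis into
# (num_devices, batch_per_device, ...). The reshape itself is easy to
# reproduce with numpy (77 stands in for a tokenized prompt length):

import numpy as np

num_devices = 8  # stand-in for jax.device_count()
batch = np.arange(8 * 77).reshape(8, 77)
sharded = batch.reshape((num_devices, -1) + batch.shape[1:])
assert sharded.shape == (8, 1, 77)  # one example per device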
718
'''simple docstring''' from pathlib import PurePosixPath from typing import Optional import fsspec from fsspec import AbstractFileSystem from huggingface_hub.hf_api import DatasetInfo from ..utils.file_utils import get_authentication_headers_for_url from ..utils.hub import hf_hub_url class _a (_lowerCamelCase): """simple docstring""" SCREAMING_SNAKE_CASE = '' SCREAMING_SNAKE_CASE = 'hf-legacy' # "hf://"" is reserved for hffs def __init__( self , A__ = None , A__ = None , **A__ , ) -> Optional[int]: super().__init__(self , **A__ ) _SCREAMING_SNAKE_CASE = repo_info _SCREAMING_SNAKE_CASE = token _SCREAMING_SNAKE_CASE = None def UpperCamelCase ( self ) -> Tuple: if self.dir_cache is None: _SCREAMING_SNAKE_CASE = {} for hf_file in self.repo_info.siblings: # TODO(QL): add sizes _SCREAMING_SNAKE_CASE = { """name""": hf_file.rfilename, """size""": None, """type""": """file""", } self.dir_cache.update( { str(A__ ): {"""name""": str(A__ ), """size""": None, """type""": """directory"""} for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1] } ) def UpperCamelCase ( self , A__ , A__ = "rb" , **A__ , ) -> Optional[int]: if not isinstance(self.repo_info , A__ ): raise NotImplementedError(F"Open is only implemented for dataset repositories, but got {self.repo_info}" ) _SCREAMING_SNAKE_CASE = hf_hub_url(self.repo_info.id , A__ , revision=self.repo_info.sha ) return fsspec.open( A__ , mode=A__ , headers=get_authentication_headers_for_url(A__ , use_auth_token=self.token ) , client_kwargs={"""trust_env""": True} , ).open() def UpperCamelCase ( self , A__ , **A__ ) -> str: self._get_dirs() _SCREAMING_SNAKE_CASE = self._strip_protocol(A__ ) if path in self.dir_cache: return self.dir_cache[path] else: raise FileNotFoundError(A__ ) def UpperCamelCase ( self , A__ , A__=False , **A__ ) -> List[Any]: self._get_dirs() _SCREAMING_SNAKE_CASE = PurePosixPath(path.strip("""/""" ) ) _SCREAMING_SNAKE_CASE = {} for p, f in self.dir_cache.items(): _SCREAMING_SNAKE_CASE = PurePosixPath(p.strip("""/""" ) ) _SCREAMING_SNAKE_CASE = p.parent if root == path: _SCREAMING_SNAKE_CASE = f _SCREAMING_SNAKE_CASE = list(paths.values() ) if detail: return out else: return sorted(f["""name"""] for f in out )
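# Illustration of how the directory cache above derives directory entries from a flat
# list of sibling filenames: PurePosixPath(...).parents walks up to the root, and the
# [:-1] slice drops the root ('.') itself. The filename is a made-up example.
from pathlib import PurePosixPath

rfilename = "data/nested/train.csv"
print(list(PurePosixPath(rfilename).parents)[:-1])
# [PurePosixPath('data/nested'), PurePosixPath('data')] -> each becomes a "directory" entry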
0
0
from scipy.stats import pearsonr import datasets UpperCamelCase__ : str = "\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n" UpperCamelCase__ : str = "\nArgs:\n    predictions (`list` of `int`): Predicted class labels, as returned by a model.\n    references (`list` of `int`): Ground truth labels.\n    return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n    pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n    Example 1-A simple example using only predictions and references.\n        >>> pearsonr_metric = datasets.load_metric(\"pearsonr\")\n        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n        >>> print(round(results['pearsonr'], 2))\n        -0.74\n\n    Example 2-The same as Example 1, but that also returns the `p-value`.\n        >>> pearsonr_metric = datasets.load_metric(\"pearsonr\")\n        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n        >>> print(sorted(list(results.keys())))\n        ['p-value', 'pearsonr']\n        >>> print(round(results['pearsonr'], 2))\n        -0.74\n        >>> print(round(results['p-value'], 2))\n        0.15\n" UpperCamelCase__ : Dict = "\n@article{2020SciPy-NMeth,\nauthor  = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n            Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n            Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n            Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n            Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n            Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n            Kern, Robert and Larson, Eric and Carey, C J and\n            Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n            {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n            Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n            Harris, Charles R. and Archibald, Anne M. and\n            Ribeiro, Antonio H. 
and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION) class _a (datasets.Metric): """simple docstring""" def UpperCamelCase ( self ) -> Union[str, Any]: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Value("""float""" ), """references""": datasets.Value("""float""" ), } ) , reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"""] , ) def UpperCamelCase ( self , A__ , A__ , A__=False ) -> Tuple: if return_pvalue: _SCREAMING_SNAKE_CASE = pearsonr(A__ , A__ ) return {"pearsonr": results[0], "p-value": results[1]} else: return {"pearsonr": float(pearsonr(A__ , A__ )[0] )}
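# The metric above is a thin wrapper around scipy.stats.pearsonr; the docstring's
# numbers can be reproduced directly (inputs taken from the example above):
from scipy.stats import pearsonr as _scipy_pearsonr

r, p = _scipy_pearsonr([10, 9, 2.5, 6, 4], [1, 2, 3, 4, 5])
print(round(r, 2), round(p, 2))  # -0.74 0.15, matching the documented example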
719
'''simple docstring''' import pyarrow.parquet as pq import pytest from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config from datasets.features.image import Image from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]: """simple docstring""" assert isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""keep_in_memory""" , [False, True] ) def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[Any]: """simple docstring""" _SCREAMING_SNAKE_CASE = tmp_path / """cache""" _SCREAMING_SNAKE_CASE = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): _SCREAMING_SNAKE_CASE = ParquetDatasetReader(SCREAMING_SNAKE_CASE_ , cache_dir=SCREAMING_SNAKE_CASE_ , keep_in_memory=SCREAMING_SNAKE_CASE_ ).read() _check_parquet_dataset(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) @pytest.mark.parametrize( """features""" , [ None, {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}, {"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""}, {"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""}, {"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""}, ] , ) def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]: """simple docstring""" _SCREAMING_SNAKE_CASE = tmp_path / """cache""" _SCREAMING_SNAKE_CASE = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _SCREAMING_SNAKE_CASE = features.copy() if features else default_expected_features _SCREAMING_SNAKE_CASE = ( Features({feature: Value(SCREAMING_SNAKE_CASE_ ) for feature, dtype in features.items()} ) if features is not None else None ) _SCREAMING_SNAKE_CASE = ParquetDatasetReader(SCREAMING_SNAKE_CASE_ , features=SCREAMING_SNAKE_CASE_ , cache_dir=SCREAMING_SNAKE_CASE_ ).read() _check_parquet_dataset(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) @pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] ) def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[str]: """simple docstring""" _SCREAMING_SNAKE_CASE = tmp_path / """cache""" _SCREAMING_SNAKE_CASE = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _SCREAMING_SNAKE_CASE = ParquetDatasetReader(SCREAMING_SNAKE_CASE_ , cache_dir=SCREAMING_SNAKE_CASE_ , split=SCREAMING_SNAKE_CASE_ ).read() _check_parquet_dataset(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) assert dataset.split == split if split else "train" @pytest.mark.parametrize("""path_type""" , [str, list] ) def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]: """simple docstring""" if issubclass(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): _SCREAMING_SNAKE_CASE = parquet_path elif 
issubclass(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): _SCREAMING_SNAKE_CASE = [parquet_path] _SCREAMING_SNAKE_CASE = tmp_path / """cache""" _SCREAMING_SNAKE_CASE = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _SCREAMING_SNAKE_CASE = ParquetDatasetReader(SCREAMING_SNAKE_CASE_ , cache_dir=SCREAMING_SNAKE_CASE_ ).read() _check_parquet_dataset(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=("train",) ) -> List[str]: """simple docstring""" assert isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for split in splits: _SCREAMING_SNAKE_CASE = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""keep_in_memory""" , [False, True] ) def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[Any]: """simple docstring""" _SCREAMING_SNAKE_CASE = tmp_path / """cache""" _SCREAMING_SNAKE_CASE = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): _SCREAMING_SNAKE_CASE = ParquetDatasetReader( {"""train""": parquet_path} , cache_dir=SCREAMING_SNAKE_CASE_ , keep_in_memory=SCREAMING_SNAKE_CASE_ ).read() _check_parquet_datasetdict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) @pytest.mark.parametrize( """features""" , [ None, {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}, {"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""}, {"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""}, {"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""}, ] , ) def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[Any]: """simple docstring""" _SCREAMING_SNAKE_CASE = tmp_path / """cache""" _SCREAMING_SNAKE_CASE = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _SCREAMING_SNAKE_CASE = features.copy() if features else default_expected_features _SCREAMING_SNAKE_CASE = ( Features({feature: Value(SCREAMING_SNAKE_CASE_ ) for feature, dtype in features.items()} ) if features is not None else None ) _SCREAMING_SNAKE_CASE = ParquetDatasetReader({"""train""": parquet_path} , features=SCREAMING_SNAKE_CASE_ , cache_dir=SCREAMING_SNAKE_CASE_ ).read() _check_parquet_datasetdict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) @pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] ) def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[Any]: """simple docstring""" if split: _SCREAMING_SNAKE_CASE = {split: parquet_path} else: _SCREAMING_SNAKE_CASE = """train""" _SCREAMING_SNAKE_CASE = {"""train""": parquet_path, """test""": parquet_path} _SCREAMING_SNAKE_CASE = tmp_path / """cache""" _SCREAMING_SNAKE_CASE = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _SCREAMING_SNAKE_CASE = ParquetDatasetReader(SCREAMING_SNAKE_CASE_ , cache_dir=SCREAMING_SNAKE_CASE_ ).read() _check_parquet_datasetdict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , splits=list(path.keys() ) ) assert 
all(dataset[split].split == split for split in path.keys() ) def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Dict: """simple docstring""" _SCREAMING_SNAKE_CASE = ParquetDatasetWriter(SCREAMING_SNAKE_CASE_ , tmp_path / """foo.parquet""" ) assert writer.write() > 0 _SCREAMING_SNAKE_CASE = pq.ParquetFile(tmp_path / """foo.parquet""" ) _SCREAMING_SNAKE_CASE = pf.read() assert dataset.data.table == output_table def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[Any]: """simple docstring""" _SCREAMING_SNAKE_CASE = str(shared_datadir / """test_image_rgb.jpg""" ) _SCREAMING_SNAKE_CASE = {"""image""": [image_path]} _SCREAMING_SNAKE_CASE = Features({"""image""": Image()} ) _SCREAMING_SNAKE_CASE = Dataset.from_dict(SCREAMING_SNAKE_CASE_ , features=SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = ParquetDatasetWriter(SCREAMING_SNAKE_CASE_ , tmp_path / """foo.parquet""" ) assert writer.write() > 0 _SCREAMING_SNAKE_CASE = Dataset.from_parquet(str(tmp_path / """foo.parquet""" ) ) assert dataset.features == reloaded_dataset.features _SCREAMING_SNAKE_CASE = ParquetDatasetReader(str(tmp_path / """foo.parquet""" ) , streaming=SCREAMING_SNAKE_CASE_ ).read() assert dataset.features == reloaded_iterable_dataset.features @pytest.mark.parametrize( """feature, expected""" , [ (Features({"""foo""": Value("""int32""" )} ), None), (Features({"""image""": Image(), """foo""": Value("""int32""" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS), (Features({"""nested""": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS), ] , ) def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> str: """simple docstring""" assert get_writer_batch_size(SCREAMING_SNAKE_CASE_ ) == expected
0
0
'''simple docstring''' from __future__ import annotations def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Tuple: """simple docstring""" print(F"Vertex\tShortest Distance from vertex {src}" ) for i, d in enumerate(SCREAMING_SNAKE_CASE_ ): print(F"{i}\t\t{d}" ) def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[Any]: """simple docstring""" for j in range(SCREAMING_SNAKE_CASE_ ): _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = (graph[j][k] for k in ["""src""", """dst""", """weight"""]) if distance[u] != float("""inf""" ) and distance[u] + w < distance[v]: return True return False def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> list[float]: """simple docstring""" _SCREAMING_SNAKE_CASE = [float("""inf""" )] * vertex_count _SCREAMING_SNAKE_CASE = 0.0 for _ in range(vertex_count - 1 ): for j in range(SCREAMING_SNAKE_CASE_ ): _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = (graph[j][k] for k in ["""src""", """dst""", """weight"""]) if distance[u] != float("""inf""" ) and distance[u] + w < distance[v]: _SCREAMING_SNAKE_CASE = distance[u] + w _SCREAMING_SNAKE_CASE = check_negative_cycle(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if negative_cycle_exists: raise Exception("""Negative cycle found""" ) return distance if __name__ == "__main__": import doctest doctest.testmod() UpperCamelCase__ : int = int(input("Enter number of vertices: ").strip()) UpperCamelCase__ : int = int(input("Enter number of edges: ").strip()) UpperCamelCase__ : list[dict[str, int]] = [{} for _ in range(E)] for i in range(E): print("Edge ", i + 1) UpperCamelCase__ : Dict = ( int(x) for x in input("Enter source, destination, weight: ").strip().split(" ") ) UpperCamelCase__ : Optional[Any] = {"src": src, "dst": dest, "weight": weight} UpperCamelCase__ : Optional[Any] = int(input("\nEnter shortest path source:").strip()) UpperCamelCase__ : Any = bellman_ford(graph, V, E, source) print_distance(shortest_distance, 0)
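# A tiny worked example of the relaxation loop above, using the edge-dict layout it
# expects ({"src", "dst", "weight"}); the graph itself is made up for illustration:
example_edges = [
    {"src": 0, "dst": 1, "weight": 4},
    {"src": 0, "dst": 2, "weight": 5},
    {"src": 1, "dst": 2, "weight": -3},
]
dist = [0.0, float("inf"), float("inf")]  # source vertex 0
for _ in range(len(dist) - 1):  # |V| - 1 relaxation rounds, as in the routine above
    for e in example_edges:
        if dist[e["src"]] + e["weight"] < dist[e["dst"]]:
            dist[e["dst"]] = dist[e["src"]] + e["weight"]
print(dist)  # [0.0, 4.0, 1.0] -> the path 0 -> 1 -> 2 (4 - 3 = 1) beats the direct edge (5)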
720
'''simple docstring''' def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> int: """simple docstring""" if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): raise ValueError("""multiplicative_persistence() only accepts integral values""" ) if num < 0: raise ValueError("""multiplicative_persistence() does not accept negative values""" ) _SCREAMING_SNAKE_CASE = 0 _SCREAMING_SNAKE_CASE = str(SCREAMING_SNAKE_CASE_ ) while len(SCREAMING_SNAKE_CASE_ ) != 1: _SCREAMING_SNAKE_CASE = [int(SCREAMING_SNAKE_CASE_ ) for i in num_string] _SCREAMING_SNAKE_CASE = 1 for i in range(0 , len(SCREAMING_SNAKE_CASE_ ) ): total *= numbers[i] _SCREAMING_SNAKE_CASE = str(SCREAMING_SNAKE_CASE_ ) steps += 1 return steps def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> int: """simple docstring""" if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): raise ValueError("""additive_persistence() only accepts integral values""" ) if num < 0: raise ValueError("""additive_persistence() does not accept negative values""" ) _SCREAMING_SNAKE_CASE = 0 _SCREAMING_SNAKE_CASE = str(SCREAMING_SNAKE_CASE_ ) while len(SCREAMING_SNAKE_CASE_ ) != 1: _SCREAMING_SNAKE_CASE = [int(SCREAMING_SNAKE_CASE_ ) for i in num_string] _SCREAMING_SNAKE_CASE = 0 for i in range(0 , len(SCREAMING_SNAKE_CASE_ ) ): total += numbers[i] _SCREAMING_SNAKE_CASE = str(SCREAMING_SNAKE_CASE_ ) steps += 1 return steps if __name__ == "__main__": import doctest doctest.testmod()
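# Worked example for the persistence functions above (shown inline because the
# obfuscated `lowerCAmelCase_` bindings shadow each other):
# multiplicative persistence of 39: 39 -> 3*9 = 27 -> 2*7 = 14 -> 1*4 = 4, i.e. 3 steps
# additive persistence of 199: 199 -> 1+9+9 = 19 -> 1+9 = 10 -> 1+0 = 1, i.e. 3 steps
n, steps = 39, 0
while n >= 10:
    prod = 1
    for digit in str(n):
        prod *= int(digit)
    n, steps = prod, steps + 1
print(steps)  # 3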
0
0
'''simple docstring''' import inspect import re from hashlib import shaaaa from typing import Dict, List from .arrow import arrow from .audiofolder import audiofolder from .csv import csv from .imagefolder import imagefolder from .json import json from .pandas import pandas from .parquet import parquet from .sql import sql # noqa F401 from .text import text def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> str: """simple docstring""" _SCREAMING_SNAKE_CASE = [] for line in lines: _SCREAMING_SNAKE_CASE = re.sub(r"""#.*""" , """""" , SCREAMING_SNAKE_CASE_ ) # remove comments if line: filtered_lines.append(SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = """\n""".join(SCREAMING_SNAKE_CASE_ ) # Make a hash from all this code _SCREAMING_SNAKE_CASE = full_str.encode("""utf-8""" ) return shaaaa(SCREAMING_SNAKE_CASE_ ).hexdigest() # get importable module names and hash for caching UpperCamelCase__ : List[str] = { "csv": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())), "json": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())), "pandas": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())), "parquet": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())), "arrow": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())), "text": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())), "imagefolder": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())), "audiofolder": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())), } # Used to infer the module to use based on the data files extensions UpperCamelCase__ : str = { ".csv": ("csv", {}), ".tsv": ("csv", {"sep": "\t"}), ".json": ("json", {}), ".jsonl": ("json", {}), ".parquet": ("parquet", {}), ".arrow": ("arrow", {}), ".txt": ("text", {}), } _EXTENSION_TO_MODULE.update({ext: ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext.upper(): ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext: ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext.upper(): ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS}) UpperCamelCase__ : Union[str, Any] = {"imagefolder", "audiofolder"} # Used to filter data files based on extensions given a module name UpperCamelCase__ : Dict[str, List[str]] = {} for _ext, (_module, _) in _EXTENSION_TO_MODULE.items(): _MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext) _MODULE_TO_EXTENSIONS["imagefolder"].append(".zip") _MODULE_TO_EXTENSIONS["audiofolder"].append(".zip")
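# Sketch of the comment-stripping hash above: two sources that differ only in comments
# should produce the same cache key. Self-contained re-implementation for illustration.
import re as _re
from hashlib import sha256 as _sha256

def _hash_lines_sketch(lines):
    kept = [_re.sub(r"#.*", "", line) for line in lines]  # drop comments
    kept = [line for line in kept if line]  # drop lines that became empty
    return _sha256("\n".join(kept).encode("utf-8")).hexdigest()

print(_hash_lines_sketch(["x = 1  # set x", "print(x)"]) == _hash_lines_sketch(["x = 1  ", "print(x)"]))  # True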
721
'''simple docstring''' import math import os import re import sys import unittest from pathlib import Path from typing import Tuple from unittest.mock import patch from parameterized import parameterized from transformers.testing_utils import ( CaptureStderr, ExtendSysPath, TestCasePlus, execute_subprocess_async, get_gpu_count, get_torch_dist_unique_port, require_apex, require_bitsandbytes, require_fairscale, require_torch, require_torch_gpu, require_torch_multi_gpu, require_torch_non_multi_gpu, slow, ) from transformers.trainer_callback import TrainerState from transformers.trainer_utils import set_seed UpperCamelCase__ : Tuple = os.path.abspath(os.path.dirname(__file__)) with ExtendSysPath(f"""{bindir}/../../examples/pytorch/translation"""): from run_translation import main # noqa set_seed(42) UpperCamelCase__ : Union[str, Any] = "sshleifer/student_marian_en_ro_6_1" UpperCamelCase__ : str = "sshleifer/tiny-mbart" @require_torch class _a (_lowerCamelCase): """simple docstring""" def UpperCamelCase ( self , A__=False , A__=None , A__=True , A__=True , A__=True , A__=True , ) -> Optional[Any]: _SCREAMING_SNAKE_CASE = self.run_trainer( eval_steps=1 , max_len=12 , model_name=A__ , num_train_epochs=1 , distributed=A__ , extra_args_str=A__ , predict_with_generate=A__ , do_train=A__ , do_eval=A__ , do_predict=A__ , ) _SCREAMING_SNAKE_CASE = TrainerState.load_from_json(os.path.join(A__ , """trainer_state.json""" ) ).log_history if not do_eval: return _SCREAMING_SNAKE_CASE = [log for log in logs if """eval_loss""" in log.keys()] _SCREAMING_SNAKE_CASE = eval_metrics[0] if predict_with_generate: assert "eval_bleu" in first_step_stats _SCREAMING_SNAKE_CASE = eval_metrics[-1] assert isinstance(last_step_stats["""eval_bleu"""] , A__ ) assert not math.isnan(float(last_step_stats["""eval_loss"""] ) ), "eval_loss must not be `nan`" @require_torch_non_multi_gpu def UpperCamelCase ( self ) -> Optional[int]: self.run_seqaseq_quick() @require_torch_multi_gpu def UpperCamelCase ( self ) -> Optional[Any]: self.run_seqaseq_quick(distributed=A__ ) @require_torch_multi_gpu def UpperCamelCase ( self ) -> Union[str, Any]: self.run_seqaseq_quick(distributed=A__ ) @unittest.skip("""Requires an update of the env running those tests""" ) @require_torch_multi_gpu @require_fairscale def UpperCamelCase ( self ) -> Any: self.run_seqaseq_quick(distributed=A__ , extra_args_str="""--sharded_ddp simple""" ) @unittest.skip("""Requires an update of the env running those tests""" ) @require_torch_multi_gpu @require_fairscale def UpperCamelCase ( self ) -> Tuple: self.run_seqaseq_quick(distributed=A__ , extra_args_str="""--sharded_ddp simple --fp16""" ) @unittest.skip("""Requires an update of the env running those tests""" ) @require_torch_multi_gpu @require_fairscale def UpperCamelCase ( self ) -> str: self.run_seqaseq_quick(distributed=A__ , extra_args_str="""--sharded_ddp zero_dp_2""" , predict_with_generate=A__ ) @unittest.skip("""Requires an update of the env running those tests""" ) @require_torch_multi_gpu @require_fairscale def UpperCamelCase ( self ) -> List[str]: self.run_seqaseq_quick( distributed=A__ , extra_args_str="""--sharded_ddp zero_dp_2 --fp16""" , predict_with_generate=A__ ) @require_apex @require_torch_gpu def UpperCamelCase ( self ) -> Optional[Any]: # XXX: apex breaks the trainer if it's run twice e.g. 
run_seq2seq.main() from the same # program and it breaks other tests that run from the same pytest worker, therefore until this is # sorted out it must be run only in an external program, that is distributed=True in this # test and only under one or more gpus - if we want cpu will need to make a special test # # specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via # 2nd main() call it botches the future eval. # self.run_seqaseq_quick(distributed=A__ , extra_args_str="""--fp16 --fp16_backend=apex""" ) # test 2nd time - was getting eval_loss': nan' # to reproduce the problem set distributed=False self.run_seqaseq_quick(distributed=A__ , extra_args_str="""--fp16 --fp16_backend=apex""" ) @parameterized.expand(["""base""", """low""", """high""", """mixed"""] ) @require_torch_multi_gpu def UpperCamelCase ( self , A__ ) -> List[Any]: # as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout _SCREAMING_SNAKE_CASE = { # test with the default log_level - should be info and thus log info once """base""": {"""extra_args_str""": """""", """n_matches""": 1}, # test with low log_level and log_level_replica - should be noisy on all processes # now the info string should appear twice on 2 processes """low""": {"""extra_args_str""": """--log_level debug --log_level_replica debug""", """n_matches""": 2}, # test with high log_level and low log_level_replica # now the info string should appear once only on the replica """high""": {"""extra_args_str""": """--log_level error --log_level_replica debug""", """n_matches""": 1}, # test with high log_level and log_level_replica - should be quiet on all processes """mixed""": {"""extra_args_str""": """--log_level error --log_level_replica error""", """n_matches""": 0}, } _SCREAMING_SNAKE_CASE = experiments[experiment_id] _SCREAMING_SNAKE_CASE = {"""distributed""": True, """predict_with_generate""": False, """do_eval""": False, """do_predict""": False} _SCREAMING_SNAKE_CASE = """Running training""" with CaptureStderr() as cl: self.run_seqaseq_quick(**A__ , extra_args_str=data["""extra_args_str"""] ) _SCREAMING_SNAKE_CASE = len(re.findall(A__ , cl.err ) ) self.assertEqual(A__ , data["""n_matches"""] ) @slow def UpperCamelCase ( self ) -> Any: _SCREAMING_SNAKE_CASE = self.run_trainer( eval_steps=2 , max_len=1_28 , model_name=A__ , learning_rate=3E-4 , num_train_epochs=10 , distributed=A__ , ) # Check metrics _SCREAMING_SNAKE_CASE = TrainerState.load_from_json(os.path.join(A__ , """trainer_state.json""" ) ).log_history _SCREAMING_SNAKE_CASE = [log for log in logs if """eval_loss""" in log.keys()] _SCREAMING_SNAKE_CASE = eval_metrics[0] _SCREAMING_SNAKE_CASE = eval_metrics[-1] assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing" assert isinstance(last_step_stats["""eval_bleu"""] , A__ ) # test if do_predict saves generations and metrics _SCREAMING_SNAKE_CASE = os.listdir(A__ ) _SCREAMING_SNAKE_CASE = {os.path.basename(A__ ) for p in contents} assert "generated_predictions.txt" in contents assert "predict_results.json" in contents @slow @require_bitsandbytes def UpperCamelCase ( self ) -> Dict: from transformers.training_args import OptimizerNames def train_and_return_metrics(A__ ) -> Tuple[int, float]: _SCREAMING_SNAKE_CASE = """--skip_memory_metrics 0""" _SCREAMING_SNAKE_CASE = self.run_trainer( max_len=1_28 , model_name=A__ , learning_rate=3E-4 , num_train_epochs=1 , optim=A__ , distributed=A__ , extra_args_str=A__ , do_eval=A__ , do_predict=A__ , n_gpus_to_use=1 , ) # 
Check metrics _SCREAMING_SNAKE_CASE = TrainerState.load_from_json(Path(A__ , """trainer_state.json""" ) ).log_history _SCREAMING_SNAKE_CASE = int(logs[0]["""train_mem_gpu_peaked_delta"""] / 2**20 ) _SCREAMING_SNAKE_CASE = int(logs[0]["""train_mem_gpu_alloc_delta"""] / 2**20 ) _SCREAMING_SNAKE_CASE = logs[0]["""train_loss"""] return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value ) _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value ) _SCREAMING_SNAKE_CASE = gpu_alloc_mem_orig - gpu_alloc_mem_bnb _SCREAMING_SNAKE_CASE = gpu_peak_mem_orig + gpu_alloc_mem_orig _SCREAMING_SNAKE_CASE = gpu_peak_mem_bnb + gpu_alloc_mem_bnb _SCREAMING_SNAKE_CASE = gpu_total_mem_orig - gpu_total_mem_bnb # sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which # doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized # in 2 bytes and the diff in optim memory usage is derived as so: # # - normal 25*8=~200MB (8 bytes per param) # - bnb 25*2= ~50MB (2 bytes per param) # # Thus we should expect ~150MB total memory saved. # # Peak memory should be the same - the total should be different by about that same margin # # After leaving a small margin to accommodate for differences between gpus let's check # that we have at least 120MB in savings _SCREAMING_SNAKE_CASE = 1_20 # uncomment the following if this test starts failing - requires py38 for a new print feature # gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb # print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB") # print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB") # print(f"{gpu_alloc_mem_diff=}MB") # print(f"{gpu_peak_mem_diff=}MB") # print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB") # print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB") self.assertGreater( A__ , A__ , """should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got""" F" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and" F" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB" , ) self.assertGreater( A__ , A__ , """should use ~150MB less total gpu memory with BNB, compared to without it for this model but got""" F" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and" F" gpu_total_mem_bnb={gpu_total_mem_bnb}MB" , ) self.assertEqual( A__ , A__ , F"loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}" ) def UpperCamelCase ( self , A__ , A__ , A__ , A__ = 3E-3 , A__ = "adafactor" , A__ = False , A__ = None , A__ = 0 , A__ = True , A__ = True , A__ = True , A__ = True , A__ = None , ) -> Dict: _SCREAMING_SNAKE_CASE = self.test_file_dir / """../fixtures/tests_samples/wmt_en_ro""" _SCREAMING_SNAKE_CASE = self.get_auto_remove_tmp_dir() _SCREAMING_SNAKE_CASE = F"\n --model_name_or_path {model_name}\n --train_file {data_dir}/train.json\n --validation_file {data_dir}/val.json\n --test_file {data_dir}/test.json\n --output_dir {output_dir}\n --overwrite_output_dir\n --max_train_samples 8\n --max_source_length {max_len}\n --max_target_length {max_len}\n --do_train\n --num_train_epochs {str(A__ )}\n --per_device_train_batch_size 4\n --learning_rate {learning_rate}\n --warmup_steps 8\n --logging_steps 0\n 
--logging_strategy no\n --save_steps {str(A__ )}\n --group_by_length\n --label_smoothing_factor 0.1\n --target_lang ro_RO\n --source_lang en_XX\n ".split() _SCREAMING_SNAKE_CASE = F"\n --do_eval\n --per_device_eval_batch_size 4\n --max_eval_samples 8\n --val_max_target_length {max_len}\n --evaluation_strategy steps\n --eval_steps {str(A__ )}\n ".split() _SCREAMING_SNAKE_CASE = """ --do_predict """.split() _SCREAMING_SNAKE_CASE = [] if do_train: args += args_train if do_eval: args += args_eval if do_predict: args += args_predict if predict_with_generate: args += "--predict_with_generate".split() if do_train: if optim == "adafactor": args += "--adafactor".split() else: args += F"--optim {optim}".split() if extra_args_str is not None: args += extra_args_str.split() if distributed: if n_gpus_to_use is None: _SCREAMING_SNAKE_CASE = get_gpu_count() _SCREAMING_SNAKE_CASE = get_torch_dist_unique_port() _SCREAMING_SNAKE_CASE = F"\n -m torch.distributed.run\n --nproc_per_node={n_gpus_to_use}\n --master_port={master_port}\n {self.examples_dir_str}/pytorch/translation/run_translation.py\n ".split() _SCREAMING_SNAKE_CASE = [sys.executable] + distributed_args + args # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die execute_subprocess_async(A__ , env=self.get_env() ) else: _SCREAMING_SNAKE_CASE = ["""run_translation.py"""] + args with patch.object(A__ , """argv""" , A__ ): main() return output_dir
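# Quick check of the memory arithmetic in the BNB comment above; the parameter count
# and bytes-per-param figures are the ones stated in that comment, not measurements:
quantized_params = 25_000_000
adamw_mb = quantized_params * 8 / 2**20  # ~190 MB of optimizer state at 8 bytes/param
bnb_mb = quantized_params * 2 / 2**20    # ~48 MB at 2 bytes/param
print(round(adamw_mb), round(bnb_mb), round(adamw_mb - bnb_mb))  # 191 48 143 -> the "~150MB" saving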
0
0
'''simple docstring''' import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging UpperCamelCase__ : List[Any] = logging.get_logger(__name__) UpperCamelCase__ : Any = "▁" UpperCamelCase__ : Any = {"vocab_file": "spiece.model"} UpperCamelCase__ : int = { "vocab_file": { "google/reformer-crime-and-punishment": ( "https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model" ) } } UpperCamelCase__ : Optional[int] = { "google/reformer-crime-and-punishment": 524_288, } class _a (_lowerCamelCase): SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE = ['input_ids', 'attention_mask'] def __init__( self , A__ , A__="</s>" , A__="<unk>" , A__=[] , A__ = None , **A__ , ) -> None: _SCREAMING_SNAKE_CASE = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( eos_token=A__ , unk_token=A__ , additional_special_tokens=A__ , sp_model_kwargs=self.sp_model_kwargs , **A__ , ) _SCREAMING_SNAKE_CASE = vocab_file _SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(A__ ) @property def UpperCamelCase ( self ) -> Any: return self.sp_model.get_piece_size() def UpperCamelCase ( self ) -> Dict[str, int]: _SCREAMING_SNAKE_CASE = {self.convert_ids_to_tokens(A__ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ) -> int: _SCREAMING_SNAKE_CASE = self.__dict__.copy() _SCREAMING_SNAKE_CASE = None return state def __setstate__( self , A__ ) -> str: _SCREAMING_SNAKE_CASE = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): _SCREAMING_SNAKE_CASE = {} _SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def UpperCamelCase ( self , A__ ) -> List[str]: return self.sp_model.encode(A__ , out_type=A__ ) def UpperCamelCase ( self , A__ ) -> Union[str, Any]: return self.sp_model.piece_to_id(A__ ) def UpperCamelCase ( self , A__ ) -> List[Any]: if index < self.sp_model.get_piece_size(): _SCREAMING_SNAKE_CASE = self.sp_model.IdToPiece(A__ ) return token def UpperCamelCase ( self , A__ ) -> str: _SCREAMING_SNAKE_CASE = [] _SCREAMING_SNAKE_CASE = """""" for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(A__ ) + token _SCREAMING_SNAKE_CASE = [] else: current_sub_tokens.append(A__ ) out_string += self.sp_model.decode(A__ ) return out_string.strip() def UpperCamelCase ( self , A__ , A__ = None ) -> Tuple[str]: if not os.path.isdir(A__ ): logger.error(F"Vocabulary path ({save_directory}) should be a directory" ) return _SCREAMING_SNAKE_CASE = os.path.join( A__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(A__ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , A__ ) elif not os.path.isfile(self.vocab_file ): with open(A__ , """wb""" ) as fi: _SCREAMING_SNAKE_CASE = self.sp_model.serialized_model_proto() fi.write(A__ ) return (out_vocab_file,)
700
'''simple docstring''' import sys UpperCamelCase__ : int = ( "73167176531330624919225119674426574742355349194934" "96983520312774506326239578318016984801869478851843" "85861560789112949495459501737958331952853208805511" "12540698747158523863050715693290963295227443043557" "66896648950445244523161731856403098711121722383113" "62229893423380308135336276614282806444486645238749" "30358907296290491560440772390713810515859307960866" "70172427121883998797908792274921901699720888093776" "65727333001053367881220235421809751254540594752243" "52584907711670556013604839586446706324415722155397" "53697817977846174064955149290862569321978468622482" "83972241375657056057490261407972968652414535100474" "82166370484403199890008895243450658541227588666881" "16427171479924442928230863465674813919123162824586" "17866458359124566529476545682848912883142607690042" "24219022671055626321111109370544217506941658960408" "07198403850962455444362981230987879927244284909188" "84580156166097919133875499200524063689912560717606" "05886116467109405077541002256983155200055935729725" "71636269561882670428252483600823257530420752963450" ) def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ = N ) -> int: """simple docstring""" _SCREAMING_SNAKE_CASE = -sys.maxsize - 1 for i in range(len(SCREAMING_SNAKE_CASE_ ) - 12 ): _SCREAMING_SNAKE_CASE = 1 for j in range(13 ): product *= int(n[i + j] ) if product > largest_product: _SCREAMING_SNAKE_CASE = product return largest_product if __name__ == "__main__": print(f"""{solution() = }""")
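# The solution above slides a fixed window over the digit string and recomputes each
# product from scratch. The same idea on a toy string with a window of four:
digits = "7316717"
best = 0
for i in range(len(digits) - 3):
    prod = 1
    for ch in digits[i : i + 4]:
        prod *= int(ch)
    best = max(best, prod)
print(best)  # 294, from the window 6*7*1*7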
0
0
'''simple docstring''' import os import pytest from datasets import ( get_dataset_config_info, get_dataset_config_names, get_dataset_infos, get_dataset_split_names, inspect_dataset, inspect_metric, ) UpperCamelCase__ : Tuple = pytest.mark.integration @pytest.mark.parametrize("""path""" , ["""paws""", """csv"""] ) def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> str: """simple docstring""" inspect_dataset(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = path + """.py""" assert script_name in os.listdir(SCREAMING_SNAKE_CASE_ ) assert "__pycache__" not in os.listdir(SCREAMING_SNAKE_CASE_ ) @pytest.mark.filterwarnings("""ignore:inspect_metric is deprecated:FutureWarning""" ) @pytest.mark.filterwarnings("""ignore:metric_module_factory is deprecated:FutureWarning""" ) @pytest.mark.parametrize("""path""" , ["""accuracy"""] ) def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[Any]: """simple docstring""" inspect_metric(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = path + """.py""" assert script_name in os.listdir(SCREAMING_SNAKE_CASE_ ) assert "__pycache__" not in os.listdir(SCREAMING_SNAKE_CASE_ ) @pytest.mark.parametrize( """path, config_name, expected_splits""" , [ ("""squad""", """plain_text""", ["""train""", """validation"""]), ("""dalle-mini/wit""", """dalle-mini--wit""", ["""train"""]), ("""paws""", """labeled_final""", ["""train""", """test""", """validation"""]), ] , ) def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> str: """simple docstring""" _SCREAMING_SNAKE_CASE = get_dataset_config_info(SCREAMING_SNAKE_CASE_ , config_name=SCREAMING_SNAKE_CASE_ ) assert info.config_name == config_name assert list(info.splits.keys() ) == expected_splits @pytest.mark.parametrize( """path, config_name, expected_exception""" , [ ("""paws""", None, ValueError), ] , ) def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[Any]: """simple docstring""" with pytest.raises(SCREAMING_SNAKE_CASE_ ): get_dataset_config_info(SCREAMING_SNAKE_CASE_ , config_name=SCREAMING_SNAKE_CASE_ ) @pytest.mark.parametrize( """path, expected""" , [ ("""squad""", """plain_text"""), ("""acronym_identification""", """default"""), ("""lhoestq/squad""", """plain_text"""), ("""lhoestq/test""", """default"""), ("""lhoestq/demo1""", """lhoestq--demo1"""), ("""dalle-mini/wit""", """dalle-mini--wit"""), ] , ) def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]: """simple docstring""" _SCREAMING_SNAKE_CASE = get_dataset_config_names(SCREAMING_SNAKE_CASE_ ) assert expected in config_names @pytest.mark.parametrize( """path, expected_configs, expected_splits_in_first_config""" , [ ("""squad""", ["""plain_text"""], ["""train""", """validation"""]), ("""dalle-mini/wit""", ["""dalle-mini--wit"""], ["""train"""]), ("""paws""", ["""labeled_final""", """labeled_swap""", """unlabeled_final"""], ["""train""", """test""", """validation"""]), ] , ) def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[Any]: """simple docstring""" _SCREAMING_SNAKE_CASE = get_dataset_infos(SCREAMING_SNAKE_CASE_ ) assert list(infos.keys() ) == expected_configs _SCREAMING_SNAKE_CASE = expected_configs[0] assert expected_config in infos _SCREAMING_SNAKE_CASE = infos[expected_config] assert info.config_name == expected_config assert list(info.splits.keys() ) == expected_splits_in_first_config 
@pytest.mark.parametrize( """path, expected_config, expected_splits""" , [ ("""squad""", """plain_text""", ["""train""", """validation"""]), ("""dalle-mini/wit""", """dalle-mini--wit""", ["""train"""]), ("""paws""", """labeled_final""", ["""train""", """test""", """validation"""]), ] , ) def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Any: """simple docstring""" _SCREAMING_SNAKE_CASE = get_dataset_infos(SCREAMING_SNAKE_CASE_ ) assert expected_config in infos _SCREAMING_SNAKE_CASE = infos[expected_config] assert info.config_name == expected_config assert list(info.splits.keys() ) == expected_splits @pytest.mark.parametrize( """path, config_name, expected_exception""" , [ ("""paws""", None, ValueError), ] , ) def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[str]: """simple docstring""" with pytest.raises(SCREAMING_SNAKE_CASE_ ): get_dataset_split_names(SCREAMING_SNAKE_CASE_ , config_name=SCREAMING_SNAKE_CASE_ )
701
'''simple docstring''' UpperCamelCase__ : Dict = { "a": "AAAAA", "b": "AAAAB", "c": "AAABA", "d": "AAABB", "e": "AABAA", "f": "AABAB", "g": "AABBA", "h": "AABBB", "i": "ABAAA", "j": "BBBAA", "k": "ABAAB", "l": "ABABA", "m": "ABABB", "n": "ABBAA", "o": "ABBAB", "p": "ABBBA", "q": "ABBBB", "r": "BAAAA", "s": "BAAAB", "t": "BAABA", "u": "BAABB", "v": "BBBAB", "w": "BABAA", "x": "BABAB", "y": "BABBA", "z": "BABBB", " ": " ", } UpperCamelCase__ : str = {value: key for key, value in encode_dict.items()} def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> str: """simple docstring""" _SCREAMING_SNAKE_CASE = """""" for letter in word.lower(): if letter.isalpha() or letter == " ": encoded += encode_dict[letter] else: raise Exception("""encode() accepts only letters of the alphabet and spaces""" ) return encoded def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> str: """simple docstring""" if set(SCREAMING_SNAKE_CASE_ ) - {"A", "B", " "} != set(): raise Exception("""decode() accepts only 'A', 'B' and spaces""" ) _SCREAMING_SNAKE_CASE = """""" for word in coded.split(): while len(SCREAMING_SNAKE_CASE_ ) != 0: decoded += decode_dict[word[:5]] _SCREAMING_SNAKE_CASE = word[5:] decoded += " " return decoded.strip() if __name__ == "__main__": from doctest import testmod testmod()
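# Round-trip example computed from the cipher tables above: each letter maps to five
# A/B symbols, and decoding consumes the stream five symbols at a time.
word = "hello"
table = {"h": "AABBB", "e": "AABAA", "l": "ABABA", "o": "ABBAB"}  # subset of encode_dict above
encoded = "".join(table[c] for c in word)
print(encoded)  # AABBBAABAAABABAABABAABBAB
inverse = {v: k for k, v in table.items()}
decoded = ""
while encoded:
    decoded, encoded = decoded + inverse[encoded[:5]], encoded[5:]
print(decoded)  # hello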
0
0
'''simple docstring''' from __future__ import annotations import unittest from transformers import FunnelConfig, is_tf_available from transformers.testing_utils import require_tf from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFFunnelBaseModel, TFFunnelForMaskedLM, TFFunnelForMultipleChoice, TFFunnelForPreTraining, TFFunnelForQuestionAnswering, TFFunnelForSequenceClassification, TFFunnelForTokenClassification, TFFunnelModel, ) class _a : """simple docstring""" def __init__( self , A__ , A__=13 , A__=7 , A__=True , A__=True , A__=True , A__=True , A__=99 , A__=[1, 1, 2] , A__=1 , A__=32 , A__=4 , A__=8 , A__=37 , A__="gelu_new" , A__=0.1 , A__=0.1 , A__=0.0 , A__=5_12 , A__=3 , A__=0.02 , A__=3 , A__=4 , A__=None , A__=False , ) -> Dict: _SCREAMING_SNAKE_CASE = parent _SCREAMING_SNAKE_CASE = batch_size _SCREAMING_SNAKE_CASE = seq_length _SCREAMING_SNAKE_CASE = is_training _SCREAMING_SNAKE_CASE = use_input_mask _SCREAMING_SNAKE_CASE = use_token_type_ids _SCREAMING_SNAKE_CASE = use_labels _SCREAMING_SNAKE_CASE = vocab_size _SCREAMING_SNAKE_CASE = block_sizes _SCREAMING_SNAKE_CASE = num_decoder_layers _SCREAMING_SNAKE_CASE = d_model _SCREAMING_SNAKE_CASE = n_head _SCREAMING_SNAKE_CASE = d_head _SCREAMING_SNAKE_CASE = d_inner _SCREAMING_SNAKE_CASE = hidden_act _SCREAMING_SNAKE_CASE = hidden_dropout _SCREAMING_SNAKE_CASE = attention_dropout _SCREAMING_SNAKE_CASE = activation_dropout _SCREAMING_SNAKE_CASE = max_position_embeddings _SCREAMING_SNAKE_CASE = type_vocab_size _SCREAMING_SNAKE_CASE = 2 _SCREAMING_SNAKE_CASE = num_labels _SCREAMING_SNAKE_CASE = num_choices _SCREAMING_SNAKE_CASE = scope _SCREAMING_SNAKE_CASE = initializer_std # Used in the tests to check the size of the first attention layer _SCREAMING_SNAKE_CASE = n_head # Used in the tests to check the size of the first hidden state _SCREAMING_SNAKE_CASE = self.d_model # Used in the tests to check the number of output hidden states/attentions _SCREAMING_SNAKE_CASE = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers) # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with # the last hidden state of the first block (which is the first hidden state of the decoder). 
if not base: _SCREAMING_SNAKE_CASE = self.num_hidden_layers + 2 def UpperCamelCase ( self ) -> int: _SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _SCREAMING_SNAKE_CASE = None if self.use_input_mask: _SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] ) _SCREAMING_SNAKE_CASE = None if self.use_token_type_ids: _SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _SCREAMING_SNAKE_CASE = None _SCREAMING_SNAKE_CASE = None _SCREAMING_SNAKE_CASE = None if self.use_labels: _SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices ) _SCREAMING_SNAKE_CASE = FunnelConfig( vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ , ) -> List[str]: _SCREAMING_SNAKE_CASE = TFFunnelModel(config=A__ ) _SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} _SCREAMING_SNAKE_CASE = model(A__ ) _SCREAMING_SNAKE_CASE = [input_ids, input_mask] _SCREAMING_SNAKE_CASE = model(A__ ) _SCREAMING_SNAKE_CASE = model(A__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) ) _SCREAMING_SNAKE_CASE = False _SCREAMING_SNAKE_CASE = TFFunnelModel(config=A__ ) _SCREAMING_SNAKE_CASE = model(A__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) ) _SCREAMING_SNAKE_CASE = False _SCREAMING_SNAKE_CASE = TFFunnelModel(config=A__ ) _SCREAMING_SNAKE_CASE = model(A__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) ) def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ , ) -> Optional[int]: _SCREAMING_SNAKE_CASE = TFFunnelBaseModel(config=A__ ) _SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} _SCREAMING_SNAKE_CASE = model(A__ ) _SCREAMING_SNAKE_CASE = [input_ids, input_mask] _SCREAMING_SNAKE_CASE = model(A__ ) _SCREAMING_SNAKE_CASE = model(A__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) ) _SCREAMING_SNAKE_CASE = False _SCREAMING_SNAKE_CASE = TFFunnelBaseModel(config=A__ ) _SCREAMING_SNAKE_CASE = model(A__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) ) _SCREAMING_SNAKE_CASE = False _SCREAMING_SNAKE_CASE = TFFunnelBaseModel(config=A__ ) _SCREAMING_SNAKE_CASE = model(A__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) ) def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ , ) -> Optional[Any]: _SCREAMING_SNAKE_CASE = TFFunnelForPreTraining(config=A__ ) 
_SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} _SCREAMING_SNAKE_CASE = model(A__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) ) def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ , ) -> Optional[Any]: _SCREAMING_SNAKE_CASE = TFFunnelForMaskedLM(config=A__ ) _SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} _SCREAMING_SNAKE_CASE = model(A__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ , ) -> Tuple: _SCREAMING_SNAKE_CASE = self.num_labels _SCREAMING_SNAKE_CASE = TFFunnelForSequenceClassification(config=A__ ) _SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} _SCREAMING_SNAKE_CASE = model(A__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ , ) -> Dict: _SCREAMING_SNAKE_CASE = self.num_choices _SCREAMING_SNAKE_CASE = TFFunnelForMultipleChoice(config=A__ ) _SCREAMING_SNAKE_CASE = tf.tile(tf.expand_dims(A__ , 1 ) , (1, self.num_choices, 1) ) _SCREAMING_SNAKE_CASE = tf.tile(tf.expand_dims(A__ , 1 ) , (1, self.num_choices, 1) ) _SCREAMING_SNAKE_CASE = tf.tile(tf.expand_dims(A__ , 1 ) , (1, self.num_choices, 1) ) _SCREAMING_SNAKE_CASE = { """input_ids""": multiple_choice_inputs_ids, """attention_mask""": multiple_choice_input_mask, """token_type_ids""": multiple_choice_token_type_ids, } _SCREAMING_SNAKE_CASE = model(A__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ , ) -> List[Any]: _SCREAMING_SNAKE_CASE = self.num_labels _SCREAMING_SNAKE_CASE = TFFunnelForTokenClassification(config=A__ ) _SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} _SCREAMING_SNAKE_CASE = model(A__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ , ) -> Dict: _SCREAMING_SNAKE_CASE = TFFunnelForQuestionAnswering(config=A__ ) _SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} _SCREAMING_SNAKE_CASE = model(A__ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCamelCase ( self ) -> Optional[Any]: _SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() ( ( _SCREAMING_SNAKE_CASE ) , ( _SCREAMING_SNAKE_CASE ) , ( _SCREAMING_SNAKE_CASE ) , ( _SCREAMING_SNAKE_CASE ) , ( _SCREAMING_SNAKE_CASE ) , ( _SCREAMING_SNAKE_CASE ) , ( _SCREAMING_SNAKE_CASE ) , ) = config_and_inputs _SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_tf class _a (_lowerCamelCase , _lowerCamelCase , unittest.TestCase): """simple docstring""" SCREAMING_SNAKE_CASE = ( ( TFFunnelModel, TFFunnelForMaskedLM, TFFunnelForPreTraining, TFFunnelForQuestionAnswering, TFFunnelForTokenClassification, ) if is_tf_available() else () ) SCREAMING_SNAKE_CASE = ( { 
'feature-extraction': (TFFunnelBaseModel, TFFunnelModel), 'fill-mask': TFFunnelForMaskedLM, 'question-answering': TFFunnelForQuestionAnswering, 'text-classification': TFFunnelForSequenceClassification, 'token-classification': TFFunnelForTokenClassification, 'zero-shot': TFFunnelForSequenceClassification, } if is_tf_available() else {} ) SCREAMING_SNAKE_CASE = False SCREAMING_SNAKE_CASE = False def UpperCamelCase ( self ) -> int: _SCREAMING_SNAKE_CASE = TFFunnelModelTester(self ) _SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=A__ ) def UpperCamelCase ( self ) -> List[Any]: self.config_tester.run_common_tests() def UpperCamelCase ( self ) -> Optional[int]: _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A__ ) def UpperCamelCase ( self ) -> int: _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*A__ ) def UpperCamelCase ( self ) -> Union[str, Any]: _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*A__ ) def UpperCamelCase ( self ) -> str: _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*A__ ) def UpperCamelCase ( self ) -> Optional[int]: _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*A__ ) @require_tf class _a (_lowerCamelCase , unittest.TestCase): """simple docstring""" SCREAMING_SNAKE_CASE = ( (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else () ) SCREAMING_SNAKE_CASE = False SCREAMING_SNAKE_CASE = False def UpperCamelCase ( self ) -> Optional[int]: _SCREAMING_SNAKE_CASE = TFFunnelModelTester(self , base=A__ ) _SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=A__ ) def UpperCamelCase ( self ) -> str: self.config_tester.run_common_tests() def UpperCamelCase ( self ) -> List[Any]: _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_base_model(*A__ ) def UpperCamelCase ( self ) -> Union[str, Any]: _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*A__ ) def UpperCamelCase ( self ) -> Tuple: _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*A__ )
702
'''simple docstring''' import argparse import torch from torch import nn from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> Any: """simple docstring""" _SCREAMING_SNAKE_CASE = [ """encoder.version""", """decoder.version""", """model.encoder.version""", """model.decoder.version""", """decoder.output_projection.weight""", """_float_tensor""", """encoder.embed_positions._float_tensor""", """decoder.embed_positions._float_tensor""", ] for k in ignore_keys: state_dict.pop(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> str: """simple docstring""" _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = emb.weight.shape _SCREAMING_SNAKE_CASE = nn.Linear(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , bias=SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = emb.weight.data return lin_layer def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> List[Any]: """simple docstring""" _SCREAMING_SNAKE_CASE = torch.load(SCREAMING_SNAKE_CASE_ , map_location="""cpu""" ) _SCREAMING_SNAKE_CASE = mam_aaa["""args"""] or mam_aaa["""cfg"""]["""model"""] _SCREAMING_SNAKE_CASE = mam_aaa["""model"""] remove_ignore_keys_(SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = state_dict["""encoder.embed_tokens.weight"""].shape[0] _SCREAMING_SNAKE_CASE = MaMaaaConfig( vocab_size=SCREAMING_SNAKE_CASE_ , max_position_embeddings=10_24 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="""relu""" , ) _SCREAMING_SNAKE_CASE = state_dict["""decoder.embed_tokens.weight"""] _SCREAMING_SNAKE_CASE = MaMaaaForConditionalGeneration(SCREAMING_SNAKE_CASE_ ) model.model.load_state_dict(SCREAMING_SNAKE_CASE_ , strict=SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = make_linear_from_emb(model.model.shared ) return model if __name__ == "__main__": UpperCamelCase__ : Dict = argparse.ArgumentParser() # Required parameters parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.") parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") UpperCamelCase__ : List[str] = parser.parse_args() UpperCamelCase__ : Any = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path) model.save_pretrained(args.pytorch_dump_folder_path)
0
0
'''simple docstring''' def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> float: """simple docstring""" _SCREAMING_SNAKE_CASE = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff) # formula for sum of series return total def lowerCAmelCase_ ( ) -> Optional[int]: """simple docstring""" print(sum_of_series(1 , 1 , 10 ) ) if __name__ == "__main__": import doctest doctest.testmod()
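# Worked check of the closed form above: an arithmetic series with first term
# a, common difference d and n terms sums to (n / 2) * (2a + (n - 1)d).
# Brute-force comparison (names here are illustrative):
def arithmetic_series_sum(first_term: float, common_diff: float, num_of_terms: int) -> float:
    return (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)

assert arithmetic_series_sum(1, 1, 10) == sum(range(1, 11))  # 1 + 2 + ... + 10 = 55
assert arithmetic_series_sum(2, 3, 4) == 2 + 5 + 8 + 11      # = 26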
703
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available UpperCamelCase__ : str = { "configuration_canine": ["CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP", "CanineConfig"], "tokenization_canine": ["CanineTokenizer"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ : List[Any] = [ "CANINE_PRETRAINED_MODEL_ARCHIVE_LIST", "CanineForMultipleChoice", "CanineForQuestionAnswering", "CanineForSequenceClassification", "CanineForTokenClassification", "CanineLayer", "CanineModel", "CaninePreTrainedModel", "load_tf_weights_in_canine", ] if TYPE_CHECKING: from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig from .tokenization_canine import CanineTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_canine import ( CANINE_PRETRAINED_MODEL_ARCHIVE_LIST, CanineForMultipleChoice, CanineForQuestionAnswering, CanineForSequenceClassification, CanineForTokenClassification, CanineLayer, CanineModel, CaninePreTrainedModel, load_tf_weights_in_canine, ) else: import sys UpperCamelCase__ : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
0
0
import math def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 0 , SCREAMING_SNAKE_CASE_ = 0 ) -> list: """simple docstring""" _SCREAMING_SNAKE_CASE = end or len(SCREAMING_SNAKE_CASE_ ) for i in range(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): _SCREAMING_SNAKE_CASE = i _SCREAMING_SNAKE_CASE = array[i] while temp_index != start and temp_index_value < array[temp_index - 1]: _SCREAMING_SNAKE_CASE = array[temp_index - 1] temp_index -= 1 _SCREAMING_SNAKE_CASE = temp_index_value return array def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> None: # Max Heap """simple docstring""" _SCREAMING_SNAKE_CASE = index _SCREAMING_SNAKE_CASE = 2 * index + 1 # Left Node _SCREAMING_SNAKE_CASE = 2 * index + 2 # Right Node if left_index < heap_size and array[largest] < array[left_index]: _SCREAMING_SNAKE_CASE = left_index if right_index < heap_size and array[largest] < array[right_index]: _SCREAMING_SNAKE_CASE = right_index if largest != index: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = array[largest], array[index] heapify(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> list: """simple docstring""" _SCREAMING_SNAKE_CASE = len(SCREAMING_SNAKE_CASE_ ) for i in range(n // 2 , -1 , -1 ): heapify(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for i in range(n - 1 , 0 , -1 ): _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = array[0], array[i] heapify(SCREAMING_SNAKE_CASE_ , 0 , SCREAMING_SNAKE_CASE_ ) return array def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> int: """simple docstring""" if (array[first_index] > array[middle_index]) != ( array[first_index] > array[last_index] ): return array[first_index] elif (array[middle_index] > array[first_index]) != ( array[middle_index] > array[last_index] ): return array[middle_index] else: return array[last_index] def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> int: """simple docstring""" _SCREAMING_SNAKE_CASE = low _SCREAMING_SNAKE_CASE = high while True: while array[i] < pivot: i += 1 j -= 1 while pivot < array[j]: j -= 1 if i >= j: return i _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = array[j], array[i] i += 1 def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> list: """simple docstring""" if len(SCREAMING_SNAKE_CASE_ ) == 0: return array _SCREAMING_SNAKE_CASE = 2 * math.ceil(math.loga(len(SCREAMING_SNAKE_CASE_ ) ) ) _SCREAMING_SNAKE_CASE = 16 return intro_sort(SCREAMING_SNAKE_CASE_ , 0 , len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> list: """simple docstring""" while end - start > size_threshold: if max_depth == 0: return heap_sort(SCREAMING_SNAKE_CASE_ ) max_depth -= 1 _SCREAMING_SNAKE_CASE = median_of_a(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , start + ((end - start) // 2) + 1 , end - 1 ) _SCREAMING_SNAKE_CASE = partition(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) intro_sort(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = p return insertion_sort(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if __name__ 
== "__main__": import doctest doctest.testmod() UpperCamelCase__ : Optional[Any] = input("Enter numbers separated by a comma : ").strip() UpperCamelCase__ : Optional[int] = [float(item) for item in user_input.split(",")] print(sort(unsorted))
704
'''simple docstring''' import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class _a (_lowerCamelCase): """simple docstring""" SCREAMING_SNAKE_CASE = ['image_processor', 'tokenizer'] SCREAMING_SNAKE_CASE = 'ChineseCLIPImageProcessor' SCREAMING_SNAKE_CASE = ('BertTokenizer', 'BertTokenizerFast') def __init__( self , A__=None , A__=None , **A__ ) -> int: _SCREAMING_SNAKE_CASE = None if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""" , A__ , ) _SCREAMING_SNAKE_CASE = kwargs.pop("""feature_extractor""" ) _SCREAMING_SNAKE_CASE = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""" ) if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""" ) super().__init__(A__ , A__ ) _SCREAMING_SNAKE_CASE = self.image_processor def __call__( self , A__=None , A__=None , A__=None , **A__ ) -> Optional[int]: if text is None and images is None: raise ValueError("""You have to specify either text or images. Both cannot be none.""" ) if text is not None: _SCREAMING_SNAKE_CASE = self.tokenizer(A__ , return_tensors=A__ , **A__ ) if images is not None: _SCREAMING_SNAKE_CASE = self.image_processor(A__ , return_tensors=A__ , **A__ ) if text is not None and images is not None: _SCREAMING_SNAKE_CASE = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**A__ ) , tensor_type=A__ ) def UpperCamelCase ( self , *A__ , **A__ ) -> Dict: return self.tokenizer.batch_decode(*A__ , **A__ ) def UpperCamelCase ( self , *A__ , **A__ ) -> Optional[Any]: return self.tokenizer.decode(*A__ , **A__ ) @property def UpperCamelCase ( self ) -> List[Any]: _SCREAMING_SNAKE_CASE = self.tokenizer.model_input_names _SCREAMING_SNAKE_CASE = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) @property def UpperCamelCase ( self ) -> Optional[int]: warnings.warn( """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , A__ , ) return self.image_processor_class
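# A minimal usage sketch for the processor defined above. The checkpoint name
# is an assumption (any Chinese-CLIP checkpoint that ships a processor config
# would do), and the image path is a placeholder:
from PIL import Image
from transformers import ChineseCLIPProcessor

processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
inputs = processor(text=["一只猫"], images=Image.open("cat.png"), return_tensors="pt")  # "a cat"
# -> input_ids / attention_mask from the tokenizer plus pixel_values from the
#    image processor, merged exactly as in __call__ above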
0
0
import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase__ : Dict = logging.get_logger(__name__) UpperCamelCase__ : List[str] = { "asapp/sew-tiny-100k": "https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json", # See all SEW models at https://huggingface.co/models?filter=sew } class _a (_lowerCamelCase): """simple docstring""" SCREAMING_SNAKE_CASE = 'sew' def __init__( self , A__=32 , A__=7_68 , A__=12 , A__=12 , A__=30_72 , A__=2 , A__="gelu" , A__=0.1 , A__=0.1 , A__=0.1 , A__=0.0 , A__=0.1 , A__=0.1 , A__=0.02 , A__=1E-5 , A__="group" , A__="gelu" , A__=(64, 1_28, 1_28, 1_28, 1_28, 2_56, 2_56, 2_56, 2_56, 5_12, 5_12, 5_12, 5_12) , A__=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , A__=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , A__=False , A__=1_28 , A__=16 , A__=True , A__=0.05 , A__=10 , A__=2 , A__=0.0 , A__=10 , A__=0 , A__="mean" , A__=False , A__=False , A__=2_56 , A__=0 , A__=1 , A__=2 , **A__ , ) -> List[Any]: super().__init__(**A__ , pad_token_id=A__ , bos_token_id=A__ , eos_token_id=A__ ) _SCREAMING_SNAKE_CASE = hidden_size _SCREAMING_SNAKE_CASE = feat_extract_norm _SCREAMING_SNAKE_CASE = feat_extract_activation _SCREAMING_SNAKE_CASE = list(A__ ) _SCREAMING_SNAKE_CASE = list(A__ ) _SCREAMING_SNAKE_CASE = list(A__ ) _SCREAMING_SNAKE_CASE = conv_bias _SCREAMING_SNAKE_CASE = num_conv_pos_embeddings _SCREAMING_SNAKE_CASE = num_conv_pos_embedding_groups _SCREAMING_SNAKE_CASE = len(self.conv_dim ) _SCREAMING_SNAKE_CASE = num_hidden_layers _SCREAMING_SNAKE_CASE = intermediate_size _SCREAMING_SNAKE_CASE = squeeze_factor _SCREAMING_SNAKE_CASE = hidden_act _SCREAMING_SNAKE_CASE = num_attention_heads _SCREAMING_SNAKE_CASE = hidden_dropout _SCREAMING_SNAKE_CASE = attention_dropout _SCREAMING_SNAKE_CASE = activation_dropout _SCREAMING_SNAKE_CASE = feat_proj_dropout _SCREAMING_SNAKE_CASE = final_dropout _SCREAMING_SNAKE_CASE = layerdrop _SCREAMING_SNAKE_CASE = layer_norm_eps _SCREAMING_SNAKE_CASE = initializer_range _SCREAMING_SNAKE_CASE = vocab_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( """Configuration for convolutional layers is incorrect.""" """It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,""" F"but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)" F"= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`." ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 _SCREAMING_SNAKE_CASE = apply_spec_augment _SCREAMING_SNAKE_CASE = mask_time_prob _SCREAMING_SNAKE_CASE = mask_time_length _SCREAMING_SNAKE_CASE = mask_time_min_masks _SCREAMING_SNAKE_CASE = mask_feature_prob _SCREAMING_SNAKE_CASE = mask_feature_length _SCREAMING_SNAKE_CASE = mask_feature_min_masks # ctc loss _SCREAMING_SNAKE_CASE = ctc_loss_reduction _SCREAMING_SNAKE_CASE = ctc_zero_infinity # sequence classification _SCREAMING_SNAKE_CASE = use_weighted_layer_sum _SCREAMING_SNAKE_CASE = classifier_proj_size @property def UpperCamelCase ( self ) -> Optional[int]: return functools.reduce(operator.mul , self.conv_stride , 1 )
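# The final property of the config above multiplies the conv strides together,
# i.e. the overall waveform-to-frame downsampling factor. For the default
# strides this works out to 320 raw samples per output frame (20 ms at 16 kHz):
import functools
import operator

default_conv_stride = (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)
assert functools.reduce(operator.mul, default_conv_stride, 1) == 320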
705
'''simple docstring''' from sklearn.metrics import matthews_corrcoef import datasets UpperCamelCase__ : List[str] = "\nCompute the Matthews correlation coefficient (MCC)\n\nThe Matthews correlation coefficient is used in machine learning as a\nmeasure of the quality of binary and multiclass classifications. It takes\ninto account true and false positives and negatives and is generally\nregarded as a balanced measure which can be used even if the classes are of\nvery different sizes. The MCC is in essence a correlation coefficient value\nbetween -1 and +1. A coefficient of +1 represents a perfect prediction, 0\nan average random prediction and -1 an inverse prediction. The statistic\nis also known as the phi coefficient. [source: Wikipedia]\n" UpperCamelCase__ : List[Any] = "\nArgs:\n predictions (list of int): Predicted labels, as returned by a model.\n references (list of int): Ground truth labels.\n sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.\nReturns:\n matthews_correlation (dict containing float): Matthews correlation.\nExamples:\n Example 1, a basic example with only predictions and references as inputs:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3])\n >>> print(round(results['matthews_correlation'], 2))\n 0.54\n\n Example 2, the same example as above, but also including sample weights:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 3, 1, 1, 1, 2])\n >>> print(round(results['matthews_correlation'], 2))\n 0.1\n\n Example 3, the same example as above, but with sample weights that cause a negative correlation:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 1, 0, 0, 0, 1])\n >>> print(round(results['matthews_correlation'], 2))\n -0.25\n" UpperCamelCase__ : Any = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION) class _a (datasets.Metric): """simple docstring""" def UpperCamelCase ( self ) -> Optional[Any]: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Value("""int32""" ), """references""": datasets.Value("""int32""" ), } ) , reference_urls=[ """https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html""" ] , ) def UpperCamelCase ( self , A__ , A__ , A__=None ) -> List[str]: return { "matthews_correlation": float(matthews_corrcoef(A__ , A__ , sample_weight=A__ ) ), }
0
0
import math import random from typing import Any from .hill_climbing import SearchProblem def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = math.inf , SCREAMING_SNAKE_CASE_ = -math.inf , SCREAMING_SNAKE_CASE_ = math.inf , SCREAMING_SNAKE_CASE_ = -math.inf , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = 1_00 , SCREAMING_SNAKE_CASE_ = 0.01 , SCREAMING_SNAKE_CASE_ = 1 , ) -> Any: """simple docstring""" _SCREAMING_SNAKE_CASE = False _SCREAMING_SNAKE_CASE = search_prob _SCREAMING_SNAKE_CASE = start_temperate _SCREAMING_SNAKE_CASE = [] _SCREAMING_SNAKE_CASE = 0 _SCREAMING_SNAKE_CASE = None while not search_end: _SCREAMING_SNAKE_CASE = current_state.score() if best_state is None or current_score > best_state.score(): _SCREAMING_SNAKE_CASE = current_state scores.append(SCREAMING_SNAKE_CASE_ ) iterations += 1 _SCREAMING_SNAKE_CASE = None _SCREAMING_SNAKE_CASE = current_state.get_neighbors() while ( next_state is None and neighbors ): # till we do not find a neighbor that we can move to _SCREAMING_SNAKE_CASE = random.randint(0 , len(SCREAMING_SNAKE_CASE_ ) - 1 ) # picking a random neighbor _SCREAMING_SNAKE_CASE = neighbors.pop(SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = picked_neighbor.score() - current_score if ( picked_neighbor.x > max_x or picked_neighbor.x < min_x or picked_neighbor.y > max_y or picked_neighbor.y < min_y ): continue # neighbor outside our bounds if not find_max: _SCREAMING_SNAKE_CASE = change * -1 # in case we are finding minimum if change > 0: # improves the solution _SCREAMING_SNAKE_CASE = picked_neighbor else: _SCREAMING_SNAKE_CASE = (math.e) ** ( change / current_temp ) # probability generation function if random.random() < probability: # random number within probability _SCREAMING_SNAKE_CASE = picked_neighbor _SCREAMING_SNAKE_CASE = current_temp - (current_temp * rate_of_decrease) if current_temp < threshold_temp or next_state is None: # temperature below threshold, or could not find a suitable neighbor _SCREAMING_SNAKE_CASE = True else: _SCREAMING_SNAKE_CASE = next_state if visualization: from matplotlib import pyplot as plt plt.plot(range(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) plt.xlabel("""Iterations""" ) plt.ylabel("""Function values""" ) plt.show() return best_state if __name__ == "__main__": def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> str: """simple docstring""" return (x**2) + (y**2) # starting the problem with initial coordinates (12, 47) UpperCamelCase__ : int = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa) UpperCamelCase__ : List[str] = simulated_annealing( prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True ) print( "The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 " f"""and 50 > y > - 5 found via simulated annealing: {local_min.score()}""" ) # starting the problem with initial coordinates (12, 47) UpperCamelCase__ : int = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa) UpperCamelCase__ : int = simulated_annealing( prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True ) print( "The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 " f"""and 50 > y > - 5 found via simulated annealing: {local_min.score()}""" ) def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[Any]: """simple docstring""" return (3 * x**2) - (6 * y) UpperCamelCase__ : List[Any] = SearchProblem(x=3, y=4, step_size=1,
function_to_optimize=test_fa) UpperCamelCase__ : int = simulated_annealing(prob, find_max=False, visualization=True) print( "The minimum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: " f"""{local_min.score()}""" ) UpperCamelCase__ : Union[str, Any] = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa) UpperCamelCase__ : Dict = simulated_annealing(prob, find_max=True, visualization=True) print( "The maximum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: " f"""{local_min.score()}""" )
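# The acceptance rule above is the Metropolis criterion: a move that worsens
# the score by |change| at temperature T is still taken with probability
# e^(change / T) (change < 0 for a worse move). Illustrative values:
import math

for change, temp in [(-1.0, 100.0), (-1.0, 1.0), (-1.0, 0.1)]:
    print(f"change={change}, T={temp}: p={math.e ** (change / temp):.5f}")
# change=-1.0, T=100.0: p=0.99005  (hot: almost any move is accepted)
# change=-1.0, T=1.0: p=0.36788
# change=-1.0, T=0.1: p=0.00005  (cold: only improvements survive)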
706
'''simple docstring''' from __future__ import annotations def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Tuple: """simple docstring""" print(F"Vertex\tShortest Distance from vertex {src}" ) for i, d in enumerate(SCREAMING_SNAKE_CASE_ ): print(F"{i}\t\t{d}" ) def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[Any]: """simple docstring""" for j in range(SCREAMING_SNAKE_CASE_ ): _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = (graph[j][k] for k in ["""src""", """dst""", """weight"""]) if distance[u] != float("""inf""" ) and distance[u] + w < distance[v]: return True return False def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> list[float]: """simple docstring""" _SCREAMING_SNAKE_CASE = [float("""inf""" )] * vertex_count _SCREAMING_SNAKE_CASE = 0.0 for _ in range(vertex_count - 1 ): for j in range(SCREAMING_SNAKE_CASE_ ): _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = (graph[j][k] for k in ["""src""", """dst""", """weight"""]) if distance[u] != float("""inf""" ) and distance[u] + w < distance[v]: _SCREAMING_SNAKE_CASE = distance[u] + w _SCREAMING_SNAKE_CASE = check_negative_cycle(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if negative_cycle_exists: raise Exception("""Negative cycle found""" ) return distance if __name__ == "__main__": import doctest doctest.testmod() UpperCamelCase__ : int = int(input("Enter number of vertices: ").strip()) UpperCamelCase__ : int = int(input("Enter number of edges: ").strip()) UpperCamelCase__ : list[dict[str, int]] = [{} for _ in range(E)] for i in range(E): print("Edge ", i + 1) UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ : Dict = ( int(x) for x in input("Enter source, destination, weight: ").strip().split(" ") ) UpperCamelCase__ : Optional[Any] = {"src": src, "dst": dest, "weight": weight} UpperCamelCase__ : Optional[Any] = int(input("\nEnter shortest path source:").strip()) UpperCamelCase__ : Any = bellman_ford(graph, V, E, source) print_distance(shortest_distance, 0)
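# A small hand-checkable input for the Bellman-Ford routine above, using the
# same "src"/"dst"/"weight" edge dicts that the __main__ block builds from
# stdin (the call below uses the public name referenced in that block):
example_graph = [
    {"src": 0, "dst": 1, "weight": 4},
    {"src": 0, "dst": 2, "weight": 5},
    {"src": 1, "dst": 2, "weight": -3},
    {"src": 2, "dst": 3, "weight": 2},
    {"src": 0, "dst": 3, "weight": 10},
]
# bellman_ford(example_graph, 4, 5, 0) -> [0.0, 4.0, 1.0, 3.0]
# (0 -> 1 costs 4; 0 -> 1 -> 2 costs 1, beating the direct 5; then 2 -> 3 adds 2)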
0
0
'''simple docstring''' from collections import namedtuple import requests from lxml import html # type: ignore UpperCamelCase__ : Union[str, Any] = namedtuple("covid_data", "cases deaths recovered") def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ = "https://www.worldometers.info/coronavirus/" ) -> covid_data: """simple docstring""" _SCREAMING_SNAKE_CASE = """//div[@class = \"maincounter-number\"]/span/text()""" return covid_data(*html.fromstring(requests.get(SCREAMING_SNAKE_CASE_ ).content ).xpath(SCREAMING_SNAKE_CASE_ ) ) UpperCamelCase__ : Tuple = "Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}" print(fmt.format(*covid_stats()))
707
'''simple docstring''' from __future__ import annotations import unittest from transformers import RoFormerConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForMultipleChoice, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerModel, ) from transformers.models.roformer.modeling_tf_roformer import ( TFRoFormerSelfAttention, TFRoFormerSinusoidalPositionalEmbedding, ) class _a : """simple docstring""" def __init__( self , A__ , A__=13 , A__=7 , A__=True , A__=True , A__=True , A__=True , A__=99 , A__=32 , A__=2 , A__=4 , A__=37 , A__="gelu" , A__=0.1 , A__=0.1 , A__=5_12 , A__=16 , A__=2 , A__=0.02 , A__=3 , A__=4 , A__=None , ) -> int: _SCREAMING_SNAKE_CASE = parent _SCREAMING_SNAKE_CASE = 13 _SCREAMING_SNAKE_CASE = 7 _SCREAMING_SNAKE_CASE = True _SCREAMING_SNAKE_CASE = True _SCREAMING_SNAKE_CASE = True _SCREAMING_SNAKE_CASE = True _SCREAMING_SNAKE_CASE = 99 _SCREAMING_SNAKE_CASE = 32 _SCREAMING_SNAKE_CASE = 2 _SCREAMING_SNAKE_CASE = 4 _SCREAMING_SNAKE_CASE = 37 _SCREAMING_SNAKE_CASE = """gelu""" _SCREAMING_SNAKE_CASE = 0.1 _SCREAMING_SNAKE_CASE = 0.1 _SCREAMING_SNAKE_CASE = 5_12 _SCREAMING_SNAKE_CASE = 16 _SCREAMING_SNAKE_CASE = 2 _SCREAMING_SNAKE_CASE = 0.02 _SCREAMING_SNAKE_CASE = 3 _SCREAMING_SNAKE_CASE = 4 _SCREAMING_SNAKE_CASE = None def UpperCamelCase ( self ) -> Tuple: _SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _SCREAMING_SNAKE_CASE = None if self.use_input_mask: _SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] ) _SCREAMING_SNAKE_CASE = None if self.use_token_type_ids: _SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _SCREAMING_SNAKE_CASE = None _SCREAMING_SNAKE_CASE = None _SCREAMING_SNAKE_CASE = None if self.use_labels: _SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices ) _SCREAMING_SNAKE_CASE = RoFormerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=A__ , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ) -> Union[str, Any]: _SCREAMING_SNAKE_CASE = TFRoFormerModel(config=A__ ) _SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} _SCREAMING_SNAKE_CASE = [input_ids, input_mask] _SCREAMING_SNAKE_CASE = model(A__ ) _SCREAMING_SNAKE_CASE = model(A__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 
self.seq_length, self.hidden_size) ) def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ) -> str: _SCREAMING_SNAKE_CASE = True _SCREAMING_SNAKE_CASE = TFRoFormerForCausalLM(config=A__ ) _SCREAMING_SNAKE_CASE = { """input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids, } _SCREAMING_SNAKE_CASE = model(A__ )["""logits"""] self.parent.assertListEqual( list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] ) def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ) -> Dict: _SCREAMING_SNAKE_CASE = TFRoFormerForMaskedLM(config=A__ ) _SCREAMING_SNAKE_CASE = { """input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids, } _SCREAMING_SNAKE_CASE = model(A__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ) -> List[str]: _SCREAMING_SNAKE_CASE = self.num_labels _SCREAMING_SNAKE_CASE = TFRoFormerForSequenceClassification(config=A__ ) _SCREAMING_SNAKE_CASE = { """input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids, } _SCREAMING_SNAKE_CASE = model(A__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ) -> Any: _SCREAMING_SNAKE_CASE = self.num_choices _SCREAMING_SNAKE_CASE = TFRoFormerForMultipleChoice(config=A__ ) _SCREAMING_SNAKE_CASE = tf.tile(tf.expand_dims(A__ , 1 ) , (1, self.num_choices, 1) ) _SCREAMING_SNAKE_CASE = tf.tile(tf.expand_dims(A__ , 1 ) , (1, self.num_choices, 1) ) _SCREAMING_SNAKE_CASE = tf.tile(tf.expand_dims(A__ , 1 ) , (1, self.num_choices, 1) ) _SCREAMING_SNAKE_CASE = { """input_ids""": multiple_choice_inputs_ids, """attention_mask""": multiple_choice_input_mask, """token_type_ids""": multiple_choice_token_type_ids, } _SCREAMING_SNAKE_CASE = model(A__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ) -> List[str]: _SCREAMING_SNAKE_CASE = self.num_labels _SCREAMING_SNAKE_CASE = TFRoFormerForTokenClassification(config=A__ ) _SCREAMING_SNAKE_CASE = { """input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids, } _SCREAMING_SNAKE_CASE = model(A__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ) -> Tuple: _SCREAMING_SNAKE_CASE = TFRoFormerForQuestionAnswering(config=A__ ) _SCREAMING_SNAKE_CASE = { """input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids, } _SCREAMING_SNAKE_CASE = model(A__ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCamelCase ( self ) -> List[str]: _SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() ( ( _SCREAMING_SNAKE_CASE ) , ( _SCREAMING_SNAKE_CASE ) , ( _SCREAMING_SNAKE_CASE ) , ( _SCREAMING_SNAKE_CASE ) , ( _SCREAMING_SNAKE_CASE ) , ( _SCREAMING_SNAKE_CASE ) , ( _SCREAMING_SNAKE_CASE ) , ) = config_and_inputs _SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_tf class _a (_lowerCamelCase , 
_lowerCamelCase , unittest.TestCase): """simple docstring""" SCREAMING_SNAKE_CASE = ( ( TFRoFormerModel, TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerForMultipleChoice, ) if is_tf_available() else () ) SCREAMING_SNAKE_CASE = ( { 'feature-extraction': TFRoFormerModel, 'fill-mask': TFRoFormerForMaskedLM, 'question-answering': TFRoFormerForQuestionAnswering, 'text-classification': TFRoFormerForSequenceClassification, 'text-generation': TFRoFormerForCausalLM, 'token-classification': TFRoFormerForTokenClassification, 'zero-shot': TFRoFormerForSequenceClassification, } if is_tf_available() else {} ) SCREAMING_SNAKE_CASE = False SCREAMING_SNAKE_CASE = False def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ ) -> str: if pipeline_test_casse_name == "TextGenerationPipelineTests": return True return False def UpperCamelCase ( self ) -> Union[str, Any]: _SCREAMING_SNAKE_CASE = TFRoFormerModelTester(self ) _SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=A__ , hidden_size=37 ) def UpperCamelCase ( self ) -> Optional[Any]: self.config_tester.run_common_tests() def UpperCamelCase ( self ) -> List[Any]: _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A__ ) def UpperCamelCase ( self ) -> str: _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*A__ ) def UpperCamelCase ( self ) -> int: _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head(*A__ ) def UpperCamelCase ( self ) -> Dict: _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*A__ ) def UpperCamelCase ( self ) -> Union[str, Any]: _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*A__ ) def UpperCamelCase ( self ) -> Tuple: _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*A__ ) def UpperCamelCase ( self ) -> List[Any]: _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*A__ ) @slow def UpperCamelCase ( self ) -> str: _SCREAMING_SNAKE_CASE = TFRoFormerModel.from_pretrained("""junnyu/roformer_chinese_base""" ) self.assertIsNotNone(A__ ) @require_tf class _a (unittest.TestCase): """simple docstring""" @slow def UpperCamelCase ( self ) -> Optional[Any]: _SCREAMING_SNAKE_CASE = TFRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""" ) _SCREAMING_SNAKE_CASE = tf.constant([[0, 1, 2, 3, 4, 5]] ) _SCREAMING_SNAKE_CASE = model(A__ )[0] # TODO Replace vocab size _SCREAMING_SNAKE_CASE = 5_00_00 _SCREAMING_SNAKE_CASE = [1, 6, vocab_size] self.assertEqual(output.shape , A__ ) print(output[:, :3, :3] ) # TODO Replace values below with what was printed above. 
_SCREAMING_SNAKE_CASE = tf.constant( [ [ [-0.1205_3341, -1.026_4901, 0.2922_1946], [-1.513_3783, 0.19_7433, 0.1519_0607], [-5.013_5403, -3.90_0256, -0.8403_8764], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , A__ , atol=1E-4 ) @require_tf class _a (unittest.TestCase): """simple docstring""" SCREAMING_SNAKE_CASE = 1E-4 def UpperCamelCase ( self ) -> List[Any]: _SCREAMING_SNAKE_CASE = tf.constant([[4, 10]] ) _SCREAMING_SNAKE_CASE = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 ) _SCREAMING_SNAKE_CASE = emba(input_ids.shape ) _SCREAMING_SNAKE_CASE = tf.constant( [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]] ) tf.debugging.assert_near(A__ , A__ , atol=self.tolerance ) def UpperCamelCase ( self ) -> List[Any]: _SCREAMING_SNAKE_CASE = tf.constant( [ [0.0000, 0.0000, 0.0000, 0.0000, 0.0000], [0.8415, 0.8219, 0.8020, 0.7819, 0.7617], [0.9093, 0.9364, 0.9581, 0.9749, 0.9870], ] ) _SCREAMING_SNAKE_CASE = TFRoFormerSinusoidalPositionalEmbedding(num_positions=5_12 , embedding_dim=5_12 ) emba([2, 16, 5_12] ) _SCREAMING_SNAKE_CASE = emba.weight[:3, :5] tf.debugging.assert_near(A__ , A__ , atol=self.tolerance ) @require_tf class _a (unittest.TestCase): """simple docstring""" SCREAMING_SNAKE_CASE = 1E-4 def UpperCamelCase ( self ) -> int: # 2,12,16,64 _SCREAMING_SNAKE_CASE = tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 1_00 _SCREAMING_SNAKE_CASE = -tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 1_00 _SCREAMING_SNAKE_CASE = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 ) _SCREAMING_SNAKE_CASE = embed_positions([2, 16, 7_68] )[None, None, :, :] _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = TFRoFormerSelfAttention.apply_rotary_position_embeddings( A__ , A__ , A__ ) _SCREAMING_SNAKE_CASE = tf.constant( [ [0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700], [-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343], [-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985], [-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871], [0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980], [3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253], ] ) _SCREAMING_SNAKE_CASE = tf.constant( [ [0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700], [0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343], [1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985], [2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871], [-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980], [-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253], ] ) tf.debugging.assert_near(query_layer[0, 0, :6, :8] , A__ , atol=self.tolerance ) tf.debugging.assert_near(key_layer[0, 0, :6, :8] , A__ , atol=self.tolerance )
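# The last test above exercises rotary position embeddings: each (even, odd)
# pair of query/key features is rotated by a position-dependent angle. A
# NumPy sketch of that rotation (illustrative only, not the exact
# TFRoFormerSelfAttention code path):
import numpy as np

def rotate_pairs(x: np.ndarray, sin: np.ndarray, cos: np.ndarray) -> np.ndarray:
    # pair (x1, x2) maps to (x1*cos - x2*sin, x2*cos + x1*sin)
    x_even, x_odd = x[..., 0::2], x[..., 1::2]
    rotated = np.stack([-x_odd, x_even], axis=-1).reshape(x.shape)
    return x * cos + rotated * sin

dim, pos = 8, 3
theta = pos / (10_000 ** (np.arange(0, dim, 2) / dim))
sin, cos = np.repeat(np.sin(theta), 2), np.repeat(np.cos(theta), 2)
print(rotate_pairs(np.ones(dim), sin, cos))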
0
0
'''simple docstring''' import argparse import re from pathlib import Path import requests import torch from PIL import Image from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor from transformers import ( EfficientFormerConfig, EfficientFormerForImageClassificationWithTeacher, EfficientFormerImageProcessor, ) from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Tuple: """simple docstring""" _SCREAMING_SNAKE_CASE = old_name if "patch_embed" in old_name: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = old_name.split(""".""" ) if layer == "0": _SCREAMING_SNAKE_CASE = old_name.replace("""0""" , """convolution1""" ) elif layer == "1": _SCREAMING_SNAKE_CASE = old_name.replace("""1""" , """batchnorm_before""" ) elif layer == "3": _SCREAMING_SNAKE_CASE = old_name.replace("""3""" , """convolution2""" ) else: _SCREAMING_SNAKE_CASE = old_name.replace("""4""" , """batchnorm_after""" ) if "network" in old_name and re.search(r"""\d\.\d""" , SCREAMING_SNAKE_CASE_ ): _SCREAMING_SNAKE_CASE = r"""\b\d{2}\b""" if bool(re.search(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ): _SCREAMING_SNAKE_CASE = re.search(r"""\d\.\d\d.""" , SCREAMING_SNAKE_CASE_ ).group() else: _SCREAMING_SNAKE_CASE = re.search(r"""\d\.\d.""" , SCREAMING_SNAKE_CASE_ ).group() if int(match[0] ) < 6: _SCREAMING_SNAKE_CASE = old_name.replace(SCREAMING_SNAKE_CASE_ , """""" ) _SCREAMING_SNAKE_CASE = trimmed_name.replace("""network""" , match[0] + """.meta4D_layers.blocks.""" + match[2:-1] ) _SCREAMING_SNAKE_CASE = """intermediate_stages.""" + trimmed_name else: _SCREAMING_SNAKE_CASE = old_name.replace(SCREAMING_SNAKE_CASE_ , """""" ) if int(match[2] ) < num_meta4D_last_stage: _SCREAMING_SNAKE_CASE = trimmed_name.replace("""network""" , """meta4D_layers.blocks.""" + match[2] ) else: _SCREAMING_SNAKE_CASE = str(int(match[2] ) - num_meta4D_last_stage ) _SCREAMING_SNAKE_CASE = trimmed_name.replace("""network""" , """meta3D_layers.blocks.""" + layer_index ) if "norm1" in old_name: _SCREAMING_SNAKE_CASE = trimmed_name.replace("""norm1""" , """layernorm1""" ) elif "norm2" in old_name: _SCREAMING_SNAKE_CASE = trimmed_name.replace("""norm2""" , """layernorm2""" ) elif "fc1" in old_name: _SCREAMING_SNAKE_CASE = trimmed_name.replace("""fc1""" , """linear_in""" ) elif "fc2" in old_name: _SCREAMING_SNAKE_CASE = trimmed_name.replace("""fc2""" , """linear_out""" ) _SCREAMING_SNAKE_CASE = """last_stage.""" + trimmed_name elif "network" in old_name and re.search(r""".\d.""" , SCREAMING_SNAKE_CASE_ ): _SCREAMING_SNAKE_CASE = old_name.replace("""network""" , """intermediate_stages""" ) if "fc" in new_name: _SCREAMING_SNAKE_CASE = new_name.replace("""fc""" , """convolution""" ) elif ("norm1" in new_name) and ("layernorm1" not in new_name): _SCREAMING_SNAKE_CASE = new_name.replace("""norm1""" , """batchnorm_before""" ) elif ("norm2" in new_name) and ("layernorm2" not in new_name): _SCREAMING_SNAKE_CASE = new_name.replace("""norm2""" , """batchnorm_after""" ) if "proj" in new_name: _SCREAMING_SNAKE_CASE = new_name.replace("""proj""" , """projection""" ) if "dist_head" in new_name: _SCREAMING_SNAKE_CASE = new_name.replace("""dist_head""" , """distillation_classifier""" ) elif "head" in new_name: _SCREAMING_SNAKE_CASE = new_name.replace("""head""" , """classifier""" ) elif "patch_embed" in new_name: _SCREAMING_SNAKE_CASE = """efficientformer.""" + new_name elif new_name == "norm.weight" or 
new_name == "norm.bias": _SCREAMING_SNAKE_CASE = new_name.replace("""norm""" , """layernorm""" ) _SCREAMING_SNAKE_CASE = """efficientformer.""" + new_name else: _SCREAMING_SNAKE_CASE = """efficientformer.encoder.""" + new_name return new_name def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Dict: """simple docstring""" for key in checkpoint.copy().keys(): _SCREAMING_SNAKE_CASE = checkpoint.pop(SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = val return checkpoint def lowerCAmelCase_ ( ) -> Union[str, Any]: """simple docstring""" _SCREAMING_SNAKE_CASE = """http://images.cocodataset.org/val2017/000000039769.jpg""" _SCREAMING_SNAKE_CASE = Image.open(requests.get(SCREAMING_SNAKE_CASE_ , stream=SCREAMING_SNAKE_CASE_ ).raw ) return image def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[Any]: """simple docstring""" _SCREAMING_SNAKE_CASE = torch.load(SCREAMING_SNAKE_CASE_ , map_location="""cpu""" )["""model"""] _SCREAMING_SNAKE_CASE = EfficientFormerConfig.from_json_file(SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = EfficientFormerForImageClassificationWithTeacher(SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = """_""".join(checkpoint_path.split("""/""" )[-1].split(""".""" )[0].split("""_""" )[:-1] ) _SCREAMING_SNAKE_CASE = config.depths[-1] - config.num_metaad_blocks + 1 _SCREAMING_SNAKE_CASE = convert_torch_checkpoint(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) model.load_state_dict(SCREAMING_SNAKE_CASE_ ) model.eval() _SCREAMING_SNAKE_CASE = { """bilinear""": PILImageResampling.BILINEAR, """bicubic""": PILImageResampling.BICUBIC, """nearest""": PILImageResampling.NEAREST, } # prepare image _SCREAMING_SNAKE_CASE = prepare_img() _SCREAMING_SNAKE_CASE = 2_56 _SCREAMING_SNAKE_CASE = 2_24 _SCREAMING_SNAKE_CASE = EfficientFormerImageProcessor( size={"""shortest_edge""": image_size} , crop_size={"""height""": crop_size, """width""": crop_size} , resample=pillow_resamplings["""bicubic"""] , ) _SCREAMING_SNAKE_CASE = processor(images=SCREAMING_SNAKE_CASE_ , return_tensors="""pt""" ).pixel_values # original processing pipeline _SCREAMING_SNAKE_CASE = Compose( [ Resize(SCREAMING_SNAKE_CASE_ , interpolation=pillow_resamplings["""bicubic"""] ), CenterCrop(SCREAMING_SNAKE_CASE_ ), ToTensor(), Normalize(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ), ] ) _SCREAMING_SNAKE_CASE = image_transforms(SCREAMING_SNAKE_CASE_ ).unsqueeze(0 ) assert torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = model(SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = outputs.logits _SCREAMING_SNAKE_CASE = (1, 10_00) if "l1" in model_name: _SCREAMING_SNAKE_CASE = torch.Tensor( [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328] ) assert torch.allclose(logits[0, :10] , SCREAMING_SNAKE_CASE_ , atol=1e-3 ) assert logits.shape == expected_shape elif "l3" in model_name: _SCREAMING_SNAKE_CASE = torch.Tensor( [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127] ) assert torch.allclose(logits[0, :10] , SCREAMING_SNAKE_CASE_ , atol=1e-3 ) assert logits.shape == expected_shape elif "l7" in model_name: _SCREAMING_SNAKE_CASE = torch.Tensor( [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878] ) assert logits.shape == expected_shape else: raise ValueError( F"Unknown model checkpoint: {checkpoint_path}. 
Supported version of efficientformer are l1, l3 and l7" ) # Save Checkpoints Path(SCREAMING_SNAKE_CASE_ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE_ ) model.save_pretrained(SCREAMING_SNAKE_CASE_ ) print(F"Checkpoint successfuly converted. Model saved at {pytorch_dump_path}" ) processor.save_pretrained(SCREAMING_SNAKE_CASE_ ) print(F"Processor successfuly saved at {pytorch_dump_path}" ) if push_to_hub: print("""Pushing model to the hub...""" ) model.push_to_hub( repo_id=F"Bearnardd/{pytorch_dump_path}" , commit_message="""Add model""" , use_temp_dir=SCREAMING_SNAKE_CASE_ , ) processor.push_to_hub( repo_id=F"Bearnardd/{pytorch_dump_path}" , commit_message="""Add image processor""" , use_temp_dir=SCREAMING_SNAKE_CASE_ , ) if __name__ == "__main__": UpperCamelCase__ : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--pytorch_model_path", default=None, type=str, required=True, help="Path to EfficientFormer pytorch checkpoint.", ) parser.add_argument( "--config_file", default=None, type=str, required=True, help="The json file for EfficientFormer model config.", ) parser.add_argument( "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub") parser.add_argument( "--no-push_to_hub", dest="push_to_hub", action="store_false", help="Do not push model and image processor to the hub", ) parser.set_defaults(push_to_hub=True) UpperCamelCase__ : str = parser.parse_args() convert_efficientformer_checkpoint( checkpoint_path=args.pytorch_model_path, efficientformer_config_file=args.config_file, pytorch_dump_path=args.pytorch_dump_path, push_to_hub=args.push_to_hub, )
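# Example invocation of the conversion entry point above (the script filename
# and local paths are placeholders; the flags come from the argparse setup):
#
#   python convert_efficientformer_checkpoint.py \
#       --pytorch_model_path efficientformer_l1.pth \
#       --config_file efficientformer_l1_config.json \
#       --pytorch_dump_path converted-efficientformer-l1 \
#       --push_to_hub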
708
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available UpperCamelCase__ : int = {"tokenization_herbert": ["HerbertTokenizer"]} try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ : Tuple = ["HerbertTokenizerFast"] if TYPE_CHECKING: from .tokenization_herbert import HerbertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_herbert_fast import HerbertTokenizerFast else: import sys UpperCamelCase__ : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
0
0
'''simple docstring''' import warnings from typing import List, Optional, Union from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class _a (_lowerCamelCase): """simple docstring""" SCREAMING_SNAKE_CASE = ['image_processor', 'tokenizer'] SCREAMING_SNAKE_CASE = 'FlavaImageProcessor' SCREAMING_SNAKE_CASE = ('BertTokenizer', 'BertTokenizerFast') def __init__( self , A__=None , A__=None , **A__ ) -> str: _SCREAMING_SNAKE_CASE = None if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""" , A__ , ) _SCREAMING_SNAKE_CASE = kwargs.pop("""feature_extractor""" ) _SCREAMING_SNAKE_CASE = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""" ) if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""" ) super().__init__(A__ , A__ ) _SCREAMING_SNAKE_CASE = self.image_processor def __call__( self , A__ = None , A__ = None , A__ = True , A__ = False , A__ = False , A__ = None , A__ = 0 , A__ = None , A__ = None , A__ = None , A__ = None , A__ = None , A__ = False , A__ = False , A__ = False , A__ = False , A__ = True , A__ = None , **A__ , ) -> str: if text is None and images is None: raise ValueError("""You have to specify either text or images. Both cannot be none.""" ) if text is not None: _SCREAMING_SNAKE_CASE = self.tokenizer( text=A__ , add_special_tokens=A__ , padding=A__ , truncation=A__ , max_length=A__ , stride=A__ , pad_to_multiple_of=A__ , return_token_type_ids=A__ , return_attention_mask=A__ , return_overflowing_tokens=A__ , return_special_tokens_mask=A__ , return_offsets_mapping=A__ , return_length=A__ , verbose=A__ , return_tensors=A__ , **A__ , ) if images is not None: _SCREAMING_SNAKE_CASE = self.image_processor( A__ , return_image_mask=A__ , return_codebook_pixels=A__ , return_tensors=A__ , **A__ , ) if text is not None and images is not None: encoding.update(A__ ) return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**A__ ) , tensor_type=A__ ) def UpperCamelCase ( self , *A__ , **A__ ) -> Dict: return self.tokenizer.batch_decode(*A__ , **A__ ) def UpperCamelCase ( self , *A__ , **A__ ) -> Any: return self.tokenizer.decode(*A__ , **A__ ) @property def UpperCamelCase ( self ) -> Optional[Any]: _SCREAMING_SNAKE_CASE = self.tokenizer.model_input_names _SCREAMING_SNAKE_CASE = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) @property def UpperCamelCase ( self ) -> List[Any]: warnings.warn( """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , A__ , ) return self.image_processor_class @property def UpperCamelCase ( self ) -> List[Any]: warnings.warn( """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , A__ , ) return self.image_processor
709
'''simple docstring''' import argparse import gdown import numpy as np import torch from huggingface_hub import hf_hub_download from transformers import ( CLIPTokenizer, CLIPTokenizerFast, VideoMAEImageProcessor, XCLIPConfig, XCLIPModel, XCLIPProcessor, XCLIPTextConfig, XCLIPVisionConfig, ) def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Dict: """simple docstring""" _SCREAMING_SNAKE_CASE = XCLIPTextConfig() # derive patch size from model name _SCREAMING_SNAKE_CASE = model_name.find("""patch""" ) _SCREAMING_SNAKE_CASE = int(model_name[start_idx + len("""patch""" ) : start_idx + len("""patch""" ) + 2] ) _SCREAMING_SNAKE_CASE = XCLIPVisionConfig(patch_size=SCREAMING_SNAKE_CASE_ , num_frames=SCREAMING_SNAKE_CASE_ ) if "large" in model_name: _SCREAMING_SNAKE_CASE = 7_68 _SCREAMING_SNAKE_CASE = 30_72 _SCREAMING_SNAKE_CASE = 12 _SCREAMING_SNAKE_CASE = 10_24 _SCREAMING_SNAKE_CASE = 40_96 _SCREAMING_SNAKE_CASE = 16 _SCREAMING_SNAKE_CASE = 24 _SCREAMING_SNAKE_CASE = 7_68 _SCREAMING_SNAKE_CASE = 30_72 if model_name == "xclip-large-patch14-16-frames": _SCREAMING_SNAKE_CASE = 3_36 _SCREAMING_SNAKE_CASE = XCLIPConfig.from_text_vision_configs(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if "large" in model_name: _SCREAMING_SNAKE_CASE = 7_68 return config def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> Dict: """simple docstring""" # text encoder if name == "token_embedding.weight": _SCREAMING_SNAKE_CASE = name.replace("""token_embedding.weight""" , """text_model.embeddings.token_embedding.weight""" ) if name == "positional_embedding": _SCREAMING_SNAKE_CASE = name.replace("""positional_embedding""" , """text_model.embeddings.position_embedding.weight""" ) if "ln_1" in name: _SCREAMING_SNAKE_CASE = name.replace("""ln_1""" , """layer_norm1""" ) if "ln_2" in name: _SCREAMING_SNAKE_CASE = name.replace("""ln_2""" , """layer_norm2""" ) if "c_fc" in name: _SCREAMING_SNAKE_CASE = name.replace("""c_fc""" , """fc1""" ) if "c_proj" in name: _SCREAMING_SNAKE_CASE = name.replace("""c_proj""" , """fc2""" ) if name.startswith("""transformer.resblocks""" ): _SCREAMING_SNAKE_CASE = name.replace("""transformer.resblocks""" , """text_model.encoder.layers""" ) if "attn.out_proj" in name and "message" not in name: _SCREAMING_SNAKE_CASE = name.replace("""attn.out_proj""" , """self_attn.out_proj""" ) if "ln_final" in name: _SCREAMING_SNAKE_CASE = name.replace("""ln_final""" , """text_model.final_layer_norm""" ) # visual encoder if name == "visual.class_embedding": _SCREAMING_SNAKE_CASE = name.replace("""visual.class_embedding""" , """vision_model.embeddings.class_embedding""" ) if name == "visual.positional_embedding": _SCREAMING_SNAKE_CASE = name.replace("""visual.positional_embedding""" , """vision_model.embeddings.position_embedding.weight""" ) if name.startswith("""visual.transformer.resblocks""" ): _SCREAMING_SNAKE_CASE = name.replace("""visual.transformer.resblocks""" , """vision_model.encoder.layers""" ) if "visual.conv1" in name: _SCREAMING_SNAKE_CASE = name.replace("""visual.conv1""" , """vision_model.embeddings.patch_embedding""" ) if "visual.ln_pre" in name: _SCREAMING_SNAKE_CASE = name.replace("""visual.ln_pre""" , """vision_model.pre_layernorm""" ) if "visual.ln_post" in name: _SCREAMING_SNAKE_CASE = name.replace("""visual.ln_post""" , """vision_model.post_layernorm""" ) if "visual.proj" in name: _SCREAMING_SNAKE_CASE = name.replace("""visual.proj""" , """visual_projection.weight""" ) if "text_projection" in name: _SCREAMING_SNAKE_CASE = name.replace("""text_projection""" , 
"""text_projection.weight""" ) # things on top if "prompts_visual_proj" in name: _SCREAMING_SNAKE_CASE = name.replace("""prompts_visual_proj""" , """prompts_visual_projection""" ) if "prompts_visual_ln" in name: _SCREAMING_SNAKE_CASE = name.replace("""prompts_visual_ln""" , """prompts_visual_layernorm""" ) # mit if name == "mit.positional_embedding": _SCREAMING_SNAKE_CASE = name.replace("""positional""" , """position""" ) if name.startswith("""mit.resblocks""" ): _SCREAMING_SNAKE_CASE = name.replace("""mit.resblocks""" , """mit.encoder.layers""" ) # prompts generator if name.startswith("""prompts_generator.norm""" ): _SCREAMING_SNAKE_CASE = name.replace("""prompts_generator.norm""" , """prompts_generator.layernorm""" ) return name def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]: """simple docstring""" for key in orig_state_dict.copy().keys(): _SCREAMING_SNAKE_CASE = orig_state_dict.pop(SCREAMING_SNAKE_CASE_ ) if "attn.in_proj" in key: _SCREAMING_SNAKE_CASE = key.split(""".""" ) if key.startswith("""visual""" ): _SCREAMING_SNAKE_CASE = key_split[3] _SCREAMING_SNAKE_CASE = config.vision_config.hidden_size if "message_attn" in key: if "weight" in key: _SCREAMING_SNAKE_CASE = val[ :dim, : ] _SCREAMING_SNAKE_CASE = val[ dim : dim * 2, : ] _SCREAMING_SNAKE_CASE = val[ -dim:, : ] else: _SCREAMING_SNAKE_CASE = val[ :dim ] _SCREAMING_SNAKE_CASE = val[ dim : dim * 2 ] _SCREAMING_SNAKE_CASE = val[ -dim: ] else: if "weight" in key: _SCREAMING_SNAKE_CASE = val[ :dim, : ] _SCREAMING_SNAKE_CASE = val[ dim : dim * 2, : ] _SCREAMING_SNAKE_CASE = val[ -dim:, : ] else: _SCREAMING_SNAKE_CASE = val[:dim] _SCREAMING_SNAKE_CASE = val[ dim : dim * 2 ] _SCREAMING_SNAKE_CASE = val[-dim:] elif key.startswith("""mit""" ): _SCREAMING_SNAKE_CASE = key_split[2] _SCREAMING_SNAKE_CASE = config.vision_config.mit_hidden_size if "weight" in key: _SCREAMING_SNAKE_CASE = val[:dim, :] _SCREAMING_SNAKE_CASE = val[dim : dim * 2, :] _SCREAMING_SNAKE_CASE = val[-dim:, :] else: _SCREAMING_SNAKE_CASE = val[:dim] _SCREAMING_SNAKE_CASE = val[dim : dim * 2] _SCREAMING_SNAKE_CASE = val[-dim:] else: _SCREAMING_SNAKE_CASE = key_split[2] _SCREAMING_SNAKE_CASE = config.text_config.hidden_size if "weight" in key: _SCREAMING_SNAKE_CASE = val[:dim, :] _SCREAMING_SNAKE_CASE = val[ dim : dim * 2, : ] _SCREAMING_SNAKE_CASE = val[-dim:, :] else: _SCREAMING_SNAKE_CASE = val[:dim] _SCREAMING_SNAKE_CASE = val[ dim : dim * 2 ] _SCREAMING_SNAKE_CASE = val[-dim:] else: _SCREAMING_SNAKE_CASE = rename_key(SCREAMING_SNAKE_CASE_ ) if new_key_name in ["visual_projection.weight", "text_projection.weight"]: _SCREAMING_SNAKE_CASE = val.T _SCREAMING_SNAKE_CASE = val return orig_state_dict def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> Optional[Any]: """simple docstring""" if num_frames == 8: _SCREAMING_SNAKE_CASE = """eating_spaghetti_8_frames.npy""" elif num_frames == 16: _SCREAMING_SNAKE_CASE = """eating_spaghetti.npy""" elif num_frames == 32: _SCREAMING_SNAKE_CASE = """eating_spaghetti_32_frames.npy""" _SCREAMING_SNAKE_CASE = hf_hub_download( repo_id="""hf-internal-testing/spaghetti-video""" , filename=SCREAMING_SNAKE_CASE_ , repo_type="""dataset""" , ) _SCREAMING_SNAKE_CASE = np.load(SCREAMING_SNAKE_CASE_ ) return list(SCREAMING_SNAKE_CASE_ ) def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=False ) -> Optional[int]: """simple docstring""" _SCREAMING_SNAKE_CASE = { # fully supervised kinetics-400 checkpoints """xclip-base-patch32""": 
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth""", """xclip-base-patch32-16-frames""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth""" ), """xclip-base-patch16""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth""", """xclip-base-patch16-16-frames""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth""" ), """xclip-large-patch14""": """https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&amp;export=download&amp;confirm=t&amp;uuid=b26caedc-88e2-473e-830a-9d158b653cdb""", """xclip-large-patch14-16-frames""": """https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&amp;export=download&amp;confirm=t&amp;uuid=538fa810-e671-4050-b385-9a623f89804f""", # fully supervised kinetics-600 checkpoints """xclip-base-patch16-kinetics-600""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth""" ), """xclip-base-patch16-kinetics-600-16-frames""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth""" ), """xclip-large-patch14-kinetics-600""": """https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&amp;export=download&amp;confirm=t&amp;uuid=141d4977-4a65-44ae-864f-4b0c19f838be""", # few shot """xclip-base-patch16-hmdb-2-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth""" ), """xclip-base-patch16-hmdb-4-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth""" ), """xclip-base-patch16-hmdb-8-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth""" ), """xclip-base-patch16-hmdb-16-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth""" ), """xclip-base-patch16-ucf-2-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth""" ), """xclip-base-patch16-ucf-4-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth""" ), """xclip-base-patch16-ucf-8-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth""" ), """xclip-base-patch16-ucf-16-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth""" ), # zero shot """xclip-base-patch16-zero-shot""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth""", } _SCREAMING_SNAKE_CASE = model_to_url[model_name] _SCREAMING_SNAKE_CASE = 8 if "16-frames" in model_name: _SCREAMING_SNAKE_CASE = 16 elif "shot" in model_name: _SCREAMING_SNAKE_CASE = 32 _SCREAMING_SNAKE_CASE = get_xclip_config(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = XCLIPModel(SCREAMING_SNAKE_CASE_ ) model.eval() if "drive" in checkpoint_url: _SCREAMING_SNAKE_CASE = """pytorch_model.bin""" gdown.cached_download(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , quiet=SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = torch.load(SCREAMING_SNAKE_CASE_ , map_location="""cpu""" )["""model"""] else: _SCREAMING_SNAKE_CASE = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE_ )["""model"""] _SCREAMING_SNAKE_CASE = convert_state_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = XCLIPModel(SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = model.load_state_dict(SCREAMING_SNAKE_CASE_ , strict=SCREAMING_SNAKE_CASE_ ) assert missing_keys == 
["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"] model.eval() _SCREAMING_SNAKE_CASE = 3_36 if model_name == """xclip-large-patch14-16-frames""" else 2_24 _SCREAMING_SNAKE_CASE = VideoMAEImageProcessor(size=SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained("""openai/clip-vit-base-patch32""" ) _SCREAMING_SNAKE_CASE = CLIPTokenizerFast.from_pretrained("""openai/clip-vit-base-patch32""" ) _SCREAMING_SNAKE_CASE = XCLIPProcessor(image_processor=SCREAMING_SNAKE_CASE_ , tokenizer=SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = prepare_video(SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = processor( text=["""playing sports""", """eating spaghetti""", """go shopping"""] , videos=SCREAMING_SNAKE_CASE_ , return_tensors="""pt""" , padding=SCREAMING_SNAKE_CASE_ ) print("""Shape of pixel values:""" , inputs.pixel_values.shape ) with torch.no_grad(): _SCREAMING_SNAKE_CASE = model(**SCREAMING_SNAKE_CASE_ ) # Verify outputs _SCREAMING_SNAKE_CASE = outputs.logits_per_video _SCREAMING_SNAKE_CASE = logits_per_video.softmax(dim=1 ) print("""Probs:""" , SCREAMING_SNAKE_CASE_ ) # kinetics-400 if model_name == "xclip-base-patch32": _SCREAMING_SNAKE_CASE = torch.tensor([[0.0019, 0.9951, 0.0030]] ) elif model_name == "xclip-base-patch32-16-frames": _SCREAMING_SNAKE_CASE = torch.tensor([[7.0999e-04, 9.9883e-01, 4.5580e-04]] ) elif model_name == "xclip-base-patch16": _SCREAMING_SNAKE_CASE = torch.tensor([[0.0083, 0.9681, 0.0236]] ) elif model_name == "xclip-base-patch16-16-frames": _SCREAMING_SNAKE_CASE = torch.tensor([[7.6937e-04, 9.9728e-01, 1.9473e-03]] ) elif model_name == "xclip-large-patch14": _SCREAMING_SNAKE_CASE = torch.tensor([[0.0062, 0.9864, 0.0075]] ) elif model_name == "xclip-large-patch14-16-frames": _SCREAMING_SNAKE_CASE = torch.tensor([[3.3877e-04, 9.9937e-01, 2.8888e-04]] ) # kinetics-600 elif model_name == "xclip-base-patch16-kinetics-600": _SCREAMING_SNAKE_CASE = torch.tensor([[0.0555, 0.8914, 0.0531]] ) elif model_name == "xclip-base-patch16-kinetics-600-16-frames": _SCREAMING_SNAKE_CASE = torch.tensor([[3.8554e-04, 9.9929e-01, 3.2754e-04]] ) elif model_name == "xclip-large-patch14-kinetics-600": _SCREAMING_SNAKE_CASE = torch.tensor([[0.0036, 0.9920, 0.0045]] ) # few shot elif model_name == "xclip-base-patch16-hmdb-2-shot": _SCREAMING_SNAKE_CASE = torch.tensor([[7.1890e-06, 9.9994e-01, 5.6559e-05]] ) elif model_name == "xclip-base-patch16-hmdb-4-shot": _SCREAMING_SNAKE_CASE = torch.tensor([[1.0320e-05, 9.9993e-01, 6.2435e-05]] ) elif model_name == "xclip-base-patch16-hmdb-8-shot": _SCREAMING_SNAKE_CASE = torch.tensor([[4.1377e-06, 9.9990e-01, 9.8386e-05]] ) elif model_name == "xclip-base-patch16-hmdb-16-shot": _SCREAMING_SNAKE_CASE = torch.tensor([[4.1347e-05, 9.9962e-01, 3.3411e-04]] ) elif model_name == "xclip-base-patch16-ucf-2-shot": _SCREAMING_SNAKE_CASE = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]] ) elif model_name == "xclip-base-patch16-ucf-4-shot": _SCREAMING_SNAKE_CASE = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]] ) elif model_name == "xclip-base-patch16-ucf-8-shot": _SCREAMING_SNAKE_CASE = torch.tensor([[0.0027, 0.9904, 0.0070]] ) elif model_name == "xclip-base-patch16-ucf-16-shot": _SCREAMING_SNAKE_CASE = torch.tensor([[9.8219e-04, 9.9593e-01, 3.0863e-03]] ) # zero shot elif model_name == "xclip-base-patch16-zero-shot": _SCREAMING_SNAKE_CASE = torch.tensor([[3.5082e-04, 9.9785e-01, 1.7966e-03]] ) else: raise ValueError(F"Model name {model_name} not supported" ) assert torch.allclose(SCREAMING_SNAKE_CASE_ , 
SCREAMING_SNAKE_CASE_ , atol=1e-3 ) print("""Looks ok!""" ) if pytorch_dump_folder_path is not None: print(F"Saving model {model_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(SCREAMING_SNAKE_CASE_ ) if push_to_hub: print("""Pushing model, processor and slow tokenizer files to the hub...""" ) model.push_to_hub(SCREAMING_SNAKE_CASE_ , organization="""nielsr""" ) processor.push_to_hub(SCREAMING_SNAKE_CASE_ , organization="""nielsr""" ) slow_tokenizer.push_to_hub(SCREAMING_SNAKE_CASE_ , organization="""nielsr""" ) if __name__ == "__main__": UpperCamelCase__ : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default="xclip-base-patch32", type=str, help="Name of the model.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) UpperCamelCase__ : str = parser.parse_args() convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
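For reference, a hedged sketch of how the conversion script above might be invoked; the script filename is an assumption, only the three argparse flags are defined in the source.

# Hypothetical invocation (script filename assumed, not given in this file):
#   python convert_x_clip_original_pytorch_checkpoint_to_pytorch.py \
#       --model_name xclip-base-patch32 \
#       --pytorch_dump_folder_path ./xclip-base-patch32 \
#       --push_to_hub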
0
0
'''simple docstring''' import unittest from transformers import GPTNeoXJapaneseConfig, is_torch_available from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel class _a : """simple docstring""" def __init__( self , A__ , A__=13 , A__=7 , A__=True , A__=True , A__=True , A__=True , A__=99 , A__=32 , A__=5 , A__=4 , A__=4 , A__="gelu" , A__=0.0 , A__=0.1 , A__=True , A__=5_12 , A__=16 , A__=2 , A__=0.02 , A__=3 , A__=4 , A__=None , ) -> Dict: _SCREAMING_SNAKE_CASE = parent _SCREAMING_SNAKE_CASE = batch_size _SCREAMING_SNAKE_CASE = seq_length _SCREAMING_SNAKE_CASE = is_training _SCREAMING_SNAKE_CASE = use_input_mask _SCREAMING_SNAKE_CASE = use_token_type_ids _SCREAMING_SNAKE_CASE = use_labels _SCREAMING_SNAKE_CASE = vocab_size _SCREAMING_SNAKE_CASE = hidden_size _SCREAMING_SNAKE_CASE = num_hidden_layers _SCREAMING_SNAKE_CASE = num_attention_heads _SCREAMING_SNAKE_CASE = intermediate_multiple_size _SCREAMING_SNAKE_CASE = hidden_act _SCREAMING_SNAKE_CASE = hidden_dropout _SCREAMING_SNAKE_CASE = attention_dropout _SCREAMING_SNAKE_CASE = weight_tying _SCREAMING_SNAKE_CASE = max_position_embeddings _SCREAMING_SNAKE_CASE = type_vocab_size _SCREAMING_SNAKE_CASE = type_sequence_label_size _SCREAMING_SNAKE_CASE = initializer_range _SCREAMING_SNAKE_CASE = num_labels _SCREAMING_SNAKE_CASE = num_choices _SCREAMING_SNAKE_CASE = scope def UpperCamelCase ( self ) -> List[Any]: _SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _SCREAMING_SNAKE_CASE = None if self.use_input_mask: _SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] ) _SCREAMING_SNAKE_CASE = None if self.use_labels: _SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _SCREAMING_SNAKE_CASE = self.get_config() return config, input_ids, input_mask, token_labels def UpperCamelCase ( self ) -> Any: return GPTNeoXJapaneseConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_multiple_size=self.intermediate_multiple_size , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , weight_tying=self.weight_tying , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A__ , initializer_range=self.initializer_range , ) def UpperCamelCase ( self ) -> int: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() _SCREAMING_SNAKE_CASE = True return config, input_ids, input_mask, token_labels def UpperCamelCase ( self , A__ , A__ , A__ ) -> Dict: _SCREAMING_SNAKE_CASE = GPTNeoXJapaneseModel(config=A__ ) model.to(A__ ) model.eval() _SCREAMING_SNAKE_CASE = model(A__ , attention_mask=A__ ) _SCREAMING_SNAKE_CASE = model(A__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCamelCase ( self , A__ , A__ , A__ ) -> str: _SCREAMING_SNAKE_CASE = True _SCREAMING_SNAKE_CASE = 
GPTNeoXJapaneseModel(A__ ) model.to(A__ ) model.eval() _SCREAMING_SNAKE_CASE = model(A__ , attention_mask=A__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCamelCase ( self , A__ , A__ , A__ , A__ ) -> Any: _SCREAMING_SNAKE_CASE = GPTNeoXJapaneseForCausalLM(config=A__ ) model.to(A__ ) model.eval() _SCREAMING_SNAKE_CASE = model(A__ , attention_mask=A__ , labels=A__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCamelCase ( self , A__ , A__ , A__ ) -> Optional[Any]: _SCREAMING_SNAKE_CASE = True _SCREAMING_SNAKE_CASE = GPTNeoXJapaneseForCausalLM(config=A__ ) model.to(A__ ) model.eval() # first forward pass _SCREAMING_SNAKE_CASE = model(A__ , attention_mask=A__ , use_cache=A__ ) _SCREAMING_SNAKE_CASE = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids _SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3) , config.vocab_size ) _SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and _SCREAMING_SNAKE_CASE = torch.cat([input_ids, next_tokens] , dim=-1 ) _SCREAMING_SNAKE_CASE = torch.cat([input_mask, next_mask] , dim=-1 ) _SCREAMING_SNAKE_CASE = model(A__ , attention_mask=A__ , output_hidden_states=A__ ) _SCREAMING_SNAKE_CASE = output_from_no_past["""hidden_states"""][0] _SCREAMING_SNAKE_CASE = model( A__ , attention_mask=A__ , past_key_values=A__ , output_hidden_states=A__ , )["""hidden_states"""][0] # select random slice _SCREAMING_SNAKE_CASE = ids_tensor((1,) , output_from_past.shape[-1] ).item() _SCREAMING_SNAKE_CASE = output_from_no_past[:, -3:, random_slice_idx].detach() _SCREAMING_SNAKE_CASE = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(A__ , A__ , atol=1E-3 ) ) def UpperCamelCase ( self ) -> int: _SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = config_and_inputs _SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class _a (_lowerCamelCase , _lowerCamelCase , unittest.TestCase): """simple docstring""" SCREAMING_SNAKE_CASE = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else () SCREAMING_SNAKE_CASE = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else () SCREAMING_SNAKE_CASE = ( {'feature-extraction': GPTNeoXJapaneseModel, 'text-generation': GPTNeoXJapaneseForCausalLM} if is_torch_available() else {} ) SCREAMING_SNAKE_CASE = False SCREAMING_SNAKE_CASE = False SCREAMING_SNAKE_CASE = False SCREAMING_SNAKE_CASE = False def UpperCamelCase ( self ) -> List[str]: _SCREAMING_SNAKE_CASE = GPTNeoXJapaneseModelTester(self ) _SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=A__ , hidden_size=37 ) def UpperCamelCase ( self ) -> Optional[int]: self.config_tester.run_common_tests() def UpperCamelCase ( self ) -> Dict: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(A__ , A__ , A__ ) def UpperCamelCase ( self ) -> int: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 
self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(A__ , A__ , A__ ) def UpperCamelCase ( self ) -> List[Any]: # This regression test was failing with PyTorch < 1.3 _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_decoder() _SCREAMING_SNAKE_CASE = None self.model_tester.create_and_check_model_as_decoder(A__ , A__ , A__ ) def UpperCamelCase ( self ) -> Dict: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(A__ , A__ , A__ ) def UpperCamelCase ( self ) -> List[Any]: _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_causal_lm(*A__ ) @slow def UpperCamelCase ( self ) -> List[Any]: _SCREAMING_SNAKE_CASE = """abeja/gpt-neox-japanese-2.7b""" _SCREAMING_SNAKE_CASE = ["""データサイエンティストとは、""", """100年後に必要とされる会社は、""", """フルリモートの環境で働くために必要なことは、""", """国境の長いトンネルを抜けると""", """美味しい日本食といえば、"""] _SCREAMING_SNAKE_CASE = [ """データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。""", """100年後に必要とされる会社は、「人」が中心の会社です。""", """フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。""", """国境の長いトンネルを抜けると、そこは雪国だった。""", """美味しい日本食といえば、やっぱりお寿司ですよね。""", ] _SCREAMING_SNAKE_CASE = GPTNeoXJapaneseTokenizer.from_pretrained(A__ ) _SCREAMING_SNAKE_CASE = GPTNeoXJapaneseForCausalLM.from_pretrained(A__ ) _SCREAMING_SNAKE_CASE = [] for prompt in prompts: _SCREAMING_SNAKE_CASE = tokenizer(A__ , return_tensors="""pt""" ).input_ids _SCREAMING_SNAKE_CASE = model.generate(A__ , max_length=50 ) _SCREAMING_SNAKE_CASE = tokenizer.batch_decode(A__ , skip_special_tokens=A__ ) predicted_outputs += generated_string self.assertListEqual(A__ , A__ )
710
'''simple docstring''' import numpy as np import torch from torch.utils.data import Dataset from utils import logger class _a (_lowerCamelCase): """simple docstring""" def __init__( self , A__ , A__ ) -> Any: _SCREAMING_SNAKE_CASE = params _SCREAMING_SNAKE_CASE = np.array(A__ ) _SCREAMING_SNAKE_CASE = np.array([len(A__ ) for t in data] ) self.check() self.remove_long_sequences() self.remove_empty_sequences() self.remove_unknown_sequences() self.check() self.print_statistics() def __getitem__( self , A__ ) -> Dict: return (self.token_ids[index], self.lengths[index]) def __len__( self ) -> Tuple: return len(self.lengths ) def UpperCamelCase ( self ) -> Dict: assert len(self.token_ids ) == len(self.lengths ) assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) ) def UpperCamelCase ( self ) -> List[str]: _SCREAMING_SNAKE_CASE = self.params.max_model_input_size _SCREAMING_SNAKE_CASE = self.lengths > max_len logger.info(F"Splitting {sum(A__ )} too long sequences." ) def divide_chunks(A__ , A__ ): return [l[i : i + n] for i in range(0 , len(A__ ) , A__ )] _SCREAMING_SNAKE_CASE = [] _SCREAMING_SNAKE_CASE = [] if self.params.mlm: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.params.special_tok_ids["""cls_token"""], self.params.special_tok_ids["""sep_token"""] else: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.params.special_tok_ids["""bos_token"""], self.params.special_tok_ids["""eos_token"""] for seq_, len_ in zip(self.token_ids , self.lengths ): assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_ if len_ <= max_len: new_tok_ids.append(seq_ ) new_lengths.append(len_ ) else: _SCREAMING_SNAKE_CASE = [] for sub_s in divide_chunks(seq_ , max_len - 2 ): if sub_s[0] != cls_id: _SCREAMING_SNAKE_CASE = np.insert(A__ , 0 , A__ ) if sub_s[-1] != sep_id: _SCREAMING_SNAKE_CASE = np.insert(A__ , len(A__ ) , A__ ) assert len(A__ ) <= max_len assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s sub_seqs.append(A__ ) new_tok_ids.extend(A__ ) new_lengths.extend([len(A__ ) for l in sub_seqs] ) _SCREAMING_SNAKE_CASE = np.array(A__ ) _SCREAMING_SNAKE_CASE = np.array(A__ ) def UpperCamelCase ( self ) -> List[str]: _SCREAMING_SNAKE_CASE = len(self ) _SCREAMING_SNAKE_CASE = self.lengths > 11 _SCREAMING_SNAKE_CASE = self.token_ids[indices] _SCREAMING_SNAKE_CASE = self.lengths[indices] _SCREAMING_SNAKE_CASE = len(self ) logger.info(F"Remove {init_size - new_size} too short (<=11 tokens) sequences." ) def UpperCamelCase ( self ) -> int: if "unk_token" not in self.params.special_tok_ids: return else: _SCREAMING_SNAKE_CASE = self.params.special_tok_ids["""unk_token"""] _SCREAMING_SNAKE_CASE = len(self ) _SCREAMING_SNAKE_CASE = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] ) _SCREAMING_SNAKE_CASE = (unk_occs / self.lengths) < 0.5 _SCREAMING_SNAKE_CASE = self.token_ids[indices] _SCREAMING_SNAKE_CASE = self.lengths[indices] _SCREAMING_SNAKE_CASE = len(self ) logger.info(F"Remove {init_size - new_size} sequences with a high level of unknown tokens (50%)." 
) def UpperCamelCase ( self ) -> Optional[Any]: if not self.params.is_master: return logger.info(F"{len(self )} sequences" ) # data_len = sum(self.lengths) # nb_unique_tokens = len(Counter(list(chain(*self.token_ids)))) # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)') # unk_idx = self.params.special_tok_ids['unk_token'] # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids]) # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)') def UpperCamelCase ( self , A__ ) -> Any: _SCREAMING_SNAKE_CASE = [t[0] for t in batch] _SCREAMING_SNAKE_CASE = [t[1] for t in batch] assert len(A__ ) == len(A__ ) # Max for paddings _SCREAMING_SNAKE_CASE = max(A__ ) # Pad token ids if self.params.mlm: _SCREAMING_SNAKE_CASE = self.params.special_tok_ids["""pad_token"""] else: _SCREAMING_SNAKE_CASE = self.params.special_tok_ids["""unk_token"""] _SCREAMING_SNAKE_CASE = [list(t.astype(A__ ) ) + [pad_idx] * (max_seq_len_ - len(A__ )) for t in token_ids] assert len(tk_ ) == len(A__ ) assert all(len(A__ ) == max_seq_len_ for t in tk_ ) _SCREAMING_SNAKE_CASE = torch.tensor(tk_ ) # (bs, max_seq_len_) _SCREAMING_SNAKE_CASE = torch.tensor(A__ ) # (bs) return tk_t, lg_t
0
0
'''simple docstring'''
def remove_digit(num: int) -> int:
    """Return the largest number obtainable by removing exactly one digit from num."""
    if not isinstance(num, int):
        raise TypeError("only integers accepted as input")
    num_str = str(abs(num))
    # One copy of the digit list per position; drop a different digit from each copy.
    num_transpositions = [list(num_str) for _ in range(len(num_str))]
    for index in range(len(num_str)):
        num_transpositions[index].pop(index)
    return max(int("".join(transposition)) for transposition in num_transpositions)


if __name__ == "__main__":
    __import__("doctest").testmod()
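A quick sanity check of the helper above:

# Dropping one digit from 152 yields 52, 12, or 15; the maximum is 52.
print(remove_digit(152))  # 52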
711
'''simple docstring''' import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging UpperCamelCase__ : List[Any] = logging.get_logger(__name__) UpperCamelCase__ : Any = "▁" UpperCamelCase__ : Any = {"vocab_file": "spiece.model"} UpperCamelCase__ : int = { "vocab_file": { "google/reformer-crime-and-punishment": ( "https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model" ) } } UpperCamelCase__ : Optional[int] = { "google/reformer-crime-and-punishment": 524_288, } class _a (_lowerCamelCase): """simple docstring""" SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE = ['input_ids', 'attention_mask'] def __init__( self , A__ , A__="</s>" , A__="<unk>" , A__=[] , A__ = None , **A__ , ) -> None: _SCREAMING_SNAKE_CASE = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( eos_token=A__ , unk_token=A__ , additional_special_tokens=A__ , sp_model_kwargs=self.sp_model_kwargs , **A__ , ) _SCREAMING_SNAKE_CASE = vocab_file _SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(A__ ) @property def UpperCamelCase ( self ) -> Any: return self.sp_model.get_piece_size() def UpperCamelCase ( self ) -> Dict[str, int]: _SCREAMING_SNAKE_CASE = {self.convert_ids_to_tokens(A__ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ) -> int: _SCREAMING_SNAKE_CASE = self.__dict__.copy() _SCREAMING_SNAKE_CASE = None return state def __setstate__( self , A__ ) -> str: _SCREAMING_SNAKE_CASE = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): _SCREAMING_SNAKE_CASE = {} _SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def UpperCamelCase ( self , A__ ) -> List[str]: return self.sp_model.encode(A__ , out_type=A__ ) def UpperCamelCase ( self , A__ ) -> Union[str, Any]: return self.sp_model.piece_to_id(A__ ) def UpperCamelCase ( self , A__ ) -> List[Any]: if index < self.sp_model.get_piece_size(): _SCREAMING_SNAKE_CASE = self.sp_model.IdToPiece(A__ ) return token def UpperCamelCase ( self , A__ ) -> str: _SCREAMING_SNAKE_CASE = [] _SCREAMING_SNAKE_CASE = """""" for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(A__ ) + token _SCREAMING_SNAKE_CASE = [] else: current_sub_tokens.append(A__ ) out_string += self.sp_model.decode(A__ ) return out_string.strip() def UpperCamelCase ( self , A__ , A__ = None ) -> Tuple[str]: if not os.path.isdir(A__ ): logger.error(F"Vocabulary path ({save_directory}) should be a directory" ) return _SCREAMING_SNAKE_CASE = os.path.join( A__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(A__ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , A__ ) elif not os.path.isfile(self.vocab_file ): with open(A__ , """wb""" ) as fi: _SCREAMING_SNAKE_CASE = self.sp_model.serialized_model_proto() fi.write(A__ ) return (out_vocab_file,)
0
0
'''simple docstring''' from __future__ import annotations import inspect import unittest import numpy as np from transformers import ResNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFResNetForImageClassification, TFResNetModel from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class _a : """simple docstring""" def __init__( self , A__ , A__=3 , A__=32 , A__=3 , A__=10 , A__=[10, 20, 30, 40] , A__=[1, 1, 2, 1] , A__=True , A__=True , A__="relu" , A__=3 , A__=None , ) -> List[str]: _SCREAMING_SNAKE_CASE = parent _SCREAMING_SNAKE_CASE = batch_size _SCREAMING_SNAKE_CASE = image_size _SCREAMING_SNAKE_CASE = num_channels _SCREAMING_SNAKE_CASE = embeddings_size _SCREAMING_SNAKE_CASE = hidden_sizes _SCREAMING_SNAKE_CASE = depths _SCREAMING_SNAKE_CASE = is_training _SCREAMING_SNAKE_CASE = use_labels _SCREAMING_SNAKE_CASE = hidden_act _SCREAMING_SNAKE_CASE = num_labels _SCREAMING_SNAKE_CASE = scope _SCREAMING_SNAKE_CASE = len(A__ ) def UpperCamelCase ( self ) -> Tuple: _SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _SCREAMING_SNAKE_CASE = None if self.use_labels: _SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_labels ) _SCREAMING_SNAKE_CASE = self.get_config() return config, pixel_values, labels def UpperCamelCase ( self ) -> Optional[int]: return ResNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , ) def UpperCamelCase ( self , A__ , A__ , A__ ) -> int: _SCREAMING_SNAKE_CASE = TFResNetModel(config=A__ ) _SCREAMING_SNAKE_CASE = model(A__ ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def UpperCamelCase ( self , A__ , A__ , A__ ) -> Tuple: _SCREAMING_SNAKE_CASE = self.num_labels _SCREAMING_SNAKE_CASE = TFResNetForImageClassification(A__ ) _SCREAMING_SNAKE_CASE = model(A__ , labels=A__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCamelCase ( self ) -> List[str]: _SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = config_and_inputs _SCREAMING_SNAKE_CASE = {"""pixel_values""": pixel_values} return config, inputs_dict @require_tf class _a (_lowerCamelCase , _lowerCamelCase , unittest.TestCase): """simple docstring""" SCREAMING_SNAKE_CASE = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else () SCREAMING_SNAKE_CASE = ( {'feature-extraction': TFResNetModel, 'image-classification': TFResNetForImageClassification} if is_tf_available() else {} ) SCREAMING_SNAKE_CASE = False SCREAMING_SNAKE_CASE = False SCREAMING_SNAKE_CASE = False SCREAMING_SNAKE_CASE = False SCREAMING_SNAKE_CASE = False def UpperCamelCase ( self ) -> Union[str, Any]: 
_SCREAMING_SNAKE_CASE = TFResNetModelTester(self ) _SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=A__ , has_text_modality=A__ ) def UpperCamelCase ( self ) -> int: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def UpperCamelCase ( self ) -> str: return @unittest.skip(reason="""ResNet does not use inputs_embeds""" ) def UpperCamelCase ( self ) -> Tuple: pass @unittest.skip(reason="""ResNet does not support input and output embeddings""" ) def UpperCamelCase ( self ) -> Union[str, Any]: pass def UpperCamelCase ( self ) -> str: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _SCREAMING_SNAKE_CASE = model_class(A__ ) _SCREAMING_SNAKE_CASE = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _SCREAMING_SNAKE_CASE = [*signature.parameters.keys()] _SCREAMING_SNAKE_CASE = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , A__ ) def UpperCamelCase ( self ) -> List[str]: _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A__ ) def UpperCamelCase ( self ) -> Optional[Any]: def check_hidden_states_output(A__ , A__ , A__ ): _SCREAMING_SNAKE_CASE = model_class(A__ ) _SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(A__ , A__ ) ) _SCREAMING_SNAKE_CASE = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states _SCREAMING_SNAKE_CASE = self.model_tester.num_stages self.assertEqual(len(A__ ) , expected_num_stages + 1 ) # ResNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() _SCREAMING_SNAKE_CASE = ["""basic""", """bottleneck"""] for model_class in self.all_model_classes: for layer_type in layers_type: _SCREAMING_SNAKE_CASE = layer_type _SCREAMING_SNAKE_CASE = True check_hidden_states_output(A__ , A__ , A__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _SCREAMING_SNAKE_CASE = True check_hidden_states_output(A__ , A__ , A__ ) def UpperCamelCase ( self ) -> int: _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*A__ ) @slow def UpperCamelCase ( self ) -> List[Any]: for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _SCREAMING_SNAKE_CASE = TFResNetModel.from_pretrained(A__ ) self.assertIsNotNone(A__ ) def lowerCAmelCase_ ( ) -> str: """simple docstring""" _SCREAMING_SNAKE_CASE = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_tf @require_vision class _a (unittest.TestCase): """simple docstring""" @cached_property def UpperCamelCase ( self ) -> Any: return ( AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def UpperCamelCase ( self ) -> str: _SCREAMING_SNAKE_CASE = 
TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) _SCREAMING_SNAKE_CASE = self.default_image_processor _SCREAMING_SNAKE_CASE = prepare_img() _SCREAMING_SNAKE_CASE = image_processor(images=A__ , return_tensors="""tf""" ) # forward pass _SCREAMING_SNAKE_CASE = model(**A__ ) # verify the logits _SCREAMING_SNAKE_CASE = tf.TensorShape((1, 10_00) ) self.assertEqual(outputs.logits.shape , A__ ) _SCREAMING_SNAKE_CASE = tf.constant([-11.1069, -9.7877, -8.3777] ) self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , A__ , atol=1E-4 ) )
712
'''simple docstring''' import os import unittest from transformers import MobileBertTokenizer, MobileBertTokenizerFast from transformers.models.bert.tokenization_bert import ( VOCAB_FILES_NAMES, BasicTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english @require_tokenizers class _a (_lowerCamelCase , unittest.TestCase): """simple docstring""" SCREAMING_SNAKE_CASE = MobileBertTokenizer SCREAMING_SNAKE_CASE = MobileBertTokenizerFast SCREAMING_SNAKE_CASE = True SCREAMING_SNAKE_CASE = True SCREAMING_SNAKE_CASE = filter_non_english SCREAMING_SNAKE_CASE = 'google/mobilebert-uncased' def UpperCamelCase ( self ) -> Any: super().setUp() _SCREAMING_SNAKE_CASE = [ """[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing""", """,""", """low""", """lowest""", ] _SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) ) _SCREAMING_SNAKE_CASE = [ (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped for tokenizer_def in self.tokenizers_list ] def UpperCamelCase ( self , A__ ) -> List[str]: _SCREAMING_SNAKE_CASE = """UNwant\u00E9d,running""" _SCREAMING_SNAKE_CASE = """unwanted, running""" return input_text, output_text def UpperCamelCase ( self ) -> Any: _SCREAMING_SNAKE_CASE = self.tokenizer_class(self.vocab_file ) _SCREAMING_SNAKE_CASE = tokenizer.tokenize("""UNwant\u00E9d,running""" ) self.assertListEqual(A__ , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(A__ ) , [9, 6, 7, 12, 10, 11] ) def UpperCamelCase ( self ) -> Optional[int]: if not self.test_rust_tokenizer: return _SCREAMING_SNAKE_CASE = self.get_tokenizer() _SCREAMING_SNAKE_CASE = self.get_rust_tokenizer() _SCREAMING_SNAKE_CASE = """UNwant\u00E9d,running""" _SCREAMING_SNAKE_CASE = tokenizer.tokenize(A__ ) _SCREAMING_SNAKE_CASE = rust_tokenizer.tokenize(A__ ) self.assertListEqual(A__ , A__ ) _SCREAMING_SNAKE_CASE = tokenizer.encode(A__ , add_special_tokens=A__ ) _SCREAMING_SNAKE_CASE = rust_tokenizer.encode(A__ , add_special_tokens=A__ ) self.assertListEqual(A__ , A__ ) _SCREAMING_SNAKE_CASE = self.get_rust_tokenizer() _SCREAMING_SNAKE_CASE = tokenizer.encode(A__ ) _SCREAMING_SNAKE_CASE = rust_tokenizer.encode(A__ ) self.assertListEqual(A__ , A__ ) # With lower casing _SCREAMING_SNAKE_CASE = self.get_tokenizer(do_lower_case=A__ ) _SCREAMING_SNAKE_CASE = self.get_rust_tokenizer(do_lower_case=A__ ) _SCREAMING_SNAKE_CASE = """UNwant\u00E9d,running""" _SCREAMING_SNAKE_CASE = tokenizer.tokenize(A__ ) _SCREAMING_SNAKE_CASE = rust_tokenizer.tokenize(A__ ) self.assertListEqual(A__ , A__ ) _SCREAMING_SNAKE_CASE = tokenizer.encode(A__ , add_special_tokens=A__ ) _SCREAMING_SNAKE_CASE = rust_tokenizer.encode(A__ , add_special_tokens=A__ ) self.assertListEqual(A__ , A__ ) _SCREAMING_SNAKE_CASE = self.get_rust_tokenizer() _SCREAMING_SNAKE_CASE = tokenizer.encode(A__ ) _SCREAMING_SNAKE_CASE = rust_tokenizer.encode(A__ ) self.assertListEqual(A__ , A__ ) def UpperCamelCase ( self ) -> Tuple: _SCREAMING_SNAKE_CASE = BasicTokenizer() self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""" ) , 
["""ah""", """\u535A""", """\u63A8""", """zz"""] ) def UpperCamelCase ( self ) -> List[Any]: _SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=A__ ) self.assertListEqual( tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""hello""", """!""", """how""", """are""", """you""", """?"""] ) self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] ) def UpperCamelCase ( self ) -> Optional[Any]: _SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=A__ , strip_accents=A__ ) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hällo""", """!""", """how""", """are""", """you""", """?"""] ) self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""h\u00E9llo"""] ) def UpperCamelCase ( self ) -> Any: _SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=A__ , strip_accents=A__ ) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] ) self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] ) def UpperCamelCase ( self ) -> Any: _SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=A__ ) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] ) self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] ) def UpperCamelCase ( self ) -> str: _SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=A__ ) self.assertListEqual( tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] ) def UpperCamelCase ( self ) -> Dict: _SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=A__ , strip_accents=A__ ) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""] ) def UpperCamelCase ( self ) -> Union[str, Any]: _SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=A__ , strip_accents=A__ ) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""] ) def UpperCamelCase ( self ) -> str: _SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=A__ , never_split=["""[UNK]"""] ) self.assertListEqual( tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? 
[UNK]""" ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""] ) def UpperCamelCase ( self ) -> Tuple: _SCREAMING_SNAKE_CASE = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""] _SCREAMING_SNAKE_CASE = {} for i, token in enumerate(A__ ): _SCREAMING_SNAKE_CASE = i _SCREAMING_SNAKE_CASE = WordpieceTokenizer(vocab=A__ , unk_token="""[UNK]""" ) self.assertListEqual(tokenizer.tokenize("""""" ) , [] ) self.assertListEqual(tokenizer.tokenize("""unwanted running""" ) , ["""un""", """##want""", """##ed""", """runn""", """##ing"""] ) self.assertListEqual(tokenizer.tokenize("""unwantedX running""" ) , ["""[UNK]""", """runn""", """##ing"""] ) def UpperCamelCase ( self ) -> str: self.assertTrue(_is_whitespace(""" """ ) ) self.assertTrue(_is_whitespace("""\t""" ) ) self.assertTrue(_is_whitespace("""\r""" ) ) self.assertTrue(_is_whitespace("""\n""" ) ) self.assertTrue(_is_whitespace("""\u00A0""" ) ) self.assertFalse(_is_whitespace("""A""" ) ) self.assertFalse(_is_whitespace("""-""" ) ) def UpperCamelCase ( self ) -> Union[str, Any]: self.assertTrue(_is_control("""\u0005""" ) ) self.assertFalse(_is_control("""A""" ) ) self.assertFalse(_is_control(""" """ ) ) self.assertFalse(_is_control("""\t""" ) ) self.assertFalse(_is_control("""\r""" ) ) def UpperCamelCase ( self ) -> Dict: self.assertTrue(_is_punctuation("""-""" ) ) self.assertTrue(_is_punctuation("""$""" ) ) self.assertTrue(_is_punctuation("""`""" ) ) self.assertTrue(_is_punctuation(""".""" ) ) self.assertFalse(_is_punctuation("""A""" ) ) self.assertFalse(_is_punctuation(""" """ ) ) def UpperCamelCase ( self ) -> str: _SCREAMING_SNAKE_CASE = self.get_tokenizer() _SCREAMING_SNAKE_CASE = self.get_rust_tokenizer() # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340 self.assertListEqual([tokenizer.tokenize(A__ ) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] ) self.assertListEqual( [rust_tokenizer.tokenize(A__ ) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] ) @slow def UpperCamelCase ( self ) -> Any: _SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained("""google/mobilebert-uncased""" ) _SCREAMING_SNAKE_CASE = tokenizer.encode("""sequence builders""" , add_special_tokens=A__ ) _SCREAMING_SNAKE_CASE = tokenizer.encode("""multi-sequence build""" , add_special_tokens=A__ ) _SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(A__ ) _SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(A__ , A__ ) assert encoded_sentence == [1_01] + text + [1_02] assert encoded_pair == [1_01] + text + [1_02] + text_a + [1_02] def UpperCamelCase ( self ) -> List[str]: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ): _SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(A__ , **A__ ) _SCREAMING_SNAKE_CASE = F"A, naïve {tokenizer_r.mask_token} AllenNLP sentence." 
_SCREAMING_SNAKE_CASE = tokenizer_r.encode_plus( A__ , return_attention_mask=A__ , return_token_type_ids=A__ , return_offsets_mapping=A__ , add_special_tokens=A__ , ) _SCREAMING_SNAKE_CASE = tokenizer_r.do_lower_case if hasattr(A__ , """do_lower_case""" ) else False _SCREAMING_SNAKE_CASE = ( [ ((0, 0), tokenizer_r.cls_token), ((0, 1), """A"""), ((1, 2), ""","""), ((3, 5), """na"""), ((5, 6), """##ï"""), ((6, 8), """##ve"""), ((9, 15), tokenizer_r.mask_token), ((16, 21), """Allen"""), ((21, 23), """##NL"""), ((23, 24), """##P"""), ((25, 33), """sentence"""), ((33, 34), """."""), ((0, 0), tokenizer_r.sep_token), ] if not do_lower_case else [ ((0, 0), tokenizer_r.cls_token), ((0, 1), """a"""), ((1, 2), ""","""), ((3, 8), """naive"""), ((9, 15), tokenizer_r.mask_token), ((16, 21), """allen"""), ((21, 23), """##nl"""), ((23, 24), """##p"""), ((25, 33), """sentence"""), ((33, 34), """."""), ((0, 0), tokenizer_r.sep_token), ] ) self.assertEqual( [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["""input_ids"""] ) ) self.assertEqual([e[0] for e in expected_results] , tokens["""offset_mapping"""] ) def UpperCamelCase ( self ) -> Any: _SCREAMING_SNAKE_CASE = ["""的""", """人""", """有"""] _SCREAMING_SNAKE_CASE = """""".join(A__ ) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ): _SCREAMING_SNAKE_CASE = True _SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained(A__ , **A__ ) _SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(A__ , **A__ ) _SCREAMING_SNAKE_CASE = tokenizer_p.encode(A__ , add_special_tokens=A__ ) _SCREAMING_SNAKE_CASE = tokenizer_r.encode(A__ , add_special_tokens=A__ ) _SCREAMING_SNAKE_CASE = tokenizer_r.convert_ids_to_tokens(A__ ) _SCREAMING_SNAKE_CASE = tokenizer_p.convert_ids_to_tokens(A__ ) # it is expected that each Chinese character is not preceded by "##" self.assertListEqual(A__ , A__ ) self.assertListEqual(A__ , A__ ) _SCREAMING_SNAKE_CASE = False _SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(A__ , **A__ ) _SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained(A__ , **A__ ) _SCREAMING_SNAKE_CASE = tokenizer_r.encode(A__ , add_special_tokens=A__ ) _SCREAMING_SNAKE_CASE = tokenizer_p.encode(A__ , add_special_tokens=A__ ) _SCREAMING_SNAKE_CASE = tokenizer_r.convert_ids_to_tokens(A__ ) _SCREAMING_SNAKE_CASE = tokenizer_p.convert_ids_to_tokens(A__ ) # it is expected that only the first Chinese character is not preceded by "##". _SCREAMING_SNAKE_CASE = [ F"##{token}" if idx != 0 else token for idx, token in enumerate(A__ ) ] self.assertListEqual(A__ , A__ ) self.assertListEqual(A__ , A__ )
0
0
'''simple docstring'''
from typing import List, Optional, Tuple, Union

import torch

from ...models import UNet2DModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class KarrasVePipeline(DiffusionPipeline):
    """Pipeline for unconditional image generation with the Karras et al. (2022) stochastic sampler."""

    unet: UNet2DModel
    scheduler: KarrasVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: KarrasVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 50,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        # sample x_0 ~ N(0, sigma_0^2 * I)
        sample = randn_tensor(shape, generator=generator, device=self.device) * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # here sigma_t == t_i from the paper
            sigma = self.scheduler.schedule[t]
            sigma_prev = self.scheduler.schedule[t - 1] if t > 0 else 0

            # 1. Select temporarily increased noise level sigma_hat
            # 2. Add new noise to move from sample_i to sample_hat
            sample_hat, sigma_hat = self.scheduler.add_noise_to_input(sample, sigma, generator=generator)

            # 3. Predict the noise residual given the noise magnitude `sigma_hat`
            # The model inputs and output are adjusted by following eq. (213) in [1].
            model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2, sigma_hat / 2).sample

            # 4. Evaluate dx/dt at sigma_hat
            # 5. Take Euler step from sigma to sigma_prev
            step_output = self.scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)

            if sigma_prev != 0:
                # 6. Apply 2nd order correction
                # The model inputs and output are adjusted by following eq. (213) in [1].
                model_output = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2, sigma_prev / 2).sample
                step_output = self.scheduler.step_correct(
                    model_output,
                    sigma_hat,
                    sigma_prev,
                    sample_hat,
                    step_output.prev_sample,
                    step_output["derivative"],
                )
            sample = step_output.prev_sample

        sample = (sample / 2 + 0.5).clamp(0, 1)
        image = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
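A minimal usage sketch of the pipeline above; the checkpoint name is an assumption for illustration, not something this file prescribes.

# Assumed: an unconditional UNet2DModel checkpoint compatible with KarrasVeScheduler.
import torch
from diffusers import KarrasVeScheduler, UNet2DModel

unet = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256")  # hypothetical checkpoint choice
pipe = KarrasVePipeline(unet=unet, scheduler=KarrasVeScheduler())
image = pipe(batch_size=1, num_inference_steps=50, generator=torch.manual_seed(0)).images[0]
image.save("karras_ve_sample.png")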
713
'''simple docstring''' import logging import os import quant_trainer import torch from torch.utils.data import DataLoader from transformers import Trainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput UpperCamelCase__ : Tuple = logging.getLogger(__name__) if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class _a (_lowerCamelCase): """simple docstring""" def __init__( self , *A__ , A__=None , A__=None , A__=None , **A__ ) -> Optional[int]: super().__init__(*A__ , **A__ ) _SCREAMING_SNAKE_CASE = eval_examples _SCREAMING_SNAKE_CASE = post_process_function _SCREAMING_SNAKE_CASE = quant_trainer_args _SCREAMING_SNAKE_CASE = 1_28 # default number of calibration samples def UpperCamelCase ( self , A__=None ) -> Union[str, Any]: if calib_dataset is None and self.calib_dataset is None: raise ValueError("""Trainer: calibration requires an calib_dataset.""" ) _SCREAMING_SNAKE_CASE = calib_dataset if calib_dataset is not None else self.calib_dataset _SCREAMING_SNAKE_CASE = self._remove_unused_columns(A__ , description="""Calibration""" ) return DataLoader( A__ , batch_size=self.args.eval_batch_size , collate_fn=self.data_collator , drop_last=self.args.dataloader_drop_last , num_workers=self.args.dataloader_num_workers , pin_memory=self.args.dataloader_pin_memory , shuffle=A__ , ) def UpperCamelCase ( self , A__=None ) -> str: _SCREAMING_SNAKE_CASE = self.train_dataset if calib_dataset is None else calib_dataset _SCREAMING_SNAKE_CASE = self.get_calib_dataloader(A__ ) _SCREAMING_SNAKE_CASE = self.model quant_trainer.configure_model(A__ , self.quant_trainer_args , calib=A__ ) model.eval() quant_trainer.enable_calibration(A__ ) logger.info("""***** Running calibration *****""" ) logger.info(F" Num examples = {self.calib_num}" ) logger.info(F" Batch size = {calib_dataloader.batch_size}" ) for step, inputs in enumerate(A__ ): # Prediction step _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.prediction_step(A__ , A__ , prediction_loss_only=A__ ) if (step + 1) * calib_dataloader.batch_size >= self.calib_num: break quant_trainer.finish_calibration(A__ , self.quant_trainer_args ) _SCREAMING_SNAKE_CASE = model def UpperCamelCase ( self , A__=None , A__=None , A__=None , A__ = "eval" ) -> List[Any]: _SCREAMING_SNAKE_CASE = self.eval_dataset if eval_dataset is None else eval_dataset _SCREAMING_SNAKE_CASE = self.get_eval_dataloader(A__ ) _SCREAMING_SNAKE_CASE = self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. 
_SCREAMING_SNAKE_CASE = self.compute_metrics _SCREAMING_SNAKE_CASE = None _SCREAMING_SNAKE_CASE = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: _SCREAMING_SNAKE_CASE = eval_loop( A__ , description="""Evaluation""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=A__ , ) finally: _SCREAMING_SNAKE_CASE = compute_metrics if self.post_process_function is not None and self.compute_metrics is not None: _SCREAMING_SNAKE_CASE = self.post_process_function(A__ , A__ , output.predictions ) _SCREAMING_SNAKE_CASE = self.compute_metrics(A__ ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(F"{metric_key_prefix}_" ): _SCREAMING_SNAKE_CASE = metrics.pop(A__ ) self.log(A__ ) else: _SCREAMING_SNAKE_CASE = {} if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) xm.master_print(met.metrics_report() ) _SCREAMING_SNAKE_CASE = self.callback_handler.on_evaluate(self.args , self.state , self.control , A__ ) return metrics def UpperCamelCase ( self , A__ , A__ , A__=None , A__ = "test" ) -> List[str]: _SCREAMING_SNAKE_CASE = self.get_test_dataloader(A__ ) # Temporarily disable metric computation, we will do it in the loop here. _SCREAMING_SNAKE_CASE = self.compute_metrics _SCREAMING_SNAKE_CASE = None _SCREAMING_SNAKE_CASE = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: _SCREAMING_SNAKE_CASE = eval_loop( A__ , description="""Prediction""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=A__ , ) finally: _SCREAMING_SNAKE_CASE = compute_metrics if self.post_process_function is None or self.compute_metrics is None: return output _SCREAMING_SNAKE_CASE = self.post_process_function(A__ , A__ , output.predictions , """predict""" ) _SCREAMING_SNAKE_CASE = self.compute_metrics(A__ ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(F"{metric_key_prefix}_" ): _SCREAMING_SNAKE_CASE = metrics.pop(A__ ) return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=A__ ) def UpperCamelCase ( self , A__="./" ) -> Tuple: _SCREAMING_SNAKE_CASE = self.eval_dataset _SCREAMING_SNAKE_CASE = self.get_eval_dataloader(A__ ) _SCREAMING_SNAKE_CASE = next(iter(A__ ) ) # saving device - to make it consistent _SCREAMING_SNAKE_CASE = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" ) # convert to tuple _SCREAMING_SNAKE_CASE = tuple(v.to(A__ ) for k, v in batch.items() ) logger.info("""Converting model to be onnx compatible""" ) from pytorch_quantization.nn import TensorQuantizer _SCREAMING_SNAKE_CASE = True _SCREAMING_SNAKE_CASE = self.model.to(A__ ) model.eval() model.float() _SCREAMING_SNAKE_CASE = model.module if hasattr(A__ , """module""" ) else model quant_trainer.configure_model(A__ , self.quant_trainer_args ) _SCREAMING_SNAKE_CASE = os.path.join(A__ , """model.onnx""" ) logger.info(F"exporting model to {output_model_file}" ) _SCREAMING_SNAKE_CASE = {0: """batch_size""", 1: """seq_len"""} torch.onnx.export( A__ , A__ , A__ , export_params=A__ , opset_version=13 , do_constant_folding=A__ , input_names=["""input_ids""", """attention_mask""", """token_type_ids"""] , output_names=["""output_start_logits""", """output_end_logits"""] , dynamic_axes={ """input_ids""": axes, """attention_mask""": axes, """token_type_ids""": axes, 
"""output_start_logits""": axes, """output_end_logits""": axes, } , verbose=A__ , ) logger.info("""onnx export finished""" )
0
0
from __future__ import annotations

from collections.abc import Sequence


def max_subsequence_sum(nums: Sequence[int] | None = None) -> int:
    """Return the maximum contiguous subsequence sum, using Kadane's algorithm."""
    if nums is None or not nums:
        raise ValueError("Input sequence should not be empty")

    ans = nums[0]
    for i in range(1, len(nums)):
        num = nums[i]
        ans = max(ans, ans + num, num)
    return ans


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Try on a sample input from the user
    n = int(input("Enter number of elements : ").strip())
    array = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n]
    print(max_subsequence_sum(array))
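A quick non-interactive check of the function above on a classic example:

print(max_subsequence_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4]))  # 6, from the subarray [4, -1, 2, 1]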
714
'''simple docstring'''
def base16_encode(data: bytes) -> str:
    """Encode raw bytes as an uppercase base16 (hex) string."""
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    """Decode an uppercase base16 (hex) string back into bytes."""
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid: Data does not have an even number of hex digits."
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid: Data is not uppercase hex or it contains invalid characters."
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
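A round-trip check of the two helpers above:

encoded = base16_encode(b"Hello World!")
print(encoded)                 # 48656C6C6F20576F726C6421
print(base16_decode(encoded))  # b'Hello World!'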
0
0
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
    "microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}


class MarkupLMConfig(PretrainedConfig):
    """Configuration class for MarkupLM models."""

    model_type = "markuplm"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0,
        eos_token_id=2,
        max_xpath_tag_unit_embeddings=256,
        max_xpath_subs_unit_embeddings=1024,
        tag_pad_id=216,
        subs_pad_id=1001,
        xpath_unit_hidden_size=32,
        max_depth=50,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
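A short usage sketch of the config class above:

# Override two of the MarkupLM-specific hyperparameters; the rest keep their defaults.
config = MarkupLMConfig(max_depth=40, xpath_unit_hidden_size=64)
print(config.hidden_size)  # 768 (default)
print(config.max_depth)    # 40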
715
'''simple docstring'''
import pytest
import requests

from datasets.utils.file_utils import http_head

from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline


@pytest.mark.integration
def test_offline_with_timeout():
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request("GET", "https://huggingface.co")
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request("GET", "https://huggingface.co", timeout=1.0)


@pytest.mark.integration
def test_offline_with_connection_error():
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request("GET", "https://huggingface.co")


def test_offline_with_datasets_offline_mode_enabled():
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(ConnectionError):
            http_head("https://huggingface.co")
0
0
from typing import Dict import numpy as np import torch from . import residue_constants as rc from .tensor_utils import tensor_tree_map, tree_map def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> Dict[str, torch.Tensor]: """simple docstring""" _SCREAMING_SNAKE_CASE = [] _SCREAMING_SNAKE_CASE = [] _SCREAMING_SNAKE_CASE = [] for rt in rc.restypes: _SCREAMING_SNAKE_CASE = rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]] restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names] ) _SCREAMING_SNAKE_CASE = {name: i for i, name in enumerate(SCREAMING_SNAKE_CASE_ )} restype_atomaa_to_atomaa_list.append( [(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types] ) restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names] ) # Add dummy mapping for restype 'UNK' restype_atomaa_to_atomaa_list.append([0] * 14 ) restype_atomaa_to_atomaa_list.append([0] * 37 ) restype_atomaa_mask_list.append([0.0] * 14 ) _SCREAMING_SNAKE_CASE = torch.tensor( SCREAMING_SNAKE_CASE_ , dtype=torch.intaa , device=protein["""aatype"""].device , ) _SCREAMING_SNAKE_CASE = torch.tensor( SCREAMING_SNAKE_CASE_ , dtype=torch.intaa , device=protein["""aatype"""].device , ) _SCREAMING_SNAKE_CASE = torch.tensor( SCREAMING_SNAKE_CASE_ , dtype=torch.floataa , device=protein["""aatype"""].device , ) _SCREAMING_SNAKE_CASE = protein["""aatype"""].to(torch.long ) # create the mapping for (residx, atom14) --> atom37, i.e. an array # with shape (num_res, 14) containing the atom37 indices for this protein _SCREAMING_SNAKE_CASE = restype_atomaa_to_atomaa[protein_aatype] _SCREAMING_SNAKE_CASE = restype_atomaa_mask[protein_aatype] _SCREAMING_SNAKE_CASE = residx_atomaa_mask _SCREAMING_SNAKE_CASE = residx_atomaa_to_atomaa.long() # create the gather indices for mapping back _SCREAMING_SNAKE_CASE = restype_atomaa_to_atomaa[protein_aatype] _SCREAMING_SNAKE_CASE = residx_atomaa_to_atomaa.long() # create the corresponding mask _SCREAMING_SNAKE_CASE = torch.zeros([21, 37] , dtype=torch.floataa , device=protein["""aatype"""].device ) for restype, restype_letter in enumerate(rc.restypes ): _SCREAMING_SNAKE_CASE = rc.restype_atoa[restype_letter] _SCREAMING_SNAKE_CASE = rc.residue_atoms[restype_name] for atom_name in atom_names: _SCREAMING_SNAKE_CASE = rc.atom_order[atom_name] _SCREAMING_SNAKE_CASE = 1 _SCREAMING_SNAKE_CASE = restype_atomaa_mask[protein_aatype] _SCREAMING_SNAKE_CASE = residx_atomaa_mask return protein def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> Dict[str, np.ndarray]: """simple docstring""" _SCREAMING_SNAKE_CASE = tree_map(lambda SCREAMING_SNAKE_CASE_ : torch.tensor(SCREAMING_SNAKE_CASE_ , device=batch["""aatype"""].device ) , SCREAMING_SNAKE_CASE_ , np.ndarray ) _SCREAMING_SNAKE_CASE = tensor_tree_map(lambda SCREAMING_SNAKE_CASE_ : np.array(SCREAMING_SNAKE_CASE_ ) , make_atomaa_masks(SCREAMING_SNAKE_CASE_ ) ) return out
716
"""Project Euler: sum all primes below a bound, via an incremental prime generator."""
import math
from collections.abc import Iterator
from itertools import takewhile


def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator() -> Iterator[int]:
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(n: int = 2_000_000) -> int:
    return sum(takewhile(lambda x: x < n, prime_generator()))


if __name__ == "__main__":
    print(f"{solution() = }")
0
0
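Two quick spot-checks for the prime-summing solution above, with values small enough to verify by hand:

# primes below 10: 2 + 3 + 5 + 7 = 17
assert solution(10) == 17
# the 25 primes below 100 sum to 1060
assert solution(100) == 1060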
'''simple docstring''' import logging import os from typing import List, TextIO, Union from conllu import parse_incr from utils_ner import InputExample, Split, TokenClassificationTask UpperCamelCase__ : Any = logging.getLogger(__name__) class _a (_lowerCamelCase): """simple docstring""" def __init__( self , A__=-1 ) -> str: # in NER datasets, the last column is usually reserved for NER label _SCREAMING_SNAKE_CASE = label_idx def UpperCamelCase ( self , A__ , A__ ) -> List[InputExample]: if isinstance(A__ , A__ ): _SCREAMING_SNAKE_CASE = mode.value _SCREAMING_SNAKE_CASE = os.path.join(A__ , F"{mode}.txt" ) _SCREAMING_SNAKE_CASE = 1 _SCREAMING_SNAKE_CASE = [] with open(A__ , encoding="""utf-8""" ) as f: _SCREAMING_SNAKE_CASE = [] _SCREAMING_SNAKE_CASE = [] for line in f: if line.startswith("""-DOCSTART-""" ) or line == "" or line == "\n": if words: examples.append(InputExample(guid=F"{mode}-{guid_index}" , words=A__ , labels=A__ ) ) guid_index += 1 _SCREAMING_SNAKE_CASE = [] _SCREAMING_SNAKE_CASE = [] else: _SCREAMING_SNAKE_CASE = line.split(""" """ ) words.append(splits[0] ) if len(A__ ) > 1: labels.append(splits[self.label_idx].replace("""\n""" , """""" ) ) else: # Examples could have no label for mode = "test" labels.append("""O""" ) if words: examples.append(InputExample(guid=F"{mode}-{guid_index}" , words=A__ , labels=A__ ) ) return examples def UpperCamelCase ( self , A__ , A__ , A__ ) -> Optional[Any]: _SCREAMING_SNAKE_CASE = 0 for line in test_input_reader: if line.startswith("""-DOCSTART-""" ) or line == "" or line == "\n": writer.write(A__ ) if not preds_list[example_id]: example_id += 1 elif preds_list[example_id]: _SCREAMING_SNAKE_CASE = line.split()[0] + """ """ + preds_list[example_id].pop(0 ) + """\n""" writer.write(A__ ) else: logger.warning("""Maximum sequence length exceeded: No prediction for '%s'.""" , line.split()[0] ) def UpperCamelCase ( self , A__ ) -> List[str]: if path: with open(A__ , """r""" ) as f: _SCREAMING_SNAKE_CASE = f.read().splitlines() if "O" not in labels: _SCREAMING_SNAKE_CASE = ["""O"""] + labels return labels else: return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"] class _a (_lowerCamelCase): """simple docstring""" def __init__( self ) -> List[str]: # in CONLL2003 dataset chunk column is second-to-last super().__init__(label_idx=-2 ) def UpperCamelCase ( self , A__ ) -> List[str]: if path: with open(A__ , """r""" ) as f: _SCREAMING_SNAKE_CASE = f.read().splitlines() if "O" not in labels: _SCREAMING_SNAKE_CASE = ["""O"""] + labels return labels else: return [ "O", "B-ADVP", "B-INTJ", "B-LST", "B-PRT", "B-NP", "B-SBAR", "B-VP", "B-ADJP", "B-CONJP", "B-PP", "I-ADVP", "I-INTJ", "I-LST", "I-PRT", "I-NP", "I-SBAR", "I-VP", "I-ADJP", "I-CONJP", "I-PP", ] class _a (_lowerCamelCase): """simple docstring""" def UpperCamelCase ( self , A__ , A__ ) -> List[InputExample]: if isinstance(A__ , A__ ): _SCREAMING_SNAKE_CASE = mode.value _SCREAMING_SNAKE_CASE = os.path.join(A__ , F"{mode}.txt" ) _SCREAMING_SNAKE_CASE = 1 _SCREAMING_SNAKE_CASE = [] with open(A__ , encoding="""utf-8""" ) as f: for sentence in parse_incr(A__ ): _SCREAMING_SNAKE_CASE = [] _SCREAMING_SNAKE_CASE = [] for token in sentence: words.append(token["""form"""] ) labels.append(token["""upos"""] ) assert len(A__ ) == len(A__ ) if words: examples.append(InputExample(guid=F"{mode}-{guid_index}" , words=A__ , labels=A__ ) ) guid_index += 1 return examples def UpperCamelCase ( self , A__ , A__ , A__ ) -> Dict: _SCREAMING_SNAKE_CASE = 0 for sentence in parse_incr(A__ ): 
_SCREAMING_SNAKE_CASE = preds_list[example_id] _SCREAMING_SNAKE_CASE = """""" for token in sentence: out += F"{token['form']} ({token['upos']}|{s_p.pop(0 )}) " out += "\n" writer.write(A__ ) example_id += 1 def UpperCamelCase ( self , A__ ) -> List[str]: if path: with open(A__ , """r""" ) as f: return f.read().splitlines() else: return [ "ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X", ]
717
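The readers above walk token-per-line files with blank lines separating sentences. A self-contained sketch of that parsing loop on an in-memory CoNLL-style string (the sample sentence is made up for illustration):

# Minimal CoNLL-style parsing, mirroring the sentence-accumulation loop above.
sample = """EU B-ORG
rejects O
German B-MISC
call O

Peter B-PER
Blackburn I-PER
"""

sentences = []
words, labels = [], []
for line in sample.splitlines():
    if not line.strip():  # a blank line ends the current sentence
        if words:
            sentences.append((words, labels))
            words, labels = [], []
        continue
    token, label = line.split()
    words.append(token)
    labels.append(label)
if words:  # flush the trailing sentence
    sentences.append((words, labels))

assert len(sentences) == 2
assert sentences[0][0][0] == "EU" and sentences[0][1][0] == "B-ORG"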
'''simple docstring''' import unittest import numpy as np from transformers.testing_utils import require_flax, require_tf, require_torch from transformers.utils import ( expand_dims, flatten_dict, is_flax_available, is_tf_available, is_torch_available, reshape, squeeze, transpose, ) if is_flax_available(): import jax.numpy as jnp if is_tf_available(): import tensorflow as tf if is_torch_available(): import torch class _a (unittest.TestCase): """simple docstring""" def UpperCamelCase ( self ) -> Optional[Any]: _SCREAMING_SNAKE_CASE = { """task_specific_params""": { """summarization""": {"""length_penalty""": 1.0, """max_length""": 1_28, """min_length""": 12, """num_beams""": 4}, """summarization_cnn""": {"""length_penalty""": 2.0, """max_length""": 1_42, """min_length""": 56, """num_beams""": 4}, """summarization_xsum""": {"""length_penalty""": 1.0, """max_length""": 62, """min_length""": 11, """num_beams""": 6}, } } _SCREAMING_SNAKE_CASE = { """task_specific_params.summarization.length_penalty""": 1.0, """task_specific_params.summarization.max_length""": 1_28, """task_specific_params.summarization.min_length""": 12, """task_specific_params.summarization.num_beams""": 4, """task_specific_params.summarization_cnn.length_penalty""": 2.0, """task_specific_params.summarization_cnn.max_length""": 1_42, """task_specific_params.summarization_cnn.min_length""": 56, """task_specific_params.summarization_cnn.num_beams""": 4, """task_specific_params.summarization_xsum.length_penalty""": 1.0, """task_specific_params.summarization_xsum.max_length""": 62, """task_specific_params.summarization_xsum.min_length""": 11, """task_specific_params.summarization_xsum.num_beams""": 6, } self.assertEqual(flatten_dict(A__ ) , A__ ) def UpperCamelCase ( self ) -> int: _SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 ) self.assertTrue(np.allclose(transpose(A__ ) , x.transpose() ) ) _SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 , 5 ) self.assertTrue(np.allclose(transpose(A__ , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) ) @require_torch def UpperCamelCase ( self ) -> Optional[Any]: _SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 ) _SCREAMING_SNAKE_CASE = torch.tensor(A__ ) self.assertTrue(np.allclose(transpose(A__ ) , transpose(A__ ).numpy() ) ) _SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 , 5 ) _SCREAMING_SNAKE_CASE = torch.tensor(A__ ) self.assertTrue(np.allclose(transpose(A__ , axes=(1, 2, 0) ) , transpose(A__ , axes=(1, 2, 0) ).numpy() ) ) @require_tf def UpperCamelCase ( self ) -> Optional[int]: _SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 ) _SCREAMING_SNAKE_CASE = tf.constant(A__ ) self.assertTrue(np.allclose(transpose(A__ ) , transpose(A__ ).numpy() ) ) _SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 , 5 ) _SCREAMING_SNAKE_CASE = tf.constant(A__ ) self.assertTrue(np.allclose(transpose(A__ , axes=(1, 2, 0) ) , transpose(A__ , axes=(1, 2, 0) ).numpy() ) ) @require_flax def UpperCamelCase ( self ) -> List[str]: _SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 ) _SCREAMING_SNAKE_CASE = jnp.array(A__ ) self.assertTrue(np.allclose(transpose(A__ ) , np.asarray(transpose(A__ ) ) ) ) _SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 , 5 ) _SCREAMING_SNAKE_CASE = jnp.array(A__ ) self.assertTrue(np.allclose(transpose(A__ , axes=(1, 2, 0) ) , np.asarray(transpose(A__ , axes=(1, 2, 0) ) ) ) ) def UpperCamelCase ( self ) -> Optional[int]: _SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 ) self.assertTrue(np.allclose(reshape(A__ , (4, 3) ) , np.reshape(A__ , (4, 3) ) ) ) _SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 , 5 ) 
self.assertTrue(np.allclose(reshape(A__ , (12, 5) ) , np.reshape(A__ , (12, 5) ) ) ) @require_torch def UpperCamelCase ( self ) -> Optional[int]: _SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 ) _SCREAMING_SNAKE_CASE = torch.tensor(A__ ) self.assertTrue(np.allclose(reshape(A__ , (4, 3) ) , reshape(A__ , (4, 3) ).numpy() ) ) _SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 , 5 ) _SCREAMING_SNAKE_CASE = torch.tensor(A__ ) self.assertTrue(np.allclose(reshape(A__ , (12, 5) ) , reshape(A__ , (12, 5) ).numpy() ) ) @require_tf def UpperCamelCase ( self ) -> Tuple: _SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 ) _SCREAMING_SNAKE_CASE = tf.constant(A__ ) self.assertTrue(np.allclose(reshape(A__ , (4, 3) ) , reshape(A__ , (4, 3) ).numpy() ) ) _SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 , 5 ) _SCREAMING_SNAKE_CASE = tf.constant(A__ ) self.assertTrue(np.allclose(reshape(A__ , (12, 5) ) , reshape(A__ , (12, 5) ).numpy() ) ) @require_flax def UpperCamelCase ( self ) -> List[Any]: _SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 ) _SCREAMING_SNAKE_CASE = jnp.array(A__ ) self.assertTrue(np.allclose(reshape(A__ , (4, 3) ) , np.asarray(reshape(A__ , (4, 3) ) ) ) ) _SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 , 5 ) _SCREAMING_SNAKE_CASE = jnp.array(A__ ) self.assertTrue(np.allclose(reshape(A__ , (12, 5) ) , np.asarray(reshape(A__ , (12, 5) ) ) ) ) def UpperCamelCase ( self ) -> Any: _SCREAMING_SNAKE_CASE = np.random.randn(1 , 3 , 4 ) self.assertTrue(np.allclose(squeeze(A__ ) , np.squeeze(A__ ) ) ) _SCREAMING_SNAKE_CASE = np.random.randn(1 , 4 , 1 , 5 ) self.assertTrue(np.allclose(squeeze(A__ , axis=2 ) , np.squeeze(A__ , axis=2 ) ) ) @require_torch def UpperCamelCase ( self ) -> Any: _SCREAMING_SNAKE_CASE = np.random.randn(1 , 3 , 4 ) _SCREAMING_SNAKE_CASE = torch.tensor(A__ ) self.assertTrue(np.allclose(squeeze(A__ ) , squeeze(A__ ).numpy() ) ) _SCREAMING_SNAKE_CASE = np.random.randn(1 , 4 , 1 , 5 ) _SCREAMING_SNAKE_CASE = torch.tensor(A__ ) self.assertTrue(np.allclose(squeeze(A__ , axis=2 ) , squeeze(A__ , axis=2 ).numpy() ) ) @require_tf def UpperCamelCase ( self ) -> List[str]: _SCREAMING_SNAKE_CASE = np.random.randn(1 , 3 , 4 ) _SCREAMING_SNAKE_CASE = tf.constant(A__ ) self.assertTrue(np.allclose(squeeze(A__ ) , squeeze(A__ ).numpy() ) ) _SCREAMING_SNAKE_CASE = np.random.randn(1 , 4 , 1 , 5 ) _SCREAMING_SNAKE_CASE = tf.constant(A__ ) self.assertTrue(np.allclose(squeeze(A__ , axis=2 ) , squeeze(A__ , axis=2 ).numpy() ) ) @require_flax def UpperCamelCase ( self ) -> Optional[int]: _SCREAMING_SNAKE_CASE = np.random.randn(1 , 3 , 4 ) _SCREAMING_SNAKE_CASE = jnp.array(A__ ) self.assertTrue(np.allclose(squeeze(A__ ) , np.asarray(squeeze(A__ ) ) ) ) _SCREAMING_SNAKE_CASE = np.random.randn(1 , 4 , 1 , 5 ) _SCREAMING_SNAKE_CASE = jnp.array(A__ ) self.assertTrue(np.allclose(squeeze(A__ , axis=2 ) , np.asarray(squeeze(A__ , axis=2 ) ) ) ) def UpperCamelCase ( self ) -> Optional[Any]: _SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 ) self.assertTrue(np.allclose(expand_dims(A__ , axis=1 ) , np.expand_dims(A__ , axis=1 ) ) ) @require_torch def UpperCamelCase ( self ) -> Optional[int]: _SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 ) _SCREAMING_SNAKE_CASE = torch.tensor(A__ ) self.assertTrue(np.allclose(expand_dims(A__ , axis=1 ) , expand_dims(A__ , axis=1 ).numpy() ) ) @require_tf def UpperCamelCase ( self ) -> str: _SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 ) _SCREAMING_SNAKE_CASE = tf.constant(A__ ) self.assertTrue(np.allclose(expand_dims(A__ , axis=1 ) , expand_dims(A__ , axis=1 ).numpy() ) ) @require_flax def 
UpperCamelCase ( self ) -> Any: _SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 ) _SCREAMING_SNAKE_CASE = jnp.array(A__ ) self.assertTrue(np.allclose(expand_dims(A__ , axis=1 ) , np.asarray(expand_dims(A__ , axis=1 ) ) ) )
0
0
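The utilities exercised above (`transpose`, `reshape`, `squeeze`, `expand_dims`) give one API across numpy, torch, TF, and JAX by dispatching on the array's concrete type. A numpy-first sketch of that dispatch pattern; the torch branch is guarded so the snippet also runs without torch installed:

import numpy as np


def transpose_any(x, axes=None):
    # Dispatch on the concrete array type instead of converting eagerly,
    # which is the pattern the tested utilities follow.
    if isinstance(x, np.ndarray):
        return np.transpose(x, axes=axes)
    try:
        import torch

        if isinstance(x, torch.Tensor):
            if axes is None:
                axes = tuple(reversed(range(x.dim())))
            return x.permute(*axes)
    except ImportError:
        pass
    raise TypeError(f"Unsupported type for transpose_any: {type(x)}")


x = np.random.randn(3, 4, 5)
assert transpose_any(x, axes=(1, 2, 0)).shape == (4, 5, 3)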
'''simple docstring''' def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> list[list]: """simple docstring""" _SCREAMING_SNAKE_CASE = current_set.copy() for row_index, row in enumerate(SCREAMING_SNAKE_CASE_ ): _SCREAMING_SNAKE_CASE = row[0] for column_index, column in enumerate(SCREAMING_SNAKE_CASE_ ): if magnitude == 0: _SCREAMING_SNAKE_CASE = column continue _SCREAMING_SNAKE_CASE = column / magnitude # Subtract to cancel term _SCREAMING_SNAKE_CASE = current_set[0] _SCREAMING_SNAKE_CASE = [first_row] _SCREAMING_SNAKE_CASE = current_set[1::] for row in current_set: _SCREAMING_SNAKE_CASE = [] # If first term is 0, it is already in form we want, so we preserve it if row[0] == 0: final_set.append(SCREAMING_SNAKE_CASE_ ) continue for column_index in range(len(SCREAMING_SNAKE_CASE_ ) ): temp_row.append(first_row[column_index] - row[column_index] ) final_set.append(SCREAMING_SNAKE_CASE_ ) # Create next recursion iteration set if len(final_set[0] ) != 3: _SCREAMING_SNAKE_CASE = final_set[0] _SCREAMING_SNAKE_CASE = [] _SCREAMING_SNAKE_CASE = [] for row in final_set[1::]: current_first_column.append(row[0] ) next_iteration.append(row[1::] ) _SCREAMING_SNAKE_CASE = simplify(SCREAMING_SNAKE_CASE_ ) for i in range(len(SCREAMING_SNAKE_CASE_ ) ): resultant[i].insert(0 , current_first_column[i] ) resultant.insert(0 , SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = resultant return final_set def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> list: """simple docstring""" if len(SCREAMING_SNAKE_CASE_ ) == 0: raise IndexError("""solve_simultaneous() requires n lists of length n+1""" ) _SCREAMING_SNAKE_CASE = len(SCREAMING_SNAKE_CASE_ ) + 1 if any(len(SCREAMING_SNAKE_CASE_ ) != _length for item in equations ): raise IndexError("""solve_simultaneous() requires n lists of length n+1""" ) for row in equations: if any(not isinstance(SCREAMING_SNAKE_CASE_ , (int, float) ) for column in row ): raise ValueError("""solve_simultaneous() requires lists of integers""" ) if len(SCREAMING_SNAKE_CASE_ ) == 1: return [equations[0][-1] / equations[0][0]] _SCREAMING_SNAKE_CASE = equations.copy() if any(0 in row for row in data_set ): _SCREAMING_SNAKE_CASE = data_set.copy() _SCREAMING_SNAKE_CASE = [] for row_index, row in enumerate(SCREAMING_SNAKE_CASE_ ): if 0 not in row: _SCREAMING_SNAKE_CASE = data_set.pop(SCREAMING_SNAKE_CASE_ ) break if not full_row: raise ValueError("""solve_simultaneous() requires at least 1 full equation""" ) data_set.insert(0 , SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = data_set.copy() _SCREAMING_SNAKE_CASE = simplify(SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = simplified[::-1] _SCREAMING_SNAKE_CASE = [] for row in simplified: _SCREAMING_SNAKE_CASE = row[-1] if not solutions: if row[-2] == 0: solutions.append(0 ) continue solutions.append(current_solution / row[-2] ) continue _SCREAMING_SNAKE_CASE = row.copy()[: len(SCREAMING_SNAKE_CASE_ ) - 1 :] while temp_row[0] == 0: temp_row.pop(0 ) if len(SCREAMING_SNAKE_CASE_ ) == 0: solutions.append(0 ) continue _SCREAMING_SNAKE_CASE = temp_row[1::] _SCREAMING_SNAKE_CASE = temp_row[::-1] for column_index, column in enumerate(SCREAMING_SNAKE_CASE_ ): current_solution -= column * solutions[column_index] solutions.append(SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = [] for item in solutions: final.append(float(round(SCREAMING_SNAKE_CASE_ , 5 ) ) ) return final[::-1] if __name__ == "__main__": import doctest doctest.testmod() UpperCamelCase__ : Union[str, Any] = [ [2, 1, 1, 1, 1, 4], [1, 2, 1, 1, 1, 5], [1, 1, 2, 1, 1, 6], [1, 1, 1, 2, 1, 7], [1, 1, 1, 1, 2, 
8], ] print(solve_simultaneous(eq)) print(solve_simultaneous([[4, 2]]))
718
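The elimination routine above can be cross-checked against numpy on the demo system. Each equation there reads (x1 + ... + x5) + x_i = b_i, so the unknowns sum to 5 and x_i = b_i - 5:

import numpy as np

eq = [
    [2, 1, 1, 1, 1, 4],
    [1, 2, 1, 1, 1, 5],
    [1, 1, 2, 1, 1, 6],
    [1, 1, 1, 2, 1, 7],
    [1, 1, 1, 1, 2, 8],
]
a = np.array([row[:-1] for row in eq], dtype=float)
b = np.array([row[-1] for row in eq], dtype=float)
assert np.allclose(np.linalg.solve(a, b), [-1.0, 0.0, 1.0, 2.0, 3.0])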
'''simple docstring''' from pathlib import PurePosixPath from typing import Optional import fsspec from fsspec import AbstractFileSystem from huggingface_hub.hf_api import DatasetInfo from ..utils.file_utils import get_authentication_headers_for_url from ..utils.hub import hf_hub_url class _a (_lowerCamelCase): """simple docstring""" SCREAMING_SNAKE_CASE = '' SCREAMING_SNAKE_CASE = 'hf-legacy' # "hf://"" is reserved for hffs def __init__( self , A__ = None , A__ = None , **A__ , ) -> Optional[int]: super().__init__(self , **A__ ) _SCREAMING_SNAKE_CASE = repo_info _SCREAMING_SNAKE_CASE = token _SCREAMING_SNAKE_CASE = None def UpperCamelCase ( self ) -> Tuple: if self.dir_cache is None: _SCREAMING_SNAKE_CASE = {} for hf_file in self.repo_info.siblings: # TODO(QL): add sizes _SCREAMING_SNAKE_CASE = { """name""": hf_file.rfilename, """size""": None, """type""": """file""", } self.dir_cache.update( { str(A__ ): {"""name""": str(A__ ), """size""": None, """type""": """directory"""} for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1] } ) def UpperCamelCase ( self , A__ , A__ = "rb" , **A__ , ) -> Optional[int]: if not isinstance(self.repo_info , A__ ): raise NotImplementedError(F"Open is only implemented for dataset repositories, but got {self.repo_info}" ) _SCREAMING_SNAKE_CASE = hf_hub_url(self.repo_info.id , A__ , revision=self.repo_info.sha ) return fsspec.open( A__ , mode=A__ , headers=get_authentication_headers_for_url(A__ , use_auth_token=self.token ) , client_kwargs={"""trust_env""": True} , ).open() def UpperCamelCase ( self , A__ , **A__ ) -> str: self._get_dirs() _SCREAMING_SNAKE_CASE = self._strip_protocol(A__ ) if path in self.dir_cache: return self.dir_cache[path] else: raise FileNotFoundError(A__ ) def UpperCamelCase ( self , A__ , A__=False , **A__ ) -> List[Any]: self._get_dirs() _SCREAMING_SNAKE_CASE = PurePosixPath(path.strip("""/""" ) ) _SCREAMING_SNAKE_CASE = {} for p, f in self.dir_cache.items(): _SCREAMING_SNAKE_CASE = PurePosixPath(p.strip("""/""" ) ) _SCREAMING_SNAKE_CASE = p.parent if root == path: _SCREAMING_SNAKE_CASE = f _SCREAMING_SNAKE_CASE = list(paths.values() ) if detail: return out else: return sorted(f["""name"""] for f in out )
0
0
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available

_import_structure = {"tokenization_herbert": ["HerbertTokenizer"]}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_herbert_fast"] = ["HerbertTokenizerFast"]

if TYPE_CHECKING:
    from .tokenization_herbert import HerbertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_herbert_fast import HerbertTokenizerFast
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
719
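The `_LazyModule` pattern above defers importing submodules until an attribute is first accessed. The same effect can be sketched with a PEP 562 module-level `__getattr__`; `_import_structure` here mirrors the mapping above, and this is a simplified stand-in, not the transformers implementation:

import importlib

_import_structure = {"tokenization_herbert": ["HerbertTokenizer"]}
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}


def __getattr__(name):
    # Only triggered when normal attribute lookup on the module fails.
    if name in _attr_to_module:
        module = importlib.import_module(f".{_attr_to_module[name]}", __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")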
'''simple docstring''' import pyarrow.parquet as pq import pytest from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config from datasets.features.image import Image from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]: """simple docstring""" assert isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""keep_in_memory""" , [False, True] ) def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[Any]: """simple docstring""" _SCREAMING_SNAKE_CASE = tmp_path / """cache""" _SCREAMING_SNAKE_CASE = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): _SCREAMING_SNAKE_CASE = ParquetDatasetReader(SCREAMING_SNAKE_CASE_ , cache_dir=SCREAMING_SNAKE_CASE_ , keep_in_memory=SCREAMING_SNAKE_CASE_ ).read() _check_parquet_dataset(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) @pytest.mark.parametrize( """features""" , [ None, {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}, {"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""}, {"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""}, {"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""}, ] , ) def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]: """simple docstring""" _SCREAMING_SNAKE_CASE = tmp_path / """cache""" _SCREAMING_SNAKE_CASE = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _SCREAMING_SNAKE_CASE = features.copy() if features else default_expected_features _SCREAMING_SNAKE_CASE = ( Features({feature: Value(SCREAMING_SNAKE_CASE_ ) for feature, dtype in features.items()} ) if features is not None else None ) _SCREAMING_SNAKE_CASE = ParquetDatasetReader(SCREAMING_SNAKE_CASE_ , features=SCREAMING_SNAKE_CASE_ , cache_dir=SCREAMING_SNAKE_CASE_ ).read() _check_parquet_dataset(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) @pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] ) def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[str]: """simple docstring""" _SCREAMING_SNAKE_CASE = tmp_path / """cache""" _SCREAMING_SNAKE_CASE = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _SCREAMING_SNAKE_CASE = ParquetDatasetReader(SCREAMING_SNAKE_CASE_ , cache_dir=SCREAMING_SNAKE_CASE_ , split=SCREAMING_SNAKE_CASE_ ).read() _check_parquet_dataset(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) assert dataset.split == split if split else "train" @pytest.mark.parametrize("""path_type""" , [str, list] ) def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]: """simple docstring""" if issubclass(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): _SCREAMING_SNAKE_CASE = parquet_path elif 
issubclass(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): _SCREAMING_SNAKE_CASE = [parquet_path] _SCREAMING_SNAKE_CASE = tmp_path / """cache""" _SCREAMING_SNAKE_CASE = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _SCREAMING_SNAKE_CASE = ParquetDatasetReader(SCREAMING_SNAKE_CASE_ , cache_dir=SCREAMING_SNAKE_CASE_ ).read() _check_parquet_dataset(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=("train",) ) -> List[str]: """simple docstring""" assert isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for split in splits: _SCREAMING_SNAKE_CASE = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""keep_in_memory""" , [False, True] ) def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[Any]: """simple docstring""" _SCREAMING_SNAKE_CASE = tmp_path / """cache""" _SCREAMING_SNAKE_CASE = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): _SCREAMING_SNAKE_CASE = ParquetDatasetReader( {"""train""": parquet_path} , cache_dir=SCREAMING_SNAKE_CASE_ , keep_in_memory=SCREAMING_SNAKE_CASE_ ).read() _check_parquet_datasetdict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) @pytest.mark.parametrize( """features""" , [ None, {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}, {"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""}, {"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""}, {"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""}, ] , ) def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[Any]: """simple docstring""" _SCREAMING_SNAKE_CASE = tmp_path / """cache""" _SCREAMING_SNAKE_CASE = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _SCREAMING_SNAKE_CASE = features.copy() if features else default_expected_features _SCREAMING_SNAKE_CASE = ( Features({feature: Value(SCREAMING_SNAKE_CASE_ ) for feature, dtype in features.items()} ) if features is not None else None ) _SCREAMING_SNAKE_CASE = ParquetDatasetReader({"""train""": parquet_path} , features=SCREAMING_SNAKE_CASE_ , cache_dir=SCREAMING_SNAKE_CASE_ ).read() _check_parquet_datasetdict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) @pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] ) def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[Any]: """simple docstring""" if split: _SCREAMING_SNAKE_CASE = {split: parquet_path} else: _SCREAMING_SNAKE_CASE = """train""" _SCREAMING_SNAKE_CASE = {"""train""": parquet_path, """test""": parquet_path} _SCREAMING_SNAKE_CASE = tmp_path / """cache""" _SCREAMING_SNAKE_CASE = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _SCREAMING_SNAKE_CASE = ParquetDatasetReader(SCREAMING_SNAKE_CASE_ , cache_dir=SCREAMING_SNAKE_CASE_ ).read() _check_parquet_datasetdict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , splits=list(path.keys() ) ) assert 
all(dataset[split].split == split for split in path.keys() ) def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Dict: """simple docstring""" _SCREAMING_SNAKE_CASE = ParquetDatasetWriter(SCREAMING_SNAKE_CASE_ , tmp_path / """foo.parquet""" ) assert writer.write() > 0 _SCREAMING_SNAKE_CASE = pq.ParquetFile(tmp_path / """foo.parquet""" ) _SCREAMING_SNAKE_CASE = pf.read() assert dataset.data.table == output_table def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[Any]: """simple docstring""" _SCREAMING_SNAKE_CASE = str(shared_datadir / """test_image_rgb.jpg""" ) _SCREAMING_SNAKE_CASE = {"""image""": [image_path]} _SCREAMING_SNAKE_CASE = Features({"""image""": Image()} ) _SCREAMING_SNAKE_CASE = Dataset.from_dict(SCREAMING_SNAKE_CASE_ , features=SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = ParquetDatasetWriter(SCREAMING_SNAKE_CASE_ , tmp_path / """foo.parquet""" ) assert writer.write() > 0 _SCREAMING_SNAKE_CASE = Dataset.from_parquet(str(tmp_path / """foo.parquet""" ) ) assert dataset.features == reloaded_dataset.features _SCREAMING_SNAKE_CASE = ParquetDatasetReader(str(tmp_path / """foo.parquet""" ) , streaming=SCREAMING_SNAKE_CASE_ ).read() assert dataset.features == reloaded_iterable_dataset.features @pytest.mark.parametrize( """feature, expected""" , [ (Features({"""foo""": Value("""int32""" )} ), None), (Features({"""image""": Image(), """foo""": Value("""int32""" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS), (Features({"""nested""": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS), ] , ) def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> str: """simple docstring""" assert get_writer_batch_size(SCREAMING_SNAKE_CASE_ ) == expected
0
0
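The writer test above asserts that the written table equals the table read back. A minimal parquet round trip with pyarrow alone shows the same invariant (the file name is illustrative and written to the current directory):

import pyarrow as pa
import pyarrow.parquet as pq

table = pa.table({"col_1": ["a", "b", "c", "d"], "col_2": [1, 2, 3, 4], "col_3": [1.0, 2.0, 3.0, 4.0]})
pq.write_table(table, "roundtrip.parquet")
assert pq.read_table("roundtrip.parquet") == table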
'''simple docstring''' import argparse import copy def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> Tuple: """simple docstring""" _SCREAMING_SNAKE_CASE = {} with open(SCREAMING_SNAKE_CASE_ ) as f: for line in f: if line.split()[0] not in dict_of_neighbours: _SCREAMING_SNAKE_CASE = [] _list.append([line.split()[1], line.split()[2]] ) _SCREAMING_SNAKE_CASE = _list else: dict_of_neighbours[line.split()[0]].append( [line.split()[1], line.split()[2]] ) if line.split()[1] not in dict_of_neighbours: _SCREAMING_SNAKE_CASE = [] _list.append([line.split()[0], line.split()[2]] ) _SCREAMING_SNAKE_CASE = _list else: dict_of_neighbours[line.split()[1]].append( [line.split()[0], line.split()[2]] ) return dict_of_neighbours def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> int: """simple docstring""" with open(SCREAMING_SNAKE_CASE_ ) as f: _SCREAMING_SNAKE_CASE = f.read(1 ) _SCREAMING_SNAKE_CASE = start_node _SCREAMING_SNAKE_CASE = [] _SCREAMING_SNAKE_CASE = start_node _SCREAMING_SNAKE_CASE = 0 while visiting not in first_solution: _SCREAMING_SNAKE_CASE = 1_00_00 for k in dict_of_neighbours[visiting]: if int(k[1] ) < int(SCREAMING_SNAKE_CASE_ ) and k[0] not in first_solution: _SCREAMING_SNAKE_CASE = k[1] _SCREAMING_SNAKE_CASE = k[0] first_solution.append(SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = distance_of_first_solution + int(SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = best_node first_solution.append(SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = 0 for k in dict_of_neighbours[first_solution[-2]]: if k[0] == start_node: break position += 1 _SCREAMING_SNAKE_CASE = ( distance_of_first_solution + int(dict_of_neighbours[first_solution[-2]][position][1] ) - 1_00_00 ) return first_solution, distance_of_first_solution def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Any: """simple docstring""" _SCREAMING_SNAKE_CASE = [] for n in solution[1:-1]: _SCREAMING_SNAKE_CASE = solution.index(SCREAMING_SNAKE_CASE_ ) for kn in solution[1:-1]: _SCREAMING_SNAKE_CASE = solution.index(SCREAMING_SNAKE_CASE_ ) if n == kn: continue _SCREAMING_SNAKE_CASE = copy.deepcopy(SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = kn _SCREAMING_SNAKE_CASE = n _SCREAMING_SNAKE_CASE = 0 for k in _tmp[:-1]: _SCREAMING_SNAKE_CASE = _tmp[_tmp.index(SCREAMING_SNAKE_CASE_ ) + 1] for i in dict_of_neighbours[k]: if i[0] == next_node: _SCREAMING_SNAKE_CASE = distance + int(i[1] ) _tmp.append(SCREAMING_SNAKE_CASE_ ) if _tmp not in neighborhood_of_solution: neighborhood_of_solution.append(_tmp ) _SCREAMING_SNAKE_CASE = len(neighborhood_of_solution[0] ) - 1 neighborhood_of_solution.sort(key=lambda SCREAMING_SNAKE_CASE_ : x[index_of_last_item_in_the_list] ) return neighborhood_of_solution def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]: """simple docstring""" _SCREAMING_SNAKE_CASE = 1 _SCREAMING_SNAKE_CASE = first_solution _SCREAMING_SNAKE_CASE = [] _SCREAMING_SNAKE_CASE = distance_of_first_solution _SCREAMING_SNAKE_CASE = solution while count <= iters: _SCREAMING_SNAKE_CASE = find_neighborhood(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = 0 _SCREAMING_SNAKE_CASE = neighborhood[index_of_best_solution] _SCREAMING_SNAKE_CASE = len(SCREAMING_SNAKE_CASE_ ) - 1 _SCREAMING_SNAKE_CASE = False while not found: _SCREAMING_SNAKE_CASE = 0 while i < len(SCREAMING_SNAKE_CASE_ ): if best_solution[i] != solution[i]: _SCREAMING_SNAKE_CASE = best_solution[i] _SCREAMING_SNAKE_CASE 
= solution[i] break _SCREAMING_SNAKE_CASE = i + 1 if [first_exchange_node, second_exchange_node] not in tabu_list and [ second_exchange_node, first_exchange_node, ] not in tabu_list: tabu_list.append([first_exchange_node, second_exchange_node] ) _SCREAMING_SNAKE_CASE = True _SCREAMING_SNAKE_CASE = best_solution[:-1] _SCREAMING_SNAKE_CASE = neighborhood[index_of_best_solution][best_cost_index] if cost < best_cost: _SCREAMING_SNAKE_CASE = cost _SCREAMING_SNAKE_CASE = solution else: _SCREAMING_SNAKE_CASE = index_of_best_solution + 1 _SCREAMING_SNAKE_CASE = neighborhood[index_of_best_solution] if len(SCREAMING_SNAKE_CASE_ ) >= size: tabu_list.pop(0 ) _SCREAMING_SNAKE_CASE = count + 1 return best_solution_ever, best_cost def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_=None ) -> List[Any]: """simple docstring""" _SCREAMING_SNAKE_CASE = generate_neighbours(args.File ) _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = generate_first_solution( args.File , SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = tabu_search( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , args.Iterations , args.Size , ) print(F"Best solution: {best_sol}, with total distance: {best_cost}." ) if __name__ == "__main__": UpperCamelCase__ : Tuple = argparse.ArgumentParser(description="Tabu Search") parser.add_argument( "-f", "--File", type=str, help="Path to the file containing the data", required=True, ) parser.add_argument( "-i", "--Iterations", type=int, help="How many iterations the algorithm should perform", required=True, ) parser.add_argument( "-s", "--Size", type=int, help="Size of the tabu list", required=True ) # Pass the arguments to main method main(parser.parse_args())
720
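The `find_neighborhood` step above enumerates all tours reachable by exchanging two interior nodes. A standalone sketch of that neighborhood generation, without the distance bookkeeping and with duplicates avoided up front rather than filtered out:

def two_swap_neighborhood(tour):
    # All tours obtained by exchanging two interior positions; the first and
    # last nodes (the depot) stay pinned, as in the script above.
    neighbors = []
    for i in range(1, len(tour) - 1):
        for j in range(i + 1, len(tour) - 1):
            candidate = tour.copy()
            candidate[i], candidate[j] = candidate[j], candidate[i]
            neighbors.append(candidate)
    return neighbors


tour = ["a", "b", "c", "d", "a"]
# Interior nodes are b, c, d -> C(3, 2) = 3 swapped variants.
assert len(two_swap_neighborhood(tour)) == 3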
"""Multiplicative and additive persistence of a non-negative integer."""


def multiplicative_persistence(num: int) -> int:
    """Count how many times the digits must be multiplied together before one digit remains."""
    if not isinstance(num, int):
        raise ValueError("multiplicative_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("multiplicative_persistence() does not accept negative values")
    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]
        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]
        num_string = str(total)
        steps += 1
    return steps


def additive_persistence(num: int) -> int:
    """Count how many times the digits must be summed before one digit remains."""
    if not isinstance(num, int):
        raise ValueError("additive_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("additive_persistence() does not accept negative values")
    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]
        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]
        num_string = str(total)
        steps += 1
    return steps


if __name__ == "__main__":
    import doctest

    doctest.testmod()
0
0
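Two worked examples for the functions above:

# Multiplicative: 39 -> 3*9 = 27 -> 2*7 = 14 -> 1*4 = 4, three steps.
assert multiplicative_persistence(39) == 3
# Additive: 39 -> 3+9 = 12 -> 1+2 = 3, two steps.
assert additive_persistence(39) == 2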
'''simple docstring''' import os import re import shutil import sys import tempfile import unittest import black UpperCamelCase__ : Optional[int] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, "utils")) import check_copies # noqa: E402 # This is the reference code that will be used in the tests. # If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated. UpperCamelCase__ : Optional[int] = " \"\"\"\n Output class for the scheduler's step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"\"\"\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n" class _a (unittest.TestCase): """simple docstring""" def UpperCamelCase ( self ) -> List[Any]: _SCREAMING_SNAKE_CASE = tempfile.mkdtemp() os.makedirs(os.path.join(self.diffusers_dir , """schedulers/""" ) ) _SCREAMING_SNAKE_CASE = self.diffusers_dir shutil.copy( os.path.join(A__ , """src/diffusers/schedulers/scheduling_ddpm.py""" ) , os.path.join(self.diffusers_dir , """schedulers/scheduling_ddpm.py""" ) , ) def UpperCamelCase ( self ) -> str: _SCREAMING_SNAKE_CASE = """src/diffusers""" shutil.rmtree(self.diffusers_dir ) def UpperCamelCase ( self , A__ , A__ , A__ , A__=None ) -> str: _SCREAMING_SNAKE_CASE = comment + F"\nclass {class_name}(nn.Module):\n" + class_code if overwrite_result is not None: _SCREAMING_SNAKE_CASE = comment + F"\nclass {class_name}(nn.Module):\n" + overwrite_result _SCREAMING_SNAKE_CASE = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=1_19 ) _SCREAMING_SNAKE_CASE = black.format_str(A__ , mode=A__ ) _SCREAMING_SNAKE_CASE = os.path.join(self.diffusers_dir , """new_code.py""" ) with open(A__ , """w""" , newline="""\n""" ) as f: f.write(A__ ) if overwrite_result is None: self.assertTrue(len(check_copies.is_copy_consistent(A__ ) ) == 0 ) else: check_copies.is_copy_consistent(f.name , overwrite=A__ ) with open(A__ , """r""" ) as f: self.assertTrue(f.read() , A__ ) def UpperCamelCase ( self ) -> List[str]: _SCREAMING_SNAKE_CASE = check_copies.find_code_in_diffusers("""schedulers.scheduling_ddpm.DDPMSchedulerOutput""" ) self.assertEqual(A__ , A__ ) def UpperCamelCase ( self ) -> str: # Base copy consistency self.check_copy_consistency( """# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput""" , """DDPMSchedulerOutput""" , REFERENCE_CODE + """\n""" , ) # With no empty line at the end self.check_copy_consistency( """# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput""" , """DDPMSchedulerOutput""" , A__ , ) # Copy consistency with rename self.check_copy_consistency( """# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test""" , """TestSchedulerOutput""" , re.sub("""DDPM""" , """Test""" , A__ ) , ) # Copy consistency with a really long name _SCREAMING_SNAKE_CASE = """TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason""" self.check_copy_consistency( F"# Copied from 
diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}" , F"{long_class_name}SchedulerOutput" , re.sub("""Bert""" , A__ , A__ ) , ) # Copy consistency with overwrite self.check_copy_consistency( """# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test""" , """TestSchedulerOutput""" , A__ , overwrite_result=re.sub("""DDPM""" , """Test""" , A__ ) , )
721
'''simple docstring''' import math import os import re import sys import unittest from pathlib import Path from typing import Tuple from unittest.mock import patch from parameterized import parameterized from transformers.testing_utils import ( CaptureStderr, ExtendSysPath, TestCasePlus, execute_subprocess_async, get_gpu_count, get_torch_dist_unique_port, require_apex, require_bitsandbytes, require_fairscale, require_torch, require_torch_gpu, require_torch_multi_gpu, require_torch_non_multi_gpu, slow, ) from transformers.trainer_callback import TrainerState from transformers.trainer_utils import set_seed UpperCamelCase__ : Tuple = os.path.abspath(os.path.dirname(__file__)) with ExtendSysPath(f"""{bindir}/../../examples/pytorch/translation"""): from run_translation import main # noqa set_seed(42) UpperCamelCase__ : Union[str, Any] = "sshleifer/student_marian_en_ro_6_1" UpperCamelCase__ : str = "sshleifer/tiny-mbart" @require_torch class _a (_lowerCamelCase): """simple docstring""" def UpperCamelCase ( self , A__=False , A__=None , A__=True , A__=True , A__=True , A__=True , ) -> Optional[Any]: _SCREAMING_SNAKE_CASE = self.run_trainer( eval_steps=1 , max_len=12 , model_name=A__ , num_train_epochs=1 , distributed=A__ , extra_args_str=A__ , predict_with_generate=A__ , do_train=A__ , do_eval=A__ , do_predict=A__ , ) _SCREAMING_SNAKE_CASE = TrainerState.load_from_json(os.path.join(A__ , """trainer_state.json""" ) ).log_history if not do_eval: return _SCREAMING_SNAKE_CASE = [log for log in logs if """eval_loss""" in log.keys()] _SCREAMING_SNAKE_CASE = eval_metrics[0] if predict_with_generate: assert "eval_bleu" in first_step_stats _SCREAMING_SNAKE_CASE = eval_metrics[-1] assert isinstance(last_step_stats["""eval_bleu"""] , A__ ) assert not math.isnan(float(last_step_stats["""eval_loss"""] ) ), "eval_loss must not be `nan`" @require_torch_non_multi_gpu def UpperCamelCase ( self ) -> Optional[int]: self.run_seqaseq_quick() @require_torch_multi_gpu def UpperCamelCase ( self ) -> Optional[Any]: self.run_seqaseq_quick(distributed=A__ ) @require_torch_multi_gpu def UpperCamelCase ( self ) -> Union[str, Any]: self.run_seqaseq_quick(distributed=A__ ) @unittest.skip("""Requires an update of the env running those tests""" ) @require_torch_multi_gpu @require_fairscale def UpperCamelCase ( self ) -> Any: self.run_seqaseq_quick(distributed=A__ , extra_args_str="""--sharded_ddp simple""" ) @unittest.skip("""Requires an update of the env running those tests""" ) @require_torch_multi_gpu @require_fairscale def UpperCamelCase ( self ) -> Tuple: self.run_seqaseq_quick(distributed=A__ , extra_args_str="""--sharded_ddp simple --fp16""" ) @unittest.skip("""Requires an update of the env running those tests""" ) @require_torch_multi_gpu @require_fairscale def UpperCamelCase ( self ) -> str: self.run_seqaseq_quick(distributed=A__ , extra_args_str="""--sharded_ddp zero_dp_2""" , predict_with_generate=A__ ) @unittest.skip("""Requires an update of the env running those tests""" ) @require_torch_multi_gpu @require_fairscale def UpperCamelCase ( self ) -> List[str]: self.run_seqaseq_quick( distributed=A__ , extra_args_str="""--sharded_ddp zero_dp_2 --fp16""" , predict_with_generate=A__ ) @require_apex @require_torch_gpu def UpperCamelCase ( self ) -> Optional[Any]: # XXX: apex breaks the trainer if it's run twice e.g. 
run_seq2seq.main() from the same # program and it breaks other tests that run from the same pytest worker, therefore until this is # sorted out it must be run only in an external program, that is distributed=True in this # test and only under one or more gpus - if we want cpu will need to make a special test # # specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via # 2nd main() call it botches the future eval. # self.run_seqaseq_quick(distributed=A__ , extra_args_str="""--fp16 --fp16_backend=apex""" ) # test 2nd time - was getting eval_loss': nan' # to reproduce the problem set distributed=False self.run_seqaseq_quick(distributed=A__ , extra_args_str="""--fp16 --fp16_backend=apex""" ) @parameterized.expand(["""base""", """low""", """high""", """mixed"""] ) @require_torch_multi_gpu def UpperCamelCase ( self , A__ ) -> List[Any]: # as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout _SCREAMING_SNAKE_CASE = { # test with the default log_level - should be info and thus log info once """base""": {"""extra_args_str""": """""", """n_matches""": 1}, # test with low log_level and log_level_replica - should be noisy on all processes # now the info string should appear twice on 2 processes """low""": {"""extra_args_str""": """--log_level debug --log_level_replica debug""", """n_matches""": 2}, # test with high log_level and low log_level_replica # now the info string should appear once only on the replica """high""": {"""extra_args_str""": """--log_level error --log_level_replica debug""", """n_matches""": 1}, # test with high log_level and log_level_replica - should be quiet on all processes """mixed""": {"""extra_args_str""": """--log_level error --log_level_replica error""", """n_matches""": 0}, } _SCREAMING_SNAKE_CASE = experiments[experiment_id] _SCREAMING_SNAKE_CASE = {"""distributed""": True, """predict_with_generate""": False, """do_eval""": False, """do_predict""": False} _SCREAMING_SNAKE_CASE = """Running training""" with CaptureStderr() as cl: self.run_seqaseq_quick(**A__ , extra_args_str=data["""extra_args_str"""] ) _SCREAMING_SNAKE_CASE = len(re.findall(A__ , cl.err ) ) self.assertEqual(A__ , data["""n_matches"""] ) @slow def UpperCamelCase ( self ) -> Any: _SCREAMING_SNAKE_CASE = self.run_trainer( eval_steps=2 , max_len=1_28 , model_name=A__ , learning_rate=3E-4 , num_train_epochs=10 , distributed=A__ , ) # Check metrics _SCREAMING_SNAKE_CASE = TrainerState.load_from_json(os.path.join(A__ , """trainer_state.json""" ) ).log_history _SCREAMING_SNAKE_CASE = [log for log in logs if """eval_loss""" in log.keys()] _SCREAMING_SNAKE_CASE = eval_metrics[0] _SCREAMING_SNAKE_CASE = eval_metrics[-1] assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing" assert isinstance(last_step_stats["""eval_bleu"""] , A__ ) # test if do_predict saves generations and metrics _SCREAMING_SNAKE_CASE = os.listdir(A__ ) _SCREAMING_SNAKE_CASE = {os.path.basename(A__ ) for p in contents} assert "generated_predictions.txt" in contents assert "predict_results.json" in contents @slow @require_bitsandbytes def UpperCamelCase ( self ) -> Dict: from transformers.training_args import OptimizerNames def train_and_return_metrics(A__ ) -> Tuple[int, float]: _SCREAMING_SNAKE_CASE = """--skip_memory_metrics 0""" _SCREAMING_SNAKE_CASE = self.run_trainer( max_len=1_28 , model_name=A__ , learning_rate=3E-4 , num_train_epochs=1 , optim=A__ , distributed=A__ , extra_args_str=A__ , do_eval=A__ , do_predict=A__ , n_gpus_to_use=1 , ) # 
Check metrics _SCREAMING_SNAKE_CASE = TrainerState.load_from_json(Path(A__ , """trainer_state.json""" ) ).log_history _SCREAMING_SNAKE_CASE = int(logs[0]["""train_mem_gpu_peaked_delta"""] / 2**20 ) _SCREAMING_SNAKE_CASE = int(logs[0]["""train_mem_gpu_alloc_delta"""] / 2**20 ) _SCREAMING_SNAKE_CASE = logs[0]["""train_loss"""] return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value ) _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value ) _SCREAMING_SNAKE_CASE = gpu_alloc_mem_orig - gpu_alloc_mem_bnb _SCREAMING_SNAKE_CASE = gpu_peak_mem_orig + gpu_alloc_mem_orig _SCREAMING_SNAKE_CASE = gpu_peak_mem_bnb + gpu_alloc_mem_bnb _SCREAMING_SNAKE_CASE = gpu_total_mem_orig - gpu_total_mem_bnb # sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which # doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized # in 2 bytes and the diff in optim memory usage is derived as so: # # - normal 25*8=~200MB (8 bytes per param) # - bnb 25*2= ~50MB (2 bytes per param) # # Thus we should expect ~150MB total memory saved. # # Peak memory should be the same - the total should be different by about that same margin # # After leaving a small margin to accommodate for differences between gpus let's check # that we have at least 120MB in savings _SCREAMING_SNAKE_CASE = 1_20 # uncomment the following if this test starts failing - requires py38 for a new print feature # gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb # print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB") # print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB") # print(f"{gpu_alloc_mem_diff=}MB") # print(f"{gpu_peak_mem_diff=}MB") # print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB") # print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB") self.assertGreater( A__ , A__ , """should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got""" F" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and" F" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB" , ) self.assertGreater( A__ , A__ , """should use ~150MB less total gpu memory with BNB, compared to without it for this model but got""" F" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and" F" gpu_total_mem_bnb={gpu_total_mem_bnb}MB" , ) self.assertEqual( A__ , A__ , F"loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}" ) def UpperCamelCase ( self , A__ , A__ , A__ , A__ = 3E-3 , A__ = "adafactor" , A__ = False , A__ = None , A__ = 0 , A__ = True , A__ = True , A__ = True , A__ = True , A__ = None , ) -> Dict: _SCREAMING_SNAKE_CASE = self.test_file_dir / """../fixtures/tests_samples/wmt_en_ro""" _SCREAMING_SNAKE_CASE = self.get_auto_remove_tmp_dir() _SCREAMING_SNAKE_CASE = F"\n --model_name_or_path {model_name}\n --train_file {data_dir}/train.json\n --validation_file {data_dir}/val.json\n --test_file {data_dir}/test.json\n --output_dir {output_dir}\n --overwrite_output_dir\n --max_train_samples 8\n --max_source_length {max_len}\n --max_target_length {max_len}\n --do_train\n --num_train_epochs {str(A__ )}\n --per_device_train_batch_size 4\n --learning_rate {learning_rate}\n --warmup_steps 8\n --logging_steps 0\n 
--logging_strategy no\n --save_steps {str(A__ )}\n --group_by_length\n --label_smoothing_factor 0.1\n --target_lang ro_RO\n --source_lang en_XX\n ".split() _SCREAMING_SNAKE_CASE = F"\n --do_eval\n --per_device_eval_batch_size 4\n --max_eval_samples 8\n --val_max_target_length {max_len}\n --evaluation_strategy steps\n --eval_steps {str(A__ )}\n ".split() _SCREAMING_SNAKE_CASE = """ --do_predict """.split() _SCREAMING_SNAKE_CASE = [] if do_train: args += args_train if do_eval: args += args_eval if do_predict: args += args_predict if predict_with_generate: args += "--predict_with_generate".split() if do_train: if optim == "adafactor": args += "--adafactor".split() else: args += F"--optim {optim}".split() if extra_args_str is not None: args += extra_args_str.split() if distributed: if n_gpus_to_use is None: _SCREAMING_SNAKE_CASE = get_gpu_count() _SCREAMING_SNAKE_CASE = get_torch_dist_unique_port() _SCREAMING_SNAKE_CASE = F"\n -m torch.distributed.run\n --nproc_per_node={n_gpus_to_use}\n --master_port={master_port}\n {self.examples_dir_str}/pytorch/translation/run_translation.py\n ".split() _SCREAMING_SNAKE_CASE = [sys.executable] + distributed_args + args # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die execute_subprocess_async(A__ , env=self.get_env() ) else: _SCREAMING_SNAKE_CASE = ["""run_translation.py"""] + args with patch.object(A__ , """argv""" , A__ ): main() return output_dir
0
0
'''simple docstring''' import multiprocessing import os from typing import BinaryIO, Optional, Union import fsspec from .. import Dataset, Features, NamedSplit, config from ..formatting import query_table from ..packaged_modules.json.json import Json from ..utils import logging from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader class _a (_lowerCamelCase): def __init__( self , A__ , A__ = None , A__ = None , A__ = None , A__ = False , A__ = False , A__ = None , A__ = None , **A__ , ) -> int: super().__init__( A__ , split=A__ , features=A__ , cache_dir=A__ , keep_in_memory=A__ , streaming=A__ , num_proc=A__ , **A__ , ) _SCREAMING_SNAKE_CASE = field _SCREAMING_SNAKE_CASE = path_or_paths if isinstance(A__ , A__ ) else {self.split: path_or_paths} _SCREAMING_SNAKE_CASE = Json( cache_dir=A__ , data_files=A__ , features=A__ , field=A__ , **A__ , ) def UpperCamelCase ( self ) -> int: # Build iterable dataset if self.streaming: _SCREAMING_SNAKE_CASE = self.builder.as_streaming_dataset(split=self.split ) # Build regular (map-style) dataset else: _SCREAMING_SNAKE_CASE = None _SCREAMING_SNAKE_CASE = None _SCREAMING_SNAKE_CASE = None _SCREAMING_SNAKE_CASE = None self.builder.download_and_prepare( download_config=A__ , download_mode=A__ , verification_mode=A__ , base_path=A__ , num_proc=self.num_proc , ) _SCREAMING_SNAKE_CASE = self.builder.as_dataset( split=self.split , verification_mode=A__ , in_memory=self.keep_in_memory ) return dataset class _a : def __init__( self , A__ , A__ , A__ = None , A__ = None , **A__ , ) -> List[str]: if num_proc is not None and num_proc <= 0: raise ValueError(F"num_proc {num_proc} must be an integer > 0." ) _SCREAMING_SNAKE_CASE = dataset _SCREAMING_SNAKE_CASE = path_or_buf _SCREAMING_SNAKE_CASE = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE _SCREAMING_SNAKE_CASE = num_proc _SCREAMING_SNAKE_CASE = """utf-8""" _SCREAMING_SNAKE_CASE = to_json_kwargs def UpperCamelCase ( self ) -> int: _SCREAMING_SNAKE_CASE = self.to_json_kwargs.pop("""path_or_buf""" , A__ ) _SCREAMING_SNAKE_CASE = self.to_json_kwargs.pop("""orient""" , """records""" ) _SCREAMING_SNAKE_CASE = self.to_json_kwargs.pop("""lines""" , True if orient == """records""" else False ) _SCREAMING_SNAKE_CASE = self.to_json_kwargs.pop("""index""" , False if orient in ["""split""", """table"""] else True ) _SCREAMING_SNAKE_CASE = self.to_json_kwargs.pop("""compression""" , A__ ) if compression not in [None, "infer", "gzip", "bz2", "xz"]: raise NotImplementedError(F"`datasets` currently does not support {compression} compression" ) if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ): with fsspec.open(self.path_or_buf , """wb""" , compression=A__ ) as buffer: _SCREAMING_SNAKE_CASE = self._write(file_obj=A__ , orient=A__ , lines=A__ , index=A__ , **self.to_json_kwargs ) else: if compression: raise NotImplementedError( F"The compression parameter is not supported when writing to a buffer, but compression={compression}" """ was passed. 
Please provide a local path instead.""" ) _SCREAMING_SNAKE_CASE = self._write( file_obj=self.path_or_buf , orient=A__ , lines=A__ , index=A__ , **self.to_json_kwargs ) return written def UpperCamelCase ( self , A__ ) -> List[Any]: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = args _SCREAMING_SNAKE_CASE = query_table( table=self.dataset.data , key=slice(A__ , offset + self.batch_size ) , indices=self.dataset._indices , ) _SCREAMING_SNAKE_CASE = batch.to_pandas().to_json( path_or_buf=A__ , orient=A__ , lines=A__ , index=A__ , **A__ ) if not json_str.endswith("""\n""" ): json_str += "\n" return json_str.encode(self.encoding ) def UpperCamelCase ( self , A__ , A__ , A__ , A__ , **A__ , ) -> int: _SCREAMING_SNAKE_CASE = 0 if self.num_proc is None or self.num_proc == 1: for offset in logging.tqdm( range(0 , len(self.dataset ) , self.batch_size ) , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating json from Arrow format""" , ): _SCREAMING_SNAKE_CASE = self._batch_json((offset, orient, lines, index, to_json_kwargs) ) written += file_obj.write(A__ ) else: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = len(self.dataset ), self.batch_size with multiprocessing.Pool(self.num_proc ) as pool: for json_str in logging.tqdm( pool.imap( self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , A__ , A__ )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating json from Arrow format""" , ): written += file_obj.write(A__ ) return written
700
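The writer above serializes the dataset in slices of `batch_size` rows, emitting one JSON object per line. A stdlib-only sketch of that batching loop over a plain list of dicts (`rows`, `batch_size`, and the output path are illustrative):

import json

rows = [{"id": i, "text": f"example {i}"} for i in range(10)]
batch_size = 4

with open("out.jsonl", "w", encoding="utf-8") as f:
    for offset in range(0, len(rows), batch_size):
        batch = rows[offset : offset + batch_size]
        # One JSON object per line, mirroring orient="records", lines=True.
        f.write("".join(json.dumps(row) + "\n" for row in batch))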
'''simple docstring''' import sys UpperCamelCase__ : int = ( "73167176531330624919225119674426574742355349194934" "96983520312774506326239578318016984801869478851843" "85861560789112949495459501737958331952853208805511" "12540698747158523863050715693290963295227443043557" "66896648950445244523161731856403098711121722383113" "62229893423380308135336276614282806444486645238749" "30358907296290491560440772390713810515859307960866" "70172427121883998797908792274921901699720888093776" "65727333001053367881220235421809751254540594752243" "52584907711670556013604839586446706324415722155397" "53697817977846174064955149290862569321978468622482" "83972241375657056057490261407972968652414535100474" "82166370484403199890008895243450658541227588666881" "16427171479924442928230863465674813919123162824586" "17866458359124566529476545682848912883142607690042" "24219022671055626321111109370544217506941658960408" "07198403850962455444362981230987879927244284909188" "84580156166097919133875499200524063689912560717606" "05886116467109405077541002256983155200055935729725" "71636269561882670428252483600823257530420752963450" ) def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ = N ) -> int: """simple docstring""" _SCREAMING_SNAKE_CASE = -sys.maxsize - 1 for i in range(len(SCREAMING_SNAKE_CASE_ ) - 12 ): _SCREAMING_SNAKE_CASE = 1 for j in range(13 ): product *= int(n[i + j] ) if product > largest_product: _SCREAMING_SNAKE_CASE = product return largest_product if __name__ == "__main__": print(f"""{solution() = }""")
0
0
'''simple docstring''' UpperCamelCase__ : List[str] = 256 # Modulus to hash a string UpperCamelCase__ : List[Any] = 1_000_003 def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> bool: """simple docstring""" _SCREAMING_SNAKE_CASE = len(SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = len(SCREAMING_SNAKE_CASE_ ) if p_len > t_len: return False _SCREAMING_SNAKE_CASE = 0 _SCREAMING_SNAKE_CASE = 0 _SCREAMING_SNAKE_CASE = 1 # Calculating the hash of pattern and substring of text for i in range(SCREAMING_SNAKE_CASE_ ): _SCREAMING_SNAKE_CASE = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus _SCREAMING_SNAKE_CASE = (ord(text[i] ) + text_hash * alphabet_size) % modulus if i == p_len - 1: continue _SCREAMING_SNAKE_CASE = (modulus_power * alphabet_size) % modulus for i in range(0 , t_len - p_len + 1 ): if text_hash == p_hash and text[i : i + p_len] == pattern: return True if i == t_len - p_len: continue # Calculate the https://en.wikipedia.org/wiki/Rolling_hash _SCREAMING_SNAKE_CASE = ( (text_hash - ord(text[i] ) * modulus_power) * alphabet_size + ord(text[i + p_len] ) ) % modulus return False def lowerCAmelCase_ ( ) -> None: """simple docstring""" _SCREAMING_SNAKE_CASE = """abc1abc12""" _SCREAMING_SNAKE_CASE = """alskfjaldsabc1abc1abc12k23adsfabcabc""" _SCREAMING_SNAKE_CASE = """alskfjaldsk23adsfabcabc""" assert rabin_karp(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) and not rabin_karp(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Test 2) _SCREAMING_SNAKE_CASE = """ABABX""" _SCREAMING_SNAKE_CASE = """ABABZABABYABABX""" assert rabin_karp(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Test 3) _SCREAMING_SNAKE_CASE = """AAAB""" _SCREAMING_SNAKE_CASE = """ABAAAAAB""" assert rabin_karp(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Test 4) _SCREAMING_SNAKE_CASE = """abcdabcy""" _SCREAMING_SNAKE_CASE = """abcxabcdabxabcdabcdabcy""" assert rabin_karp(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Test 5) _SCREAMING_SNAKE_CASE = """Lü""" _SCREAMING_SNAKE_CASE = """Lüsai""" assert rabin_karp(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = """Lue""" assert not rabin_karp(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) print("""Success.""" ) if __name__ == "__main__": test_rabin_karp()
701
'''simple docstring''' UpperCamelCase__ : Dict = { "a": "AAAAA", "b": "AAAAB", "c": "AAABA", "d": "AAABB", "e": "AABAA", "f": "AABAB", "g": "AABBA", "h": "AABBB", "i": "ABAAA", "j": "BBBAA", "k": "ABAAB", "l": "ABABA", "m": "ABABB", "n": "ABBAA", "o": "ABBAB", "p": "ABBBA", "q": "ABBBB", "r": "BAAAA", "s": "BAAAB", "t": "BAABA", "u": "BAABB", "v": "BBBAB", "w": "BABAA", "x": "BABAB", "y": "BABBA", "z": "BABBB", " ": " ", } UpperCamelCase__ : str = {value: key for key, value in encode_dict.items()} def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> str: """simple docstring""" _SCREAMING_SNAKE_CASE = """""" for letter in word.lower(): if letter.isalpha() or letter == " ": encoded += encode_dict[letter] else: raise Exception("""encode() accepts only letters of the alphabet and spaces""" ) return encoded def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> str: """simple docstring""" if set(SCREAMING_SNAKE_CASE_ ) - {"A", "B", " "} != set(): raise Exception("""decode() accepts only 'A', 'B' and spaces""" ) _SCREAMING_SNAKE_CASE = """""" for word in coded.split(): while len(SCREAMING_SNAKE_CASE_ ) != 0: decoded += decode_dict[word[:5]] _SCREAMING_SNAKE_CASE = word[5:] decoded += " " return decoded.strip() if __name__ == "__main__": from doctest import testmod testmod()
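# Minimal self-contained round trip (hypothetical two-letter table mirroring the
# scheme above): every letter becomes a fixed five-character A/B code, so decoding
# consumes each coded word in blocks of five.
_enc = {"a": "AAAAA", "b": "AAAAB", " ": " "}
_dec = {code: letter for letter, code in _enc.items() if letter != " "}

def _encode_demo(word: str) -> str:
    return "".join(_enc[ch] for ch in word.lower())

def _decode_demo(coded: str) -> str:
    words = []
    for chunk in coded.split():
        words.append("".join(_dec[chunk[i : i + 5]] for i in range(0, len(chunk), 5)))
    return " ".join(words)

assert _decode_demo(_encode_demo("ab ba")) == "ab ba"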
0
0
'''simple docstring''' import math import time from transformers import Trainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput, speed_metrics if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class _a (_lowerCamelCase): """simple docstring""" def __init__( self , *A__ , A__=None , A__=None , **A__ ) -> Union[str, Any]: super().__init__(*A__ , **A__ ) _SCREAMING_SNAKE_CASE = eval_examples _SCREAMING_SNAKE_CASE = post_process_function def UpperCamelCase ( self , A__=None , A__=None , A__=None , A__ = "eval" ) -> Optional[Any]: _SCREAMING_SNAKE_CASE = self.eval_dataset if eval_dataset is None else eval_dataset _SCREAMING_SNAKE_CASE = self.get_eval_dataloader(A__ ) _SCREAMING_SNAKE_CASE = self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. _SCREAMING_SNAKE_CASE = self.compute_metrics _SCREAMING_SNAKE_CASE = None _SCREAMING_SNAKE_CASE = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop _SCREAMING_SNAKE_CASE = time.time() try: _SCREAMING_SNAKE_CASE = eval_loop( A__ , description="""Evaluation""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=A__ , metric_key_prefix=A__ , ) finally: _SCREAMING_SNAKE_CASE = compute_metrics _SCREAMING_SNAKE_CASE = self.args.eval_batch_size * self.args.world_size if F"{metric_key_prefix}_jit_compilation_time" in output.metrics: start_time += output.metrics[F"{metric_key_prefix}_jit_compilation_time"] output.metrics.update( speed_metrics( A__ , A__ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save: # Only the main node write the results by default _SCREAMING_SNAKE_CASE = self.post_process_function(A__ , A__ , output.predictions ) _SCREAMING_SNAKE_CASE = self.compute_metrics(A__ ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(F"{metric_key_prefix}_" ): _SCREAMING_SNAKE_CASE = metrics.pop(A__ ) metrics.update(output.metrics ) else: _SCREAMING_SNAKE_CASE = output.metrics if self.args.should_log: # Only the main node log the results by default self.log(A__ ) if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) xm.master_print(met.metrics_report() ) _SCREAMING_SNAKE_CASE = self.callback_handler.on_evaluate(self.args , self.state , self.control , A__ ) return metrics def UpperCamelCase ( self , A__ , A__ , A__=None , A__ = "test" ) -> List[str]: _SCREAMING_SNAKE_CASE = self.get_test_dataloader(A__ ) # Temporarily disable metric computation, we will do it in the loop here. 
_SCREAMING_SNAKE_CASE = self.compute_metrics _SCREAMING_SNAKE_CASE = None _SCREAMING_SNAKE_CASE = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop _SCREAMING_SNAKE_CASE = time.time() try: _SCREAMING_SNAKE_CASE = eval_loop( A__ , description="""Prediction""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=A__ , metric_key_prefix=A__ , ) finally: _SCREAMING_SNAKE_CASE = compute_metrics _SCREAMING_SNAKE_CASE = self.args.eval_batch_size * self.args.world_size if F"{metric_key_prefix}_jit_compilation_time" in output.metrics: start_time += output.metrics[F"{metric_key_prefix}_jit_compilation_time"] output.metrics.update( speed_metrics( A__ , A__ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is None or self.compute_metrics is None: return output _SCREAMING_SNAKE_CASE = self.post_process_function(A__ , A__ , output.predictions , """predict""" ) _SCREAMING_SNAKE_CASE = self.compute_metrics(A__ ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(F"{metric_key_prefix}_" ): _SCREAMING_SNAKE_CASE = metrics.pop(A__ ) metrics.update(output.metrics ) return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=A__ )
702
'''simple docstring''' import argparse import torch from torch import nn from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> Any: """simple docstring""" _SCREAMING_SNAKE_CASE = [ """encoder.version""", """decoder.version""", """model.encoder.version""", """model.decoder.version""", """decoder.output_projection.weight""", """_float_tensor""", """encoder.embed_positions._float_tensor""", """decoder.embed_positions._float_tensor""", ] for k in ignore_keys: state_dict.pop(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> str: """simple docstring""" _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = emb.weight.shape _SCREAMING_SNAKE_CASE = nn.Linear(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , bias=SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = emb.weight.data return lin_layer def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> List[Any]: """simple docstring""" _SCREAMING_SNAKE_CASE = torch.load(SCREAMING_SNAKE_CASE_ , map_location="""cpu""" ) _SCREAMING_SNAKE_CASE = mam_aaa["""args"""] or mam_aaa["""cfg"""]["""model"""] _SCREAMING_SNAKE_CASE = mam_aaa["""model"""] remove_ignore_keys_(SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = state_dict["""encoder.embed_tokens.weight"""].shape[0] _SCREAMING_SNAKE_CASE = MaMaaaConfig( vocab_size=SCREAMING_SNAKE_CASE_ , max_position_embeddings=10_24 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="""relu""" , ) _SCREAMING_SNAKE_CASE = state_dict["""decoder.embed_tokens.weight"""] _SCREAMING_SNAKE_CASE = MaMaaaForConditionalGeneration(SCREAMING_SNAKE_CASE_ ) model.model.load_state_dict(SCREAMING_SNAKE_CASE_ , strict=SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = make_linear_from_emb(model.model.shared ) return model if __name__ == "__main__": UpperCamelCase__ : Dict = argparse.ArgumentParser() # Required parameters parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.") parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") UpperCamelCase__ : List[str] = parser.parse_args() UpperCamelCase__ : Any = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path) model.save_pretrained(args.pytorch_dump_folder_path)
0
0
'''simple docstring''' import random from typing import Any def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> list[Any]: """simple docstring""" for _ in range(len(SCREAMING_SNAKE_CASE_ ) ): _SCREAMING_SNAKE_CASE = random.randint(0 , len(SCREAMING_SNAKE_CASE_ ) - 1 ) _SCREAMING_SNAKE_CASE = random.randint(0 , len(SCREAMING_SNAKE_CASE_ ) - 1 ) _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = data[b], data[a] return data if __name__ == "__main__": UpperCamelCase__ : str = [0, 1, 2, 3, 4, 5, 6, 7] UpperCamelCase__ : Optional[int] = ["python", "says", "hello", "!"] print("Fisher-Yates Shuffle:") print("List", integers, strings) print("FY Shuffle", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
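# Note (a sketch, not the record's implementation): the record above swaps random
# index pairs; the canonical Fisher-Yates walk runs from the last position down,
# swapping each slot with a uniformly random index at or before it, which is what
# guarantees an unbiased permutation.
import random

def _canonical_fisher_yates(data: list) -> list:
    for i in range(len(data) - 1, 0, -1):
        j = random.randint(0, i)  # 0 <= j <= i, inclusive
        data[i], data[j] = data[j], data[i]
    return data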
703
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available UpperCamelCase__ : str = { "configuration_canine": ["CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP", "CanineConfig"], "tokenization_canine": ["CanineTokenizer"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ : List[Any] = [ "CANINE_PRETRAINED_MODEL_ARCHIVE_LIST", "CanineForMultipleChoice", "CanineForQuestionAnswering", "CanineForSequenceClassification", "CanineForTokenClassification", "CanineLayer", "CanineModel", "CaninePreTrainedModel", "load_tf_weights_in_canine", ] if TYPE_CHECKING: from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig from .tokenization_canine import CanineTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_canine import ( CANINE_PRETRAINED_MODEL_ARCHIVE_LIST, CanineForMultipleChoice, CanineForQuestionAnswering, CanineForSequenceClassification, CanineForTokenClassification, CanineLayer, CanineModel, CaninePreTrainedModel, load_tf_weights_in_canine, ) else: import sys UpperCamelCase__ : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
0
0
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> int: """simple docstring""" if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): raise ValueError("""multiplicative_persistence() only accepts integral values""" ) if num < 0: raise ValueError("""multiplicative_persistence() does not accept negative values""" ) _SCREAMING_SNAKE_CASE = 0 _SCREAMING_SNAKE_CASE = str(SCREAMING_SNAKE_CASE_ ) while len(SCREAMING_SNAKE_CASE_ ) != 1: _SCREAMING_SNAKE_CASE = [int(SCREAMING_SNAKE_CASE_ ) for i in num_string] _SCREAMING_SNAKE_CASE = 1 for i in range(0 , len(SCREAMING_SNAKE_CASE_ ) ): total *= numbers[i] _SCREAMING_SNAKE_CASE = str(SCREAMING_SNAKE_CASE_ ) steps += 1 return steps def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> int: """simple docstring""" if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): raise ValueError("""additive_persistence() only accepts integral values""" ) if num < 0: raise ValueError("""additive_persistence() does not accept negative values""" ) _SCREAMING_SNAKE_CASE = 0 _SCREAMING_SNAKE_CASE = str(SCREAMING_SNAKE_CASE_ ) while len(SCREAMING_SNAKE_CASE_ ) != 1: _SCREAMING_SNAKE_CASE = [int(SCREAMING_SNAKE_CASE_ ) for i in num_string] _SCREAMING_SNAKE_CASE = 0 for i in range(0 , len(SCREAMING_SNAKE_CASE_ ) ): total += numbers[i] _SCREAMING_SNAKE_CASE = str(SCREAMING_SNAKE_CASE_ ) steps += 1 return steps if __name__ == "__main__": import doctest doctest.testmod()
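# Worked examples (illustrative, using the names from the error messages above):
# multiplicative_persistence(39) == 3, since 3 * 9 = 27, then 2 * 7 = 14, then
# 1 * 4 = 4 reaches a single digit; additive_persistence(199) == 3, since
# 1 + 9 + 9 = 19, then 1 + 9 = 10, then 1 + 0 = 1.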
704
'''simple docstring''' import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class _a (_lowerCamelCase): """simple docstring""" SCREAMING_SNAKE_CASE = ['image_processor', 'tokenizer'] SCREAMING_SNAKE_CASE = 'ChineseCLIPImageProcessor' SCREAMING_SNAKE_CASE = ('BertTokenizer', 'BertTokenizerFast') def __init__( self , A__=None , A__=None , **A__ ) -> int: _SCREAMING_SNAKE_CASE = None if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""" , A__ , ) _SCREAMING_SNAKE_CASE = kwargs.pop("""feature_extractor""" ) _SCREAMING_SNAKE_CASE = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""" ) if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""" ) super().__init__(A__ , A__ ) _SCREAMING_SNAKE_CASE = self.image_processor def __call__( self , A__=None , A__=None , A__=None , **A__ ) -> Optional[int]: if text is None and images is None: raise ValueError("""You have to specify either text or images. Both cannot be none.""" ) if text is not None: _SCREAMING_SNAKE_CASE = self.tokenizer(A__ , return_tensors=A__ , **A__ ) if images is not None: _SCREAMING_SNAKE_CASE = self.image_processor(A__ , return_tensors=A__ , **A__ ) if text is not None and images is not None: _SCREAMING_SNAKE_CASE = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**A__ ) , tensor_type=A__ ) def UpperCamelCase ( self , *A__ , **A__ ) -> Dict: return self.tokenizer.batch_decode(*A__ , **A__ ) def UpperCamelCase ( self , *A__ , **A__ ) -> Optional[Any]: return self.tokenizer.decode(*A__ , **A__ ) @property def UpperCamelCase ( self ) -> List[Any]: _SCREAMING_SNAKE_CASE = self.tokenizer.model_input_names _SCREAMING_SNAKE_CASE = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) @property def UpperCamelCase ( self ) -> Optional[int]: warnings.warn( """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , A__ , ) return self.image_processor_class
0
0
import argparse from tax import checkpoints from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Dict: """simple docstring""" _SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = FlaxAutoModelForSeqaSeqLM.from_config(config=SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = checkpoints.load_tax_checkpoint(SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = """wi_0""" in tax_model["""target"""]["""encoder"""]["""layers_0"""]["""mlp"""] if config.model_type == "t5": _SCREAMING_SNAKE_CASE = """SelfAttention""" if config.model_type == "longt5" and config.encoder_attention_type == "local": _SCREAMING_SNAKE_CASE = """LocalSelfAttention""" elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global": _SCREAMING_SNAKE_CASE = """TransientGlobalSelfAttention""" else: raise ValueError( """Given config is expected to have `model_type='t5'`, or `model_type='longt5` with `encoder_attention_type`""" """ attribute with a value from ['local', 'transient-global].""" ) # Encoder for layer_index in range(config.num_layers ): _SCREAMING_SNAKE_CASE = F"layers_{str(SCREAMING_SNAKE_CASE_ )}" # Self-Attention _SCREAMING_SNAKE_CASE = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""key"""]["""kernel"""] _SCREAMING_SNAKE_CASE = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""out"""]["""kernel"""] _SCREAMING_SNAKE_CASE = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""query"""]["""kernel"""] _SCREAMING_SNAKE_CASE = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""value"""]["""kernel"""] # Global input layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": _SCREAMING_SNAKE_CASE = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""T5LayerNorm_0"""]["""scale"""] # Layer Normalization _SCREAMING_SNAKE_CASE = tax_model["""target"""]["""encoder"""][layer_name]["""pre_attention_layer_norm"""]["""scale"""] if split_mlp_wi: _SCREAMING_SNAKE_CASE = tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wi_0"""]["""kernel"""] _SCREAMING_SNAKE_CASE = tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wi_1"""]["""kernel"""] else: _SCREAMING_SNAKE_CASE = tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wi"""]["""kernel"""] _SCREAMING_SNAKE_CASE = tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wo"""]["""kernel"""] # Layer Normalization _SCREAMING_SNAKE_CASE = tax_model["""target"""]["""encoder"""][layer_name]["""pre_mlp_layer_norm"""]["""scale"""] # Assigning _SCREAMING_SNAKE_CASE = flax_model.params["""encoder"""]["""block"""][str(SCREAMING_SNAKE_CASE_ )]["""layer"""] _SCREAMING_SNAKE_CASE = tax_attention_key _SCREAMING_SNAKE_CASE = tax_attention_out _SCREAMING_SNAKE_CASE = tax_attention_query _SCREAMING_SNAKE_CASE = tax_attention_value _SCREAMING_SNAKE_CASE = tax_attention_layer_norm # Global input layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": _SCREAMING_SNAKE_CASE = tax_global_layer_norm if split_mlp_wi: _SCREAMING_SNAKE_CASE = tax_mlp_wi_a _SCREAMING_SNAKE_CASE = tax_mlp_wi_a else: _SCREAMING_SNAKE_CASE = tax_mlp_wi _SCREAMING_SNAKE_CASE = tax_mlp_wo _SCREAMING_SNAKE_CASE = tax_mlp_layer_norm _SCREAMING_SNAKE_CASE = flax_model_encoder_layer_block # Only for layer 0: _SCREAMING_SNAKE_CASE = 
tax_model["""target"""]["""encoder"""]["""relpos_bias"""]["""rel_embedding"""].T _SCREAMING_SNAKE_CASE = tax_encoder_rel_embedding # Side/global relative position_bias + layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": _SCREAMING_SNAKE_CASE = tax_model["""target"""]["""encoder"""]["""side_relpos_bias"""]["""rel_embedding"""].T _SCREAMING_SNAKE_CASE = tax_encoder_global_rel_embedding # Assigning _SCREAMING_SNAKE_CASE = tax_model["""target"""]["""encoder"""]["""encoder_norm"""]["""scale"""] _SCREAMING_SNAKE_CASE = tax_encoder_norm # Decoder for layer_index in range(config.num_layers ): _SCREAMING_SNAKE_CASE = F"layers_{str(SCREAMING_SNAKE_CASE_ )}" # Self-Attention _SCREAMING_SNAKE_CASE = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""key"""]["""kernel"""] _SCREAMING_SNAKE_CASE = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""out"""]["""kernel"""] _SCREAMING_SNAKE_CASE = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""query"""]["""kernel"""] _SCREAMING_SNAKE_CASE = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""value"""]["""kernel"""] # Layer Normalization _SCREAMING_SNAKE_CASE = tax_model["""target"""]["""decoder"""][layer_name]["""pre_self_attention_layer_norm"""][ """scale""" ] # Encoder-Decoder-Attention _SCREAMING_SNAKE_CASE = tax_model["""target"""]["""decoder"""][layer_name]["""encoder_decoder_attention"""] _SCREAMING_SNAKE_CASE = tax_enc_dec_attention_module["""key"""]["""kernel"""] _SCREAMING_SNAKE_CASE = tax_enc_dec_attention_module["""out"""]["""kernel"""] _SCREAMING_SNAKE_CASE = tax_enc_dec_attention_module["""query"""]["""kernel"""] _SCREAMING_SNAKE_CASE = tax_enc_dec_attention_module["""value"""]["""kernel"""] # Layer Normalization _SCREAMING_SNAKE_CASE = tax_model["""target"""]["""decoder"""][layer_name]["""pre_cross_attention_layer_norm"""]["""scale"""] # MLP if split_mlp_wi: _SCREAMING_SNAKE_CASE = tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wi_0"""]["""kernel"""] _SCREAMING_SNAKE_CASE = tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wi_1"""]["""kernel"""] else: _SCREAMING_SNAKE_CASE = tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wi"""]["""kernel"""] _SCREAMING_SNAKE_CASE = tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wo"""]["""kernel"""] # Layer Normalization _SCREAMING_SNAKE_CASE = tax_model["""target"""]["""decoder"""][layer_name]["""pre_mlp_layer_norm"""]["""scale"""] # Assigning _SCREAMING_SNAKE_CASE = flax_model.params["""decoder"""]["""block"""][str(SCREAMING_SNAKE_CASE_ )]["""layer"""] _SCREAMING_SNAKE_CASE = tax_attention_key _SCREAMING_SNAKE_CASE = tax_attention_out _SCREAMING_SNAKE_CASE = tax_attention_query _SCREAMING_SNAKE_CASE = tax_attention_value _SCREAMING_SNAKE_CASE = tax_pre_attention_layer_norm _SCREAMING_SNAKE_CASE = tax_enc_dec_attention_key _SCREAMING_SNAKE_CASE = tax_enc_dec_attention_out _SCREAMING_SNAKE_CASE = tax_enc_dec_attention_query _SCREAMING_SNAKE_CASE = tax_enc_dec_attention_value _SCREAMING_SNAKE_CASE = tax_cross_layer_norm if split_mlp_wi: _SCREAMING_SNAKE_CASE = tax_mlp_wi_a _SCREAMING_SNAKE_CASE = tax_mlp_wi_a else: _SCREAMING_SNAKE_CASE = tax_mlp_wi _SCREAMING_SNAKE_CASE = tax_mlp_wo _SCREAMING_SNAKE_CASE = txa_mlp_layer_norm _SCREAMING_SNAKE_CASE = flax_model_decoder_layer_block # Decoder Normalization _SCREAMING_SNAKE_CASE = 
tax_model["""target"""]["""decoder"""]["""decoder_norm"""]["""scale"""] _SCREAMING_SNAKE_CASE = txa_decoder_norm # Only for layer 0: _SCREAMING_SNAKE_CASE = tax_model["""target"""]["""decoder"""]["""relpos_bias"""]["""rel_embedding"""].T _SCREAMING_SNAKE_CASE = tax_decoder_rel_embedding # Token Embeddings _SCREAMING_SNAKE_CASE = tax_model["""target"""]["""token_embedder"""]["""embedding"""] _SCREAMING_SNAKE_CASE = txa_token_embeddings # LM Head (only in v1.1 and LongT5 checkpoints) if "logits_dense" in tax_model["target"]["decoder"]: _SCREAMING_SNAKE_CASE = tax_model["""target"""]["""decoder"""]["""logits_dense"""]["""kernel"""] flax_model.save_pretrained(SCREAMING_SNAKE_CASE_ ) print("""T5X Model was sucessfully converted!""" ) if __name__ == "__main__": UpperCamelCase__ : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path the T5X checkpoint." ) parser.add_argument("--config_name", default=None, type=str, required=True, help="Config name of LongT5/T5 model.") parser.add_argument( "--flax_dump_folder_path", default=None, type=str, required=True, help="Path to the output FLAX model." ) UpperCamelCase__ : int = parser.parse_args() convert_tax_checkpoint_to_flax(args.tax_checkpoint_path, args.config_name, args.flax_dump_folder_path)
705
'''simple docstring''' from sklearn.metrics import matthews_corrcoef import datasets UpperCamelCase__ : List[str] = "\nCompute the Matthews correlation coefficient (MCC)\n\nThe Matthews correlation coefficient is used in machine learning as a\nmeasure of the quality of binary and multiclass classifications. It takes\ninto account true and false positives and negatives and is generally\nregarded as a balanced measure which can be used even if the classes are of\nvery different sizes. The MCC is in essence a correlation coefficient value\nbetween -1 and +1. A coefficient of +1 represents a perfect prediction, 0\nan average random prediction and -1 an inverse prediction. The statistic\nis also known as the phi coefficient. [source: Wikipedia]\n" UpperCamelCase__ : List[Any] = "\nArgs:\n predictions (list of int): Predicted labels, as returned by a model.\n references (list of int): Ground truth labels.\n sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.\nReturns:\n matthews_correlation (dict containing float): Matthews correlation.\nExamples:\n Example 1, a basic example with only predictions and references as inputs:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3])\n >>> print(round(results['matthews_correlation'], 2))\n 0.54\n\n Example 2, the same example as above, but also including sample weights:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 3, 1, 1, 1, 2])\n >>> print(round(results['matthews_correlation'], 2))\n 0.1\n\n Example 3, the same example as above, but with sample weights that cause a negative correlation:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 1, 0, 0, 0, 1])\n >>> print(round(results['matthews_correlation'], 2))\n -0.25\n" UpperCamelCase__ : Any = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION) class _a (datasets.Metric): """simple docstring""" def UpperCamelCase ( self ) -> Optional[Any]: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Value("""int32""" ), """references""": datasets.Value("""int32""" ), } ) , reference_urls=[ """https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html""" ] , ) def UpperCamelCase ( self , A__ , A__ , A__=None ) -> List[str]: return { "matthews_correlation": float(matthews_corrcoef(A__ , A__ , sample_weight=A__ ) ), }
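# Illustrative binary-case check (assumed counts, independent of sklearn): for a
# 2x2 confusion matrix, MCC = (TP*TN - FP*FN) / sqrt((TP+FP)(TP+FN)(TN+FP)(TN+FN)).
from math import sqrt

def _mcc_binary(tp: int, tn: int, fp: int, fn: int) -> float:
    denom = sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
    return (tp * tn - fp * fn) / denom if denom else 0.0

assert _mcc_binary(tp=5, tn=5, fp=0, fn=0) == 1.0  # perfect prediction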
0
0
import os import tempfile import unittest from transformers import FlaubertConfig, is_torch_available from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( FlaubertForMultipleChoice, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertModel, FlaubertWithLMHeadModel, ) from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST class _a (_lowerCamelCase): """simple docstring""" def __init__( self , A__ , A__=13 , A__=7 , A__=True , A__=True , A__=True , A__=True , A__=True , A__=False , A__=False , A__=False , A__=2 , A__=99 , A__=0 , A__=32 , A__=5 , A__=4 , A__=0.1 , A__=0.1 , A__=5_12 , A__=12 , A__=2 , A__=0.02 , A__=3 , A__=4 , A__="last" , A__=None , A__=None , ) -> Dict: _SCREAMING_SNAKE_CASE = parent _SCREAMING_SNAKE_CASE = batch_size _SCREAMING_SNAKE_CASE = seq_length _SCREAMING_SNAKE_CASE = is_training _SCREAMING_SNAKE_CASE = use_input_lengths _SCREAMING_SNAKE_CASE = use_token_type_ids _SCREAMING_SNAKE_CASE = use_labels _SCREAMING_SNAKE_CASE = gelu_activation _SCREAMING_SNAKE_CASE = sinusoidal_embeddings _SCREAMING_SNAKE_CASE = causal _SCREAMING_SNAKE_CASE = asm _SCREAMING_SNAKE_CASE = n_langs _SCREAMING_SNAKE_CASE = vocab_size _SCREAMING_SNAKE_CASE = n_special _SCREAMING_SNAKE_CASE = hidden_size _SCREAMING_SNAKE_CASE = num_hidden_layers _SCREAMING_SNAKE_CASE = num_attention_heads _SCREAMING_SNAKE_CASE = hidden_dropout_prob _SCREAMING_SNAKE_CASE = attention_probs_dropout_prob _SCREAMING_SNAKE_CASE = max_position_embeddings _SCREAMING_SNAKE_CASE = type_vocab_size _SCREAMING_SNAKE_CASE = type_sequence_label_size _SCREAMING_SNAKE_CASE = initializer_range _SCREAMING_SNAKE_CASE = num_labels _SCREAMING_SNAKE_CASE = num_choices _SCREAMING_SNAKE_CASE = summary_type _SCREAMING_SNAKE_CASE = use_proj _SCREAMING_SNAKE_CASE = scope def UpperCamelCase ( self ) -> Tuple: _SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] ) _SCREAMING_SNAKE_CASE = None if self.use_input_lengths: _SCREAMING_SNAKE_CASE = ( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length _SCREAMING_SNAKE_CASE = None if self.use_token_type_ids: _SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) _SCREAMING_SNAKE_CASE = None _SCREAMING_SNAKE_CASE = None _SCREAMING_SNAKE_CASE = None if self.use_labels: _SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , 2 ).float() _SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices ) _SCREAMING_SNAKE_CASE = self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def UpperCamelCase ( self ) -> Optional[Any]: return FlaubertConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , 
dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , ) def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ , ) -> Tuple: _SCREAMING_SNAKE_CASE = FlaubertModel(config=A__ ) model.to(A__ ) model.eval() _SCREAMING_SNAKE_CASE = model(A__ , lengths=A__ , langs=A__ ) _SCREAMING_SNAKE_CASE = model(A__ , langs=A__ ) _SCREAMING_SNAKE_CASE = model(A__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ , ) -> Dict: _SCREAMING_SNAKE_CASE = FlaubertWithLMHeadModel(A__ ) model.to(A__ ) model.eval() _SCREAMING_SNAKE_CASE = model(A__ , token_type_ids=A__ , labels=A__ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ , ) -> Optional[Any]: _SCREAMING_SNAKE_CASE = FlaubertForQuestionAnsweringSimple(A__ ) model.to(A__ ) model.eval() _SCREAMING_SNAKE_CASE = model(A__ ) _SCREAMING_SNAKE_CASE = model(A__ , start_positions=A__ , end_positions=A__ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ , ) -> Optional[int]: _SCREAMING_SNAKE_CASE = FlaubertForQuestionAnswering(A__ ) model.to(A__ ) model.eval() _SCREAMING_SNAKE_CASE = model(A__ ) _SCREAMING_SNAKE_CASE = model( A__ , start_positions=A__ , end_positions=A__ , cls_index=A__ , is_impossible=A__ , p_mask=A__ , ) _SCREAMING_SNAKE_CASE = model( A__ , start_positions=A__ , end_positions=A__ , cls_index=A__ , is_impossible=A__ , ) ((_SCREAMING_SNAKE_CASE ) , ) = result_with_labels.to_tuple() _SCREAMING_SNAKE_CASE = model(A__ , start_positions=A__ , end_positions=A__ ) ((_SCREAMING_SNAKE_CASE ) , ) = result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape , () ) self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual( result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) ) def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ , ) -> List[str]: _SCREAMING_SNAKE_CASE = FlaubertForSequenceClassification(A__ ) model.to(A__ ) model.eval() _SCREAMING_SNAKE_CASE = model(A__ ) _SCREAMING_SNAKE_CASE = model(A__ , labels=A__ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ , ) -> Optional[Any]: _SCREAMING_SNAKE_CASE = self.num_labels 
_SCREAMING_SNAKE_CASE = FlaubertForTokenClassification(A__ ) model.to(A__ ) model.eval() _SCREAMING_SNAKE_CASE = model(A__ , attention_mask=A__ , labels=A__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ , ) -> int: _SCREAMING_SNAKE_CASE = self.num_choices _SCREAMING_SNAKE_CASE = FlaubertForMultipleChoice(config=A__ ) model.to(A__ ) model.eval() _SCREAMING_SNAKE_CASE = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _SCREAMING_SNAKE_CASE = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _SCREAMING_SNAKE_CASE = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _SCREAMING_SNAKE_CASE = model( A__ , attention_mask=A__ , token_type_ids=A__ , labels=A__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def UpperCamelCase ( self ) -> List[Any]: _SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() ( ( _SCREAMING_SNAKE_CASE ) , ( _SCREAMING_SNAKE_CASE ) , ( _SCREAMING_SNAKE_CASE ) , ( _SCREAMING_SNAKE_CASE ) , ( _SCREAMING_SNAKE_CASE ) , ( _SCREAMING_SNAKE_CASE ) , ( _SCREAMING_SNAKE_CASE ) , ( _SCREAMING_SNAKE_CASE ) , ( _SCREAMING_SNAKE_CASE ) , ) = config_and_inputs _SCREAMING_SNAKE_CASE = { """input_ids""": input_ids, """token_type_ids""": token_type_ids, """lengths""": input_lengths, """attention_mask""": input_mask, } return config, inputs_dict @require_torch class _a (_lowerCamelCase , _lowerCamelCase , unittest.TestCase): """simple docstring""" SCREAMING_SNAKE_CASE = ( ( FlaubertModel, FlaubertWithLMHeadModel, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertForMultipleChoice, ) if is_torch_available() else () ) SCREAMING_SNAKE_CASE = ( { 'feature-extraction': FlaubertModel, 'fill-mask': FlaubertWithLMHeadModel, 'question-answering': FlaubertForQuestionAnsweringSimple, 'text-classification': FlaubertForSequenceClassification, 'token-classification': FlaubertForTokenClassification, 'zero-shot': FlaubertForSequenceClassification, } if is_torch_available() else {} ) def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ ) -> Optional[Any]: if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith("""Fast""" ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. 
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def UpperCamelCase ( self , A__ , A__ , A__=False ) -> List[Any]: _SCREAMING_SNAKE_CASE = super()._prepare_for_class(A__ , A__ , return_labels=A__ ) if return_labels: if model_class.__name__ == "FlaubertForQuestionAnswering": _SCREAMING_SNAKE_CASE = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=A__ ) _SCREAMING_SNAKE_CASE = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=A__ ) return inputs_dict def UpperCamelCase ( self ) -> List[Any]: _SCREAMING_SNAKE_CASE = FlaubertModelTester(self ) _SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=A__ , emb_dim=37 ) def UpperCamelCase ( self ) -> Union[str, Any]: self.config_tester.run_common_tests() def UpperCamelCase ( self ) -> Optional[Any]: _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_model(*A__ ) def UpperCamelCase ( self ) -> Tuple: _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_lm_head(*A__ ) def UpperCamelCase ( self ) -> List[Any]: _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_simple_qa(*A__ ) def UpperCamelCase ( self ) -> Tuple: _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_qa(*A__ ) def UpperCamelCase ( self ) -> Any: _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_sequence_classif(*A__ ) def UpperCamelCase ( self ) -> Any: _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_token_classif(*A__ ) def UpperCamelCase ( self ) -> Tuple: _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_multiple_choice(*A__ ) @slow def UpperCamelCase ( self ) -> Optional[int]: for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _SCREAMING_SNAKE_CASE = FlaubertModel.from_pretrained(A__ ) self.assertIsNotNone(A__ ) @slow @require_torch_gpu def UpperCamelCase ( self ) -> Union[str, Any]: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # FlauBertForMultipleChoice behaves incorrectly in JIT environments. 
if model_class == FlaubertForMultipleChoice: return _SCREAMING_SNAKE_CASE = True _SCREAMING_SNAKE_CASE = model_class(config=A__ ) _SCREAMING_SNAKE_CASE = self._prepare_for_class(A__ , A__ ) _SCREAMING_SNAKE_CASE = torch.jit.trace( A__ , (inputs_dict["""input_ids"""].to("""cpu""" ), inputs_dict["""attention_mask"""].to("""cpu""" )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(A__ , os.path.join(A__ , """traced_model.pt""" ) ) _SCREAMING_SNAKE_CASE = torch.jit.load(os.path.join(A__ , """traced_model.pt""" ) , map_location=A__ ) loaded(inputs_dict["""input_ids"""].to(A__ ) , inputs_dict["""attention_mask"""].to(A__ ) ) @require_torch class _a (unittest.TestCase): """simple docstring""" @slow def UpperCamelCase ( self ) -> Dict: _SCREAMING_SNAKE_CASE = FlaubertModel.from_pretrained("""flaubert/flaubert_base_cased""" ) _SCREAMING_SNAKE_CASE = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] ) with torch.no_grad(): _SCREAMING_SNAKE_CASE = model(A__ )[0] _SCREAMING_SNAKE_CASE = torch.Size((1, 11, 7_68) ) self.assertEqual(output.shape , A__ ) _SCREAMING_SNAKE_CASE = torch.tensor( [[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , A__ , atol=1E-4 ) )
706
'''simple docstring''' from __future__ import annotations def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Tuple: """simple docstring""" print(F"Vertex\tShortest Distance from vertex {src}" ) for i, d in enumerate(SCREAMING_SNAKE_CASE_ ): print(F"{i}\t\t{d}" ) def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[Any]: """simple docstring""" for j in range(SCREAMING_SNAKE_CASE_ ): _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = (graph[j][k] for k in ["""src""", """dst""", """weight"""]) if distance[u] != float("""inf""" ) and distance[u] + w < distance[v]: return True return False def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> list[float]: """simple docstring""" _SCREAMING_SNAKE_CASE = [float("""inf""" )] * vertex_count _SCREAMING_SNAKE_CASE = 0.0 for _ in range(vertex_count - 1 ): for j in range(SCREAMING_SNAKE_CASE_ ): _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = (graph[j][k] for k in ["""src""", """dst""", """weight"""]) if distance[u] != float("""inf""" ) and distance[u] + w < distance[v]: _SCREAMING_SNAKE_CASE = distance[u] + w _SCREAMING_SNAKE_CASE = check_negative_cycle(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if negative_cycle_exists: raise Exception("""Negative cycle found""" ) return distance if __name__ == "__main__": import doctest doctest.testmod() UpperCamelCase__ : int = int(input("Enter number of vertices: ").strip()) UpperCamelCase__ : int = int(input("Enter number of edges: ").strip()) UpperCamelCase__ : list[dict[str, int]] = [{} for _ in range(E)] for i in range(E): print("Edge ", i + 1) UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ : Dict = ( int(x) for x in input("Enter source, destination, weight: ").strip().split(" ") ) UpperCamelCase__ : Optional[Any] = {"src": src, "dst": dest, "weight": weight} UpperCamelCase__ : Optional[Any] = int(input("\nEnter shortest path source:").strip()) UpperCamelCase__ : Any = bellman_ford(graph, V, E, source) print_distance(shortest_distance, 0)
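# Tiny worked example (assumed edge list, same dict shape as the input prompts
# above): with edges 0 -> 1 (weight 4) and 1 -> 2 (weight -2), V - 1 = 2
# relaxation passes from source 0 settle the distances to [0.0, 4.0, 2.0].
_demo_edges = [
    {"src": 0, "dst": 1, "weight": 4},
    {"src": 1, "dst": 2, "weight": -2},
]
_dist = [float("inf")] * 3
_dist[0] = 0.0
for _ in range(3 - 1):
    for edge in _demo_edges:
        if _dist[edge["src"]] != float("inf") and _dist[edge["src"]] + edge["weight"] < _dist[edge["dst"]]:
            _dist[edge["dst"]] = _dist[edge["src"]] + edge["weight"]
assert _dist == [0.0, 4.0, 2.0]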
0
0
'''simple docstring''' import os def lowerCAmelCase_ ( ) -> Optional[Any]: """simple docstring""" with open(os.path.dirname(SCREAMING_SNAKE_CASE_ ) + """/grid.txt""" ) as f: _SCREAMING_SNAKE_CASE = [] # noqa: E741 for _ in range(20 ): l.append([int(SCREAMING_SNAKE_CASE_ ) for x in f.readline().split()] ) _SCREAMING_SNAKE_CASE = 0 # right for i in range(20 ): for j in range(17 ): _SCREAMING_SNAKE_CASE = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3] if temp > maximum: _SCREAMING_SNAKE_CASE = temp # down for i in range(17 ): for j in range(20 ): _SCREAMING_SNAKE_CASE = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j] if temp > maximum: _SCREAMING_SNAKE_CASE = temp # diagonal 1 for i in range(17 ): for j in range(17 ): _SCREAMING_SNAKE_CASE = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3] if temp > maximum: _SCREAMING_SNAKE_CASE = temp # diagonal 2 for i in range(17 ): for j in range(3 , 20 ): _SCREAMING_SNAKE_CASE = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3] if temp > maximum: _SCREAMING_SNAKE_CASE = temp return maximum if __name__ == "__main__": print(solution())
707
'''simple docstring''' from __future__ import annotations import unittest from transformers import RoFormerConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForMultipleChoice, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerModel, ) from transformers.models.roformer.modeling_tf_roformer import ( TFRoFormerSelfAttention, TFRoFormerSinusoidalPositionalEmbedding, ) class _a : """simple docstring""" def __init__( self , A__ , A__=13 , A__=7 , A__=True , A__=True , A__=True , A__=True , A__=99 , A__=32 , A__=2 , A__=4 , A__=37 , A__="gelu" , A__=0.1 , A__=0.1 , A__=5_12 , A__=16 , A__=2 , A__=0.02 , A__=3 , A__=4 , A__=None , ) -> int: _SCREAMING_SNAKE_CASE = parent _SCREAMING_SNAKE_CASE = 13 _SCREAMING_SNAKE_CASE = 7 _SCREAMING_SNAKE_CASE = True _SCREAMING_SNAKE_CASE = True _SCREAMING_SNAKE_CASE = True _SCREAMING_SNAKE_CASE = True _SCREAMING_SNAKE_CASE = 99 _SCREAMING_SNAKE_CASE = 32 _SCREAMING_SNAKE_CASE = 2 _SCREAMING_SNAKE_CASE = 4 _SCREAMING_SNAKE_CASE = 37 _SCREAMING_SNAKE_CASE = """gelu""" _SCREAMING_SNAKE_CASE = 0.1 _SCREAMING_SNAKE_CASE = 0.1 _SCREAMING_SNAKE_CASE = 5_12 _SCREAMING_SNAKE_CASE = 16 _SCREAMING_SNAKE_CASE = 2 _SCREAMING_SNAKE_CASE = 0.02 _SCREAMING_SNAKE_CASE = 3 _SCREAMING_SNAKE_CASE = 4 _SCREAMING_SNAKE_CASE = None def UpperCamelCase ( self ) -> Tuple: _SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _SCREAMING_SNAKE_CASE = None if self.use_input_mask: _SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] ) _SCREAMING_SNAKE_CASE = None if self.use_token_type_ids: _SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _SCREAMING_SNAKE_CASE = None _SCREAMING_SNAKE_CASE = None _SCREAMING_SNAKE_CASE = None if self.use_labels: _SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices ) _SCREAMING_SNAKE_CASE = RoFormerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=A__ , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ) -> Union[str, Any]: _SCREAMING_SNAKE_CASE = TFRoFormerModel(config=A__ ) _SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} _SCREAMING_SNAKE_CASE = [input_ids, input_mask] _SCREAMING_SNAKE_CASE = model(A__ ) _SCREAMING_SNAKE_CASE = model(A__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 
self.seq_length, self.hidden_size) ) def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ) -> str: _SCREAMING_SNAKE_CASE = True _SCREAMING_SNAKE_CASE = TFRoFormerForCausalLM(config=A__ ) _SCREAMING_SNAKE_CASE = { """input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids, } _SCREAMING_SNAKE_CASE = model(A__ )["""logits"""] self.parent.assertListEqual( list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] ) def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ) -> Dict: _SCREAMING_SNAKE_CASE = TFRoFormerForMaskedLM(config=A__ ) _SCREAMING_SNAKE_CASE = { """input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids, } _SCREAMING_SNAKE_CASE = model(A__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ) -> List[str]: _SCREAMING_SNAKE_CASE = self.num_labels _SCREAMING_SNAKE_CASE = TFRoFormerForSequenceClassification(config=A__ ) _SCREAMING_SNAKE_CASE = { """input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids, } _SCREAMING_SNAKE_CASE = model(A__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ) -> Any: _SCREAMING_SNAKE_CASE = self.num_choices _SCREAMING_SNAKE_CASE = TFRoFormerForMultipleChoice(config=A__ ) _SCREAMING_SNAKE_CASE = tf.tile(tf.expand_dims(A__ , 1 ) , (1, self.num_choices, 1) ) _SCREAMING_SNAKE_CASE = tf.tile(tf.expand_dims(A__ , 1 ) , (1, self.num_choices, 1) ) _SCREAMING_SNAKE_CASE = tf.tile(tf.expand_dims(A__ , 1 ) , (1, self.num_choices, 1) ) _SCREAMING_SNAKE_CASE = { """input_ids""": multiple_choice_inputs_ids, """attention_mask""": multiple_choice_input_mask, """token_type_ids""": multiple_choice_token_type_ids, } _SCREAMING_SNAKE_CASE = model(A__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ) -> List[str]: _SCREAMING_SNAKE_CASE = self.num_labels _SCREAMING_SNAKE_CASE = TFRoFormerForTokenClassification(config=A__ ) _SCREAMING_SNAKE_CASE = { """input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids, } _SCREAMING_SNAKE_CASE = model(A__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ) -> Tuple: _SCREAMING_SNAKE_CASE = TFRoFormerForQuestionAnswering(config=A__ ) _SCREAMING_SNAKE_CASE = { """input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids, } _SCREAMING_SNAKE_CASE = model(A__ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCamelCase ( self ) -> List[str]: _SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() ( ( _SCREAMING_SNAKE_CASE ) , ( _SCREAMING_SNAKE_CASE ) , ( _SCREAMING_SNAKE_CASE ) , ( _SCREAMING_SNAKE_CASE ) , ( _SCREAMING_SNAKE_CASE ) , ( _SCREAMING_SNAKE_CASE ) , ( _SCREAMING_SNAKE_CASE ) , ) = config_and_inputs _SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_tf class _a (_lowerCamelCase , 
_lowerCamelCase , unittest.TestCase): """simple docstring""" SCREAMING_SNAKE_CASE = ( ( TFRoFormerModel, TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerForMultipleChoice, ) if is_tf_available() else () ) SCREAMING_SNAKE_CASE = ( { 'feature-extraction': TFRoFormerModel, 'fill-mask': TFRoFormerForMaskedLM, 'question-answering': TFRoFormerForQuestionAnswering, 'text-classification': TFRoFormerForSequenceClassification, 'text-generation': TFRoFormerForCausalLM, 'token-classification': TFRoFormerForTokenClassification, 'zero-shot': TFRoFormerForSequenceClassification, } if is_tf_available() else {} ) SCREAMING_SNAKE_CASE = False SCREAMING_SNAKE_CASE = False def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ ) -> str: if pipeline_test_casse_name == "TextGenerationPipelineTests": return True return False def UpperCamelCase ( self ) -> Union[str, Any]: _SCREAMING_SNAKE_CASE = TFRoFormerModelTester(self ) _SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=A__ , hidden_size=37 ) def UpperCamelCase ( self ) -> Optional[Any]: self.config_tester.run_common_tests() def UpperCamelCase ( self ) -> List[Any]: _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A__ ) def UpperCamelCase ( self ) -> str: _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*A__ ) def UpperCamelCase ( self ) -> int: _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head(*A__ ) def UpperCamelCase ( self ) -> Dict: _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*A__ ) def UpperCamelCase ( self ) -> Union[str, Any]: _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*A__ ) def UpperCamelCase ( self ) -> Tuple: _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*A__ ) def UpperCamelCase ( self ) -> List[Any]: _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*A__ ) @slow def UpperCamelCase ( self ) -> str: _SCREAMING_SNAKE_CASE = TFRoFormerModel.from_pretrained("""junnyu/roformer_chinese_base""" ) self.assertIsNotNone(A__ ) @require_tf class _a (unittest.TestCase): """simple docstring""" @slow def UpperCamelCase ( self ) -> Optional[Any]: _SCREAMING_SNAKE_CASE = TFRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""" ) _SCREAMING_SNAKE_CASE = tf.constant([[0, 1, 2, 3, 4, 5]] ) _SCREAMING_SNAKE_CASE = model(A__ )[0] # TODO Replace vocab size _SCREAMING_SNAKE_CASE = 5_00_00 _SCREAMING_SNAKE_CASE = [1, 6, vocab_size] self.assertEqual(output.shape , A__ ) print(output[:, :3, :3] ) # TODO Replace values below with what was printed above. 
_SCREAMING_SNAKE_CASE = tf.constant( [ [ [-0.1205_3341, -1.026_4901, 0.2922_1946], [-1.513_3783, 0.19_7433, 0.1519_0607], [-5.013_5403, -3.90_0256, -0.8403_8764], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , A__ , atol=1E-4 ) @require_tf class _a (unittest.TestCase): """simple docstring""" SCREAMING_SNAKE_CASE = 1E-4 def UpperCamelCase ( self ) -> List[Any]: _SCREAMING_SNAKE_CASE = tf.constant([[4, 10]] ) _SCREAMING_SNAKE_CASE = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 ) _SCREAMING_SNAKE_CASE = emba(input_ids.shape ) _SCREAMING_SNAKE_CASE = tf.constant( [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]] ) tf.debugging.assert_near(A__ , A__ , atol=self.tolerance ) def UpperCamelCase ( self ) -> List[Any]: _SCREAMING_SNAKE_CASE = tf.constant( [ [0.0000, 0.0000, 0.0000, 0.0000, 0.0000], [0.8415, 0.8219, 0.8020, 0.7819, 0.7617], [0.9093, 0.9364, 0.9581, 0.9749, 0.9870], ] ) _SCREAMING_SNAKE_CASE = TFRoFormerSinusoidalPositionalEmbedding(num_positions=5_12 , embedding_dim=5_12 ) emba([2, 16, 5_12] ) _SCREAMING_SNAKE_CASE = emba.weight[:3, :5] tf.debugging.assert_near(A__ , A__ , atol=self.tolerance ) @require_tf class _a (unittest.TestCase): """simple docstring""" SCREAMING_SNAKE_CASE = 1E-4 def UpperCamelCase ( self ) -> int: # 2,12,16,64 _SCREAMING_SNAKE_CASE = tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 1_00 _SCREAMING_SNAKE_CASE = -tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 1_00 _SCREAMING_SNAKE_CASE = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 ) _SCREAMING_SNAKE_CASE = embed_positions([2, 16, 7_68] )[None, None, :, :] _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = TFRoFormerSelfAttention.apply_rotary_position_embeddings( A__ , A__ , A__ ) _SCREAMING_SNAKE_CASE = tf.constant( [ [0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700], [-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343], [-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985], [-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871], [0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980], [3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253], ] ) _SCREAMING_SNAKE_CASE = tf.constant( [ [0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700], [0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343], [1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985], [2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871], [-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980], [-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253], ] ) tf.debugging.assert_near(query_layer[0, 0, :6, :8] , A__ , atol=self.tolerance ) tf.debugging.assert_near(key_layer[0, 0, :6, :8] , A__ , atol=self.tolerance )
0
0
'''simple docstring''' import argparse import datetime def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> str: """simple docstring""" _SCREAMING_SNAKE_CASE = { """0""": """Sunday""", """1""": """Monday""", """2""": """Tuesday""", """3""": """Wednesday""", """4""": """Thursday""", """5""": """Friday""", """6""": """Saturday""", } _SCREAMING_SNAKE_CASE = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0} # Validate if not 0 < len(SCREAMING_SNAKE_CASE_ ) < 11: raise ValueError("""Must be 10 characters long""" ) # Get month _SCREAMING_SNAKE_CASE = int(date_input[0] + date_input[1] ) # Validate if not 0 < m < 13: raise ValueError("""Month must be between 1 - 12""" ) _SCREAMING_SNAKE_CASE = date_input[2] # Validate if sep_a not in ["-", "/"]: raise ValueError("""Date separator must be '-' or '/'""" ) # Get day _SCREAMING_SNAKE_CASE = int(date_input[3] + date_input[4] ) # Validate if not 0 < d < 32: raise ValueError("""Date must be between 1 - 31""" ) # Get second separator _SCREAMING_SNAKE_CASE = date_input[5] # Validate if sep_a not in ["-", "/"]: raise ValueError("""Date separator must be '-' or '/'""" ) # Get year _SCREAMING_SNAKE_CASE = int(date_input[6] + date_input[7] + date_input[8] + date_input[9] ) # Arbitrary year range if not 45 < y < 85_00: raise ValueError( """Year out of range. There has to be some sort of limit...right?""" ) # Get datetime obj for validation _SCREAMING_SNAKE_CASE = datetime.date(int(SCREAMING_SNAKE_CASE_ ) , int(SCREAMING_SNAKE_CASE_ ) , int(SCREAMING_SNAKE_CASE_ ) ) # Start math if m <= 2: _SCREAMING_SNAKE_CASE = y - 1 _SCREAMING_SNAKE_CASE = m + 12 # maths var _SCREAMING_SNAKE_CASE = int(str(SCREAMING_SNAKE_CASE_ )[:2] ) _SCREAMING_SNAKE_CASE = int(str(SCREAMING_SNAKE_CASE_ )[2:] ) _SCREAMING_SNAKE_CASE = int(2.6 * m - 5.39 ) _SCREAMING_SNAKE_CASE = int(c / 4 ) _SCREAMING_SNAKE_CASE = int(k / 4 ) _SCREAMING_SNAKE_CASE = int(d + k ) _SCREAMING_SNAKE_CASE = int(t + u + v + x ) _SCREAMING_SNAKE_CASE = int(z - (2 * c) ) _SCREAMING_SNAKE_CASE = round(w % 7 ) # End math # Validate math if f != convert_datetime_days[dt_ck.weekday()]: raise AssertionError("""The date was evaluated incorrectly. Contact developer.""" ) # Response _SCREAMING_SNAKE_CASE = F"Your date {date_input}, is a {days[str(SCREAMING_SNAKE_CASE_ )]}!" return response if __name__ == "__main__": import doctest doctest.testmod() UpperCamelCase__ : Dict = argparse.ArgumentParser( description=( "Find out what day of the week nearly any date is or was. Enter " "date as a string in the mm-dd-yyyy or mm/dd/yyyy format" ) ) parser.add_argument( "date_input", type=str, help="Date as a string (mm-dd-yyyy or mm/dd/yyyy)" ) UpperCamelCase__ : int = parser.parse_args() zeller(args.date_input)
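# Worked example (illustrative trace of the arithmetic above): for "01-01-2000",
# a Saturday, January rolls back to month 13 of 1999, so c = 19 and k = 99;
# then t = int(2.6 * 13 - 5.39) = 28, u = 19 // 4 = 4, v = 99 // 4 = 24,
# x = 1 + 99 = 100, z = 28 + 4 + 24 + 100 = 156, w = 156 - 2 * 19 = 118,
# and 118 % 7 = 6, which the lookup table maps to "Saturday".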
708
'''simple docstring'''

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available


_import_structure = {"tokenization_herbert": ["HerbertTokenizer"]}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_herbert_fast"] = ["HerbertTokenizerFast"]


if TYPE_CHECKING:
    from .tokenization_herbert import HerbertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_herbert_fast import HerbertTokenizerFast

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
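# --- Added usage sketch (not part of the module above; assumes transformers is installed) ---
# What the lazy module buys: importing the package is cheap, and the heavy
# tokenizer module is only loaded on first attribute access.
import importlib

herbert = importlib.import_module("transformers.models.herbert")  # cheap: nothing heavy imported yet
tok_cls = herbert.HerbertTokenizer  # _LazyModule imports tokenization_herbert here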
0
0
'''simple docstring'''

import json
import logging
import os
import socket

import git
import numpy as np
import torch


logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s",
    datefmt="%m/%d/%Y %H:%M:%S",
    level=logging.INFO,
)
logger = logging.getLogger(__name__)


def git_log(folder_path: str):
    """Log commit info of the current repo to `folder_path`/git_log.json."""
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
    }

    with open(os.path.join(folder_path, "git_log.json"), "w") as f:
        json.dump(repo_infos, f, indent=4)


def init_gpu_params(params):
    """Handle single- and multi-GPU / multi-node setups."""
    if params.n_gpu <= 0:
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
        return

    assert torch.cuda.is_available()

    logger.info("Initializing GPUs")
    if params.n_gpu > 1:
        assert params.local_rank != -1

        params.world_size = int(os.environ["WORLD_SIZE"])
        params.n_gpu_per_node = int(os.environ["N_GPU_NODE"])
        params.global_rank = int(os.environ["RANK"])

        # number of nodes / node ID
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node
        params.multi_gpu = True

        assert params.n_nodes == int(os.environ["N_NODES"])
        assert params.node_id == int(os.environ["NODE_RANK"])

    # local job (single GPU)
    else:
        assert params.local_rank == -1

        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1
        params.multi_gpu = False

    # sanity checks
    assert params.n_nodes >= 1
    assert 0 <= params.node_id < params.n_nodes
    assert 0 <= params.local_rank <= params.global_rank < params.world_size
    assert params.world_size == params.n_nodes * params.n_gpu_per_node

    # define whether this is the master process / if we are in multi-node distributed mode
    params.is_master = params.node_id == 0 and params.local_rank == 0
    params.multi_node = params.n_nodes > 1

    # summary
    PREFIX = f"--- Global rank: {params.global_rank} - "
    logger.info(PREFIX + "Number of nodes: %i" % params.n_nodes)
    logger.info(PREFIX + "Node ID        : %i" % params.node_id)
    logger.info(PREFIX + "Local rank     : %i" % params.local_rank)
    logger.info(PREFIX + "World size     : %i" % params.world_size)
    logger.info(PREFIX + "GPUs per node  : %i" % params.n_gpu_per_node)
    logger.info(PREFIX + "Master         : %s" % str(params.is_master))
    logger.info(PREFIX + "Multi-node     : %s" % str(params.multi_node))
    logger.info(PREFIX + "Multi-GPU      : %s" % str(params.multi_gpu))
    logger.info(PREFIX + "Hostname       : %s" % socket.gethostname())

    # set GPU device
    torch.cuda.set_device(params.local_rank)

    # initialize multi-GPU
    if params.multi_gpu:
        logger.info("Initializing PyTorch distributed")
        torch.distributed.init_process_group(
            init_method="env://",
            backend="nccl",
        )


def set_seed(args):
    """Set the random seed for reproducibility."""
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
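# --- Added usage example (not part of the file above; the Namespace fields are assumed) ---
# The CPU fallback path of init_gpu_params, run in isolation:
from types import SimpleNamespace

params = SimpleNamespace(n_gpu=0, local_rank=-1)
init_gpu_params(params)
assert params.is_master and not params.multi_gpu  # single-process defaults applied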
709
'''simple docstring''' import argparse import gdown import numpy as np import torch from huggingface_hub import hf_hub_download from transformers import ( CLIPTokenizer, CLIPTokenizerFast, VideoMAEImageProcessor, XCLIPConfig, XCLIPModel, XCLIPProcessor, XCLIPTextConfig, XCLIPVisionConfig, ) def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Dict: """simple docstring""" _SCREAMING_SNAKE_CASE = XCLIPTextConfig() # derive patch size from model name _SCREAMING_SNAKE_CASE = model_name.find("""patch""" ) _SCREAMING_SNAKE_CASE = int(model_name[start_idx + len("""patch""" ) : start_idx + len("""patch""" ) + 2] ) _SCREAMING_SNAKE_CASE = XCLIPVisionConfig(patch_size=SCREAMING_SNAKE_CASE_ , num_frames=SCREAMING_SNAKE_CASE_ ) if "large" in model_name: _SCREAMING_SNAKE_CASE = 7_68 _SCREAMING_SNAKE_CASE = 30_72 _SCREAMING_SNAKE_CASE = 12 _SCREAMING_SNAKE_CASE = 10_24 _SCREAMING_SNAKE_CASE = 40_96 _SCREAMING_SNAKE_CASE = 16 _SCREAMING_SNAKE_CASE = 24 _SCREAMING_SNAKE_CASE = 7_68 _SCREAMING_SNAKE_CASE = 30_72 if model_name == "xclip-large-patch14-16-frames": _SCREAMING_SNAKE_CASE = 3_36 _SCREAMING_SNAKE_CASE = XCLIPConfig.from_text_vision_configs(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if "large" in model_name: _SCREAMING_SNAKE_CASE = 7_68 return config def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> Dict: """simple docstring""" # text encoder if name == "token_embedding.weight": _SCREAMING_SNAKE_CASE = name.replace("""token_embedding.weight""" , """text_model.embeddings.token_embedding.weight""" ) if name == "positional_embedding": _SCREAMING_SNAKE_CASE = name.replace("""positional_embedding""" , """text_model.embeddings.position_embedding.weight""" ) if "ln_1" in name: _SCREAMING_SNAKE_CASE = name.replace("""ln_1""" , """layer_norm1""" ) if "ln_2" in name: _SCREAMING_SNAKE_CASE = name.replace("""ln_2""" , """layer_norm2""" ) if "c_fc" in name: _SCREAMING_SNAKE_CASE = name.replace("""c_fc""" , """fc1""" ) if "c_proj" in name: _SCREAMING_SNAKE_CASE = name.replace("""c_proj""" , """fc2""" ) if name.startswith("""transformer.resblocks""" ): _SCREAMING_SNAKE_CASE = name.replace("""transformer.resblocks""" , """text_model.encoder.layers""" ) if "attn.out_proj" in name and "message" not in name: _SCREAMING_SNAKE_CASE = name.replace("""attn.out_proj""" , """self_attn.out_proj""" ) if "ln_final" in name: _SCREAMING_SNAKE_CASE = name.replace("""ln_final""" , """text_model.final_layer_norm""" ) # visual encoder if name == "visual.class_embedding": _SCREAMING_SNAKE_CASE = name.replace("""visual.class_embedding""" , """vision_model.embeddings.class_embedding""" ) if name == "visual.positional_embedding": _SCREAMING_SNAKE_CASE = name.replace("""visual.positional_embedding""" , """vision_model.embeddings.position_embedding.weight""" ) if name.startswith("""visual.transformer.resblocks""" ): _SCREAMING_SNAKE_CASE = name.replace("""visual.transformer.resblocks""" , """vision_model.encoder.layers""" ) if "visual.conv1" in name: _SCREAMING_SNAKE_CASE = name.replace("""visual.conv1""" , """vision_model.embeddings.patch_embedding""" ) if "visual.ln_pre" in name: _SCREAMING_SNAKE_CASE = name.replace("""visual.ln_pre""" , """vision_model.pre_layernorm""" ) if "visual.ln_post" in name: _SCREAMING_SNAKE_CASE = name.replace("""visual.ln_post""" , """vision_model.post_layernorm""" ) if "visual.proj" in name: _SCREAMING_SNAKE_CASE = name.replace("""visual.proj""" , """visual_projection.weight""" ) if "text_projection" in name: _SCREAMING_SNAKE_CASE = name.replace("""text_projection""" , 
"""text_projection.weight""" ) # things on top if "prompts_visual_proj" in name: _SCREAMING_SNAKE_CASE = name.replace("""prompts_visual_proj""" , """prompts_visual_projection""" ) if "prompts_visual_ln" in name: _SCREAMING_SNAKE_CASE = name.replace("""prompts_visual_ln""" , """prompts_visual_layernorm""" ) # mit if name == "mit.positional_embedding": _SCREAMING_SNAKE_CASE = name.replace("""positional""" , """position""" ) if name.startswith("""mit.resblocks""" ): _SCREAMING_SNAKE_CASE = name.replace("""mit.resblocks""" , """mit.encoder.layers""" ) # prompts generator if name.startswith("""prompts_generator.norm""" ): _SCREAMING_SNAKE_CASE = name.replace("""prompts_generator.norm""" , """prompts_generator.layernorm""" ) return name def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]: """simple docstring""" for key in orig_state_dict.copy().keys(): _SCREAMING_SNAKE_CASE = orig_state_dict.pop(SCREAMING_SNAKE_CASE_ ) if "attn.in_proj" in key: _SCREAMING_SNAKE_CASE = key.split(""".""" ) if key.startswith("""visual""" ): _SCREAMING_SNAKE_CASE = key_split[3] _SCREAMING_SNAKE_CASE = config.vision_config.hidden_size if "message_attn" in key: if "weight" in key: _SCREAMING_SNAKE_CASE = val[ :dim, : ] _SCREAMING_SNAKE_CASE = val[ dim : dim * 2, : ] _SCREAMING_SNAKE_CASE = val[ -dim:, : ] else: _SCREAMING_SNAKE_CASE = val[ :dim ] _SCREAMING_SNAKE_CASE = val[ dim : dim * 2 ] _SCREAMING_SNAKE_CASE = val[ -dim: ] else: if "weight" in key: _SCREAMING_SNAKE_CASE = val[ :dim, : ] _SCREAMING_SNAKE_CASE = val[ dim : dim * 2, : ] _SCREAMING_SNAKE_CASE = val[ -dim:, : ] else: _SCREAMING_SNAKE_CASE = val[:dim] _SCREAMING_SNAKE_CASE = val[ dim : dim * 2 ] _SCREAMING_SNAKE_CASE = val[-dim:] elif key.startswith("""mit""" ): _SCREAMING_SNAKE_CASE = key_split[2] _SCREAMING_SNAKE_CASE = config.vision_config.mit_hidden_size if "weight" in key: _SCREAMING_SNAKE_CASE = val[:dim, :] _SCREAMING_SNAKE_CASE = val[dim : dim * 2, :] _SCREAMING_SNAKE_CASE = val[-dim:, :] else: _SCREAMING_SNAKE_CASE = val[:dim] _SCREAMING_SNAKE_CASE = val[dim : dim * 2] _SCREAMING_SNAKE_CASE = val[-dim:] else: _SCREAMING_SNAKE_CASE = key_split[2] _SCREAMING_SNAKE_CASE = config.text_config.hidden_size if "weight" in key: _SCREAMING_SNAKE_CASE = val[:dim, :] _SCREAMING_SNAKE_CASE = val[ dim : dim * 2, : ] _SCREAMING_SNAKE_CASE = val[-dim:, :] else: _SCREAMING_SNAKE_CASE = val[:dim] _SCREAMING_SNAKE_CASE = val[ dim : dim * 2 ] _SCREAMING_SNAKE_CASE = val[-dim:] else: _SCREAMING_SNAKE_CASE = rename_key(SCREAMING_SNAKE_CASE_ ) if new_key_name in ["visual_projection.weight", "text_projection.weight"]: _SCREAMING_SNAKE_CASE = val.T _SCREAMING_SNAKE_CASE = val return orig_state_dict def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> Optional[Any]: """simple docstring""" if num_frames == 8: _SCREAMING_SNAKE_CASE = """eating_spaghetti_8_frames.npy""" elif num_frames == 16: _SCREAMING_SNAKE_CASE = """eating_spaghetti.npy""" elif num_frames == 32: _SCREAMING_SNAKE_CASE = """eating_spaghetti_32_frames.npy""" _SCREAMING_SNAKE_CASE = hf_hub_download( repo_id="""hf-internal-testing/spaghetti-video""" , filename=SCREAMING_SNAKE_CASE_ , repo_type="""dataset""" , ) _SCREAMING_SNAKE_CASE = np.load(SCREAMING_SNAKE_CASE_ ) return list(SCREAMING_SNAKE_CASE_ ) def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=False ) -> Optional[int]: """simple docstring""" _SCREAMING_SNAKE_CASE = { # fully supervised kinetics-400 checkpoints """xclip-base-patch32""": 
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth""", """xclip-base-patch32-16-frames""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth""" ), """xclip-base-patch16""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth""", """xclip-base-patch16-16-frames""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth""" ), """xclip-large-patch14""": """https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&amp;export=download&amp;confirm=t&amp;uuid=b26caedc-88e2-473e-830a-9d158b653cdb""", """xclip-large-patch14-16-frames""": """https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&amp;export=download&amp;confirm=t&amp;uuid=538fa810-e671-4050-b385-9a623f89804f""", # fully supervised kinetics-600 checkpoints """xclip-base-patch16-kinetics-600""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth""" ), """xclip-base-patch16-kinetics-600-16-frames""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth""" ), """xclip-large-patch14-kinetics-600""": """https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&amp;export=download&amp;confirm=t&amp;uuid=141d4977-4a65-44ae-864f-4b0c19f838be""", # few shot """xclip-base-patch16-hmdb-2-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth""" ), """xclip-base-patch16-hmdb-4-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth""" ), """xclip-base-patch16-hmdb-8-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth""" ), """xclip-base-patch16-hmdb-16-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth""" ), """xclip-base-patch16-ucf-2-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth""" ), """xclip-base-patch16-ucf-4-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth""" ), """xclip-base-patch16-ucf-8-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth""" ), """xclip-base-patch16-ucf-16-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth""" ), # zero shot """xclip-base-patch16-zero-shot""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth""", } _SCREAMING_SNAKE_CASE = model_to_url[model_name] _SCREAMING_SNAKE_CASE = 8 if "16-frames" in model_name: _SCREAMING_SNAKE_CASE = 16 elif "shot" in model_name: _SCREAMING_SNAKE_CASE = 32 _SCREAMING_SNAKE_CASE = get_xclip_config(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = XCLIPModel(SCREAMING_SNAKE_CASE_ ) model.eval() if "drive" in checkpoint_url: _SCREAMING_SNAKE_CASE = """pytorch_model.bin""" gdown.cached_download(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , quiet=SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = torch.load(SCREAMING_SNAKE_CASE_ , map_location="""cpu""" )["""model"""] else: _SCREAMING_SNAKE_CASE = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE_ )["""model"""] _SCREAMING_SNAKE_CASE = convert_state_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = XCLIPModel(SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = model.load_state_dict(SCREAMING_SNAKE_CASE_ , strict=SCREAMING_SNAKE_CASE_ ) assert missing_keys == 
["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"] model.eval() _SCREAMING_SNAKE_CASE = 3_36 if model_name == """xclip-large-patch14-16-frames""" else 2_24 _SCREAMING_SNAKE_CASE = VideoMAEImageProcessor(size=SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained("""openai/clip-vit-base-patch32""" ) _SCREAMING_SNAKE_CASE = CLIPTokenizerFast.from_pretrained("""openai/clip-vit-base-patch32""" ) _SCREAMING_SNAKE_CASE = XCLIPProcessor(image_processor=SCREAMING_SNAKE_CASE_ , tokenizer=SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = prepare_video(SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = processor( text=["""playing sports""", """eating spaghetti""", """go shopping"""] , videos=SCREAMING_SNAKE_CASE_ , return_tensors="""pt""" , padding=SCREAMING_SNAKE_CASE_ ) print("""Shape of pixel values:""" , inputs.pixel_values.shape ) with torch.no_grad(): _SCREAMING_SNAKE_CASE = model(**SCREAMING_SNAKE_CASE_ ) # Verify outputs _SCREAMING_SNAKE_CASE = outputs.logits_per_video _SCREAMING_SNAKE_CASE = logits_per_video.softmax(dim=1 ) print("""Probs:""" , SCREAMING_SNAKE_CASE_ ) # kinetics-400 if model_name == "xclip-base-patch32": _SCREAMING_SNAKE_CASE = torch.tensor([[0.0019, 0.9951, 0.0030]] ) elif model_name == "xclip-base-patch32-16-frames": _SCREAMING_SNAKE_CASE = torch.tensor([[7.0999e-04, 9.9883e-01, 4.5580e-04]] ) elif model_name == "xclip-base-patch16": _SCREAMING_SNAKE_CASE = torch.tensor([[0.0083, 0.9681, 0.0236]] ) elif model_name == "xclip-base-patch16-16-frames": _SCREAMING_SNAKE_CASE = torch.tensor([[7.6937e-04, 9.9728e-01, 1.9473e-03]] ) elif model_name == "xclip-large-patch14": _SCREAMING_SNAKE_CASE = torch.tensor([[0.0062, 0.9864, 0.0075]] ) elif model_name == "xclip-large-patch14-16-frames": _SCREAMING_SNAKE_CASE = torch.tensor([[3.3877e-04, 9.9937e-01, 2.8888e-04]] ) # kinetics-600 elif model_name == "xclip-base-patch16-kinetics-600": _SCREAMING_SNAKE_CASE = torch.tensor([[0.0555, 0.8914, 0.0531]] ) elif model_name == "xclip-base-patch16-kinetics-600-16-frames": _SCREAMING_SNAKE_CASE = torch.tensor([[3.8554e-04, 9.9929e-01, 3.2754e-04]] ) elif model_name == "xclip-large-patch14-kinetics-600": _SCREAMING_SNAKE_CASE = torch.tensor([[0.0036, 0.9920, 0.0045]] ) # few shot elif model_name == "xclip-base-patch16-hmdb-2-shot": _SCREAMING_SNAKE_CASE = torch.tensor([[7.1890e-06, 9.9994e-01, 5.6559e-05]] ) elif model_name == "xclip-base-patch16-hmdb-4-shot": _SCREAMING_SNAKE_CASE = torch.tensor([[1.0320e-05, 9.9993e-01, 6.2435e-05]] ) elif model_name == "xclip-base-patch16-hmdb-8-shot": _SCREAMING_SNAKE_CASE = torch.tensor([[4.1377e-06, 9.9990e-01, 9.8386e-05]] ) elif model_name == "xclip-base-patch16-hmdb-16-shot": _SCREAMING_SNAKE_CASE = torch.tensor([[4.1347e-05, 9.9962e-01, 3.3411e-04]] ) elif model_name == "xclip-base-patch16-ucf-2-shot": _SCREAMING_SNAKE_CASE = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]] ) elif model_name == "xclip-base-patch16-ucf-4-shot": _SCREAMING_SNAKE_CASE = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]] ) elif model_name == "xclip-base-patch16-ucf-8-shot": _SCREAMING_SNAKE_CASE = torch.tensor([[0.0027, 0.9904, 0.0070]] ) elif model_name == "xclip-base-patch16-ucf-16-shot": _SCREAMING_SNAKE_CASE = torch.tensor([[9.8219e-04, 9.9593e-01, 3.0863e-03]] ) # zero shot elif model_name == "xclip-base-patch16-zero-shot": _SCREAMING_SNAKE_CASE = torch.tensor([[3.5082e-04, 9.9785e-01, 1.7966e-03]] ) else: raise ValueError(F"Model name {model_name} not supported" ) assert torch.allclose(SCREAMING_SNAKE_CASE_ , 
SCREAMING_SNAKE_CASE_ , atol=1e-3 ) print("""Looks ok!""" ) if pytorch_dump_folder_path is not None: print(F"Saving model {model_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(SCREAMING_SNAKE_CASE_ ) if push_to_hub: print("""Pushing model, processor and slow tokenizer files to the hub...""" ) model.push_to_hub(SCREAMING_SNAKE_CASE_ , organization="""nielsr""" ) processor.push_to_hub(SCREAMING_SNAKE_CASE_ , organization="""nielsr""" ) slow_tokenizer.push_to_hub(SCREAMING_SNAKE_CASE_ , organization="""nielsr""" ) if __name__ == "__main__": UpperCamelCase__ : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default="xclip-base-patch32", type=str, help="Name of the model.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) UpperCamelCase__ : str = parser.parse_args() convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
0
0
'''simple docstring'''

import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig


logger = logging.get_logger(__name__)

DPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Intel/dpt-large": "https://huggingface.co/Intel/dpt-large/resolve/main/config.json",
    # See all DPT models at https://huggingface.co/models?filter=dpt
}


class DPTConfig(PretrainedConfig):
    model_type = "dpt"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=384,
        patch_size=16,
        num_channels=3,
        is_hybrid=False,
        qkv_bias=True,
        backbone_out_indices=[2, 5, 8, 11],
        readout_type="project",
        reassemble_factors=[4, 2, 1, 0.5],
        neck_hidden_sizes=[96, 192, 384, 768],
        fusion_hidden_size=256,
        head_in_index=-1,
        use_batch_norm_in_fusion_residual=False,
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        semantic_loss_ignore_index=255,
        semantic_classifier_dropout=0.1,
        backbone_featmap_shape=[1, 1024, 24, 24],
        neck_ignore_stages=[0, 1],
        backbone_config=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.is_hybrid = is_hybrid

        if self.is_hybrid:
            if backbone_config is None:
                logger.info("Initializing the config with a `BiT` backbone.")
                backbone_config = {
                    "global_padding": "same",
                    "layer_type": "bottleneck",
                    "depths": [3, 4, 9],
                    "out_features": ["stage1", "stage2", "stage3"],
                    "embedding_dynamic_padding": True,
                }
                backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, dict):
                logger.info("Initializing the config with a `BiT` backbone.")
                backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, PretrainedConfig):
                backbone_config = backbone_config
            else:
                raise ValueError(
                    f"backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}."
                )
            self.backbone_config = backbone_config
            self.backbone_featmap_shape = backbone_featmap_shape
            self.neck_ignore_stages = neck_ignore_stages

            if readout_type != "project":
                raise ValueError("Readout type must be 'project' when using `DPT-hybrid` mode.")
        else:
            self.backbone_config = None
            self.backbone_featmap_shape = None
            self.neck_ignore_stages = []

        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.backbone_out_indices = backbone_out_indices

        if readout_type not in ["ignore", "add", "project"]:
            raise ValueError("Readout_type must be one of ['ignore', 'add', 'project']")
        self.readout_type = readout_type
        self.reassemble_factors = reassemble_factors
        self.neck_hidden_sizes = neck_hidden_sizes
        self.fusion_hidden_size = fusion_hidden_size
        self.head_in_index = head_in_index
        self.use_batch_norm_in_fusion_residual = use_batch_norm_in_fusion_residual

        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
        self.semantic_classifier_dropout = semantic_classifier_dropout

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
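# --- Added usage sketch (not part of the config file above; runnable only inside
# the transformers package because of the relative imports) ---
config = DPTConfig(is_hybrid=True)  # no backbone given: falls back to a default BiT backbone
assert type(config.backbone_config).__name__ == "BitConfig"
assert config.to_dict()["model_type"] == "dpt"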
710
'''simple docstring''' import numpy as np import torch from torch.utils.data import Dataset from utils import logger class _a (_lowerCamelCase): """simple docstring""" def __init__( self , A__ , A__ ) -> Any: _SCREAMING_SNAKE_CASE = params _SCREAMING_SNAKE_CASE = np.array(A__ ) _SCREAMING_SNAKE_CASE = np.array([len(A__ ) for t in data] ) self.check() self.remove_long_sequences() self.remove_empty_sequences() self.remove_unknown_sequences() self.check() self.print_statistics() def __getitem__( self , A__ ) -> Dict: return (self.token_ids[index], self.lengths[index]) def __len__( self ) -> Tuple: return len(self.lengths ) def UpperCamelCase ( self ) -> Dict: assert len(self.token_ids ) == len(self.lengths ) assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) ) def UpperCamelCase ( self ) -> List[str]: _SCREAMING_SNAKE_CASE = self.params.max_model_input_size _SCREAMING_SNAKE_CASE = self.lengths > max_len logger.info(F"Splitting {sum(A__ )} too long sequences." ) def divide_chunks(A__ , A__ ): return [l[i : i + n] for i in range(0 , len(A__ ) , A__ )] _SCREAMING_SNAKE_CASE = [] _SCREAMING_SNAKE_CASE = [] if self.params.mlm: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.params.special_tok_ids["""cls_token"""], self.params.special_tok_ids["""sep_token"""] else: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.params.special_tok_ids["""bos_token"""], self.params.special_tok_ids["""eos_token"""] for seq_, len_ in zip(self.token_ids , self.lengths ): assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_ if len_ <= max_len: new_tok_ids.append(seq_ ) new_lengths.append(len_ ) else: _SCREAMING_SNAKE_CASE = [] for sub_s in divide_chunks(seq_ , max_len - 2 ): if sub_s[0] != cls_id: _SCREAMING_SNAKE_CASE = np.insert(A__ , 0 , A__ ) if sub_s[-1] != sep_id: _SCREAMING_SNAKE_CASE = np.insert(A__ , len(A__ ) , A__ ) assert len(A__ ) <= max_len assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s sub_seqs.append(A__ ) new_tok_ids.extend(A__ ) new_lengths.extend([len(A__ ) for l in sub_seqs] ) _SCREAMING_SNAKE_CASE = np.array(A__ ) _SCREAMING_SNAKE_CASE = np.array(A__ ) def UpperCamelCase ( self ) -> List[str]: _SCREAMING_SNAKE_CASE = len(self ) _SCREAMING_SNAKE_CASE = self.lengths > 11 _SCREAMING_SNAKE_CASE = self.token_ids[indices] _SCREAMING_SNAKE_CASE = self.lengths[indices] _SCREAMING_SNAKE_CASE = len(self ) logger.info(F"Remove {init_size - new_size} too short (<=11 tokens) sequences." ) def UpperCamelCase ( self ) -> int: if "unk_token" not in self.params.special_tok_ids: return else: _SCREAMING_SNAKE_CASE = self.params.special_tok_ids["""unk_token"""] _SCREAMING_SNAKE_CASE = len(self ) _SCREAMING_SNAKE_CASE = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] ) _SCREAMING_SNAKE_CASE = (unk_occs / self.lengths) < 0.5 _SCREAMING_SNAKE_CASE = self.token_ids[indices] _SCREAMING_SNAKE_CASE = self.lengths[indices] _SCREAMING_SNAKE_CASE = len(self ) logger.info(F"Remove {init_size - new_size} sequences with a high level of unknown tokens (50%)." 
) def UpperCamelCase ( self ) -> Optional[Any]: if not self.params.is_master: return logger.info(F"{len(self )} sequences" ) # data_len = sum(self.lengths) # nb_unique_tokens = len(Counter(list(chain(*self.token_ids)))) # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)') # unk_idx = self.params.special_tok_ids['unk_token'] # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids]) # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)') def UpperCamelCase ( self , A__ ) -> Any: _SCREAMING_SNAKE_CASE = [t[0] for t in batch] _SCREAMING_SNAKE_CASE = [t[1] for t in batch] assert len(A__ ) == len(A__ ) # Max for paddings _SCREAMING_SNAKE_CASE = max(A__ ) # Pad token ids if self.params.mlm: _SCREAMING_SNAKE_CASE = self.params.special_tok_ids["""pad_token"""] else: _SCREAMING_SNAKE_CASE = self.params.special_tok_ids["""unk_token"""] _SCREAMING_SNAKE_CASE = [list(t.astype(A__ ) ) + [pad_idx] * (max_seq_len_ - len(A__ )) for t in token_ids] assert len(tk_ ) == len(A__ ) assert all(len(A__ ) == max_seq_len_ for t in tk_ ) _SCREAMING_SNAKE_CASE = torch.tensor(tk_ ) # (bs, max_seq_len_) _SCREAMING_SNAKE_CASE = torch.tensor(A__ ) # (bs) return tk_t, lg_t
0
0
'''simple docstring'''

encode_dict = {
    "a": "AAAAA",
    "b": "AAAAB",
    "c": "AAABA",
    "d": "AAABB",
    "e": "AABAA",
    "f": "AABAB",
    "g": "AABBA",
    "h": "AABBB",
    "i": "ABAAA",
    "j": "BBBAA",
    "k": "ABAAB",
    "l": "ABABA",
    "m": "ABABB",
    "n": "ABBAA",
    "o": "ABBAB",
    "p": "ABBBA",
    "q": "ABBBB",
    "r": "BAAAA",
    "s": "BAAAB",
    "t": "BAABA",
    "u": "BAABB",
    "v": "BBBAB",
    "w": "BABAA",
    "x": "BABAB",
    "y": "BABBA",
    "z": "BABBB",
    " ": " ",
}

decode_dict = {value: key for key, value in encode_dict.items()}


def encode(word: str) -> str:
    """Encode a message with the Baconian cipher."""
    encoded = ""
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception("encode() accepts only letters of the alphabet and spaces")
    return encoded


def decode(coded: str) -> str:
    """Decode a Baconian-cipher message back to plain text."""
    if set(coded) - {"A", "B", " "} != set():
        raise Exception("decode() accepts only 'A', 'B' and spaces")
    decoded = ""
    for word in coded.split():
        while len(word) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()


if __name__ == "__main__":
    from doctest import testmod

    testmod()
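# --- Added round-trip check (not part of the source file above) ---
# h=AABBB, e=AABAA, l=ABABA, l=ABABA, o=ABBAB
assert encode("hello") == "AABBBAABAAABABAABABAABBAB"
assert decode(encode("hello world")) == "hello world"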
711
'''simple docstring'''

import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/reformer-crime-and-punishment": (
            "https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model"
        )
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/reformer-crime-and-punishment": 524_288,
}


class ReformerTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        eos_token="</s>",
        unk_token="<unk>",
        additional_special_tokens=[],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
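# --- Added usage sketch (not part of the tokenizer file above; the model id is real,
# but downloading it at runtime is an assumption about the environment, so the
# example is left commented out) ---
# tok = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
# ids = tok.encode("A sentence to tokenize.")
# print(tok.decode(ids))  # should typically round-trip plain ASCII text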
0
0
'''simple docstring''' import re import time from typing import Optional import IPython.display as disp from ..trainer_callback import TrainerCallback from ..trainer_utils import IntervalStrategy, has_length def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> int: """simple docstring""" _SCREAMING_SNAKE_CASE = int(SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = t // 36_00, (t // 60) % 60, t % 60 return F"{h}:{m:02d}:{s:02d}" if h != 0 else F"{m:02d}:{s:02d}" def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=3_00 ) -> Union[str, Any]: """simple docstring""" return F"\n <div>\n {prefix}\n <progress value='{value}' max='{total}' style='width:{width}px; height:20px; vertical-align: middle;'></progress>\n {label}\n </div>\n " def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> Tuple: """simple docstring""" _SCREAMING_SNAKE_CASE = """<table border=\"1\" class=\"dataframe\">\n""" html_code += """ <thead>\n <tr style="text-align: left;">\n""" for i in items[0]: html_code += F" <th>{i}</th>\n" html_code += " </tr>\n </thead>\n <tbody>\n" for line in items[1:]: html_code += " <tr>\n" for elt in line: _SCREAMING_SNAKE_CASE = F"{elt:.6f}" if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else str(SCREAMING_SNAKE_CASE_ ) html_code += F" <td>{elt}</td>\n" html_code += " </tr>\n" html_code += " </tbody>\n</table><p>" return html_code class _a : """simple docstring""" SCREAMING_SNAKE_CASE = 5 SCREAMING_SNAKE_CASE = 0.2 def __init__( self , A__ , A__ = None , A__ = True , A__ = None , A__ = 3_00 , ) -> int: _SCREAMING_SNAKE_CASE = total _SCREAMING_SNAKE_CASE = """""" if prefix is None else prefix _SCREAMING_SNAKE_CASE = leave _SCREAMING_SNAKE_CASE = parent _SCREAMING_SNAKE_CASE = width _SCREAMING_SNAKE_CASE = None _SCREAMING_SNAKE_CASE = None _SCREAMING_SNAKE_CASE = None def UpperCamelCase ( self , A__ , A__ = False , A__ = None ) -> Optional[Any]: _SCREAMING_SNAKE_CASE = value if comment is not None: _SCREAMING_SNAKE_CASE = comment if self.last_value is None: _SCREAMING_SNAKE_CASE = _SCREAMING_SNAKE_CASE = time.time() _SCREAMING_SNAKE_CASE = _SCREAMING_SNAKE_CASE = value _SCREAMING_SNAKE_CASE = _SCREAMING_SNAKE_CASE = None _SCREAMING_SNAKE_CASE = self.warmup _SCREAMING_SNAKE_CASE = 1 self.update_bar(A__ ) elif value <= self.last_value and not force_update: return elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for , self.total ): if self.first_calls > 0: self.first_calls -= 1 _SCREAMING_SNAKE_CASE = time.time() _SCREAMING_SNAKE_CASE = current_time - self.start_time # We could have value = self.start_value if the update is called twixe with the same start value. 
if value > self.start_value: _SCREAMING_SNAKE_CASE = self.elapsed_time / (value - self.start_value) else: _SCREAMING_SNAKE_CASE = None if value >= self.total: _SCREAMING_SNAKE_CASE = self.total _SCREAMING_SNAKE_CASE = None if not self.leave: self.close() elif self.average_time_per_item is not None: _SCREAMING_SNAKE_CASE = self.average_time_per_item * (self.total - value) self.update_bar(A__ ) _SCREAMING_SNAKE_CASE = value _SCREAMING_SNAKE_CASE = current_time if self.average_time_per_item is None: _SCREAMING_SNAKE_CASE = 1 else: _SCREAMING_SNAKE_CASE = max(int(self.update_every / self.average_time_per_item ) , 1 ) def UpperCamelCase ( self , A__ , A__=None ) -> List[Any]: _SCREAMING_SNAKE_CASE = """ """ * (len(str(self.total ) ) - len(str(A__ ) )) + str(A__ ) if self.elapsed_time is None: _SCREAMING_SNAKE_CASE = F"[{spaced_value}/{self.total} : < :" elif self.predicted_remaining is None: _SCREAMING_SNAKE_CASE = F"[{spaced_value}/{self.total} {format_time(self.elapsed_time )}" else: _SCREAMING_SNAKE_CASE = ( F"[{spaced_value}/{self.total} {format_time(self.elapsed_time )} <" F" {format_time(self.predicted_remaining )}" ) self.label += F", {1/self.average_time_per_item:.2f} it/s" self.label += "]" if self.comment is None or len(self.comment ) == 0 else F", {self.comment}]" self.display() def UpperCamelCase ( self ) -> str: _SCREAMING_SNAKE_CASE = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width ) if self.parent is not None: # If this is a child bar, the parent will take care of the display. self.parent.display() return if self.output is None: _SCREAMING_SNAKE_CASE = disp.display(disp.HTML(self.html_code ) , display_id=A__ ) else: self.output.update(disp.HTML(self.html_code ) ) def UpperCamelCase ( self ) -> Optional[Any]: if self.parent is None and self.output is not None: self.output.update(disp.HTML("""""" ) ) class _a (_lowerCamelCase): """simple docstring""" def __init__( self , A__ , A__=None ) -> Optional[int]: super().__init__(A__ ) _SCREAMING_SNAKE_CASE = None if column_names is None else [column_names] _SCREAMING_SNAKE_CASE = None def UpperCamelCase ( self ) -> Optional[int]: _SCREAMING_SNAKE_CASE = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width ) if self.inner_table is not None: self.html_code += text_to_html_table(self.inner_table ) if self.child_bar is not None: self.html_code += self.child_bar.html_code if self.output is None: _SCREAMING_SNAKE_CASE = disp.display(disp.HTML(self.html_code ) , display_id=A__ ) else: self.output.update(disp.HTML(self.html_code ) ) def UpperCamelCase ( self , A__ ) -> Any: if self.inner_table is None: _SCREAMING_SNAKE_CASE = [list(values.keys() ), list(values.values() )] else: _SCREAMING_SNAKE_CASE = self.inner_table[0] if len(self.inner_table ) == 1: # We give a chance to update the column names at the first iteration for key in values.keys(): if key not in columns: columns.append(A__ ) _SCREAMING_SNAKE_CASE = columns self.inner_table.append([values[c] for c in columns] ) def UpperCamelCase ( self , A__ , A__=None , A__=3_00 ) -> str: _SCREAMING_SNAKE_CASE = NotebookProgressBar(A__ , prefix=A__ , parent=self , width=A__ ) return self.child_bar def UpperCamelCase ( self ) -> str: _SCREAMING_SNAKE_CASE = None self.display() class _a (_lowerCamelCase): """simple docstring""" def __init__( self ) -> Optional[Any]: _SCREAMING_SNAKE_CASE = None _SCREAMING_SNAKE_CASE = None _SCREAMING_SNAKE_CASE = False def UpperCamelCase ( self , A__ , A__ , A__ , **A__ ) -> Union[str, Any]: 
_SCREAMING_SNAKE_CASE = """Epoch""" if args.evaluation_strategy == IntervalStrategy.EPOCH else """Step""" _SCREAMING_SNAKE_CASE = 0 _SCREAMING_SNAKE_CASE = 0 _SCREAMING_SNAKE_CASE = [self.first_column] + ["""Training Loss"""] if args.evaluation_strategy != IntervalStrategy.NO: column_names.append("""Validation Loss""" ) _SCREAMING_SNAKE_CASE = NotebookTrainingTracker(state.max_steps , A__ ) def UpperCamelCase ( self , A__ , A__ , A__ , **A__ ) -> Union[str, Any]: _SCREAMING_SNAKE_CASE = int(state.epoch ) if int(state.epoch ) == state.epoch else F"{state.epoch:.2f}" self.training_tracker.update( state.global_step + 1 , comment=F"Epoch {epoch}/{state.num_train_epochs}" , force_update=self._force_next_update , ) _SCREAMING_SNAKE_CASE = False def UpperCamelCase ( self , A__ , A__ , A__ , A__=None , **A__ ) -> Union[str, Any]: if not has_length(A__ ): return if self.prediction_bar is None: if self.training_tracker is not None: _SCREAMING_SNAKE_CASE = self.training_tracker.add_child(len(A__ ) ) else: _SCREAMING_SNAKE_CASE = NotebookProgressBar(len(A__ ) ) self.prediction_bar.update(1 ) else: self.prediction_bar.update(self.prediction_bar.value + 1 ) def UpperCamelCase ( self , A__ , A__ , A__ , **A__ ) -> List[Any]: if self.prediction_bar is not None: self.prediction_bar.close() _SCREAMING_SNAKE_CASE = None def UpperCamelCase ( self , A__ , A__ , A__ , A__=None , **A__ ) -> Optional[int]: # Only for when there is no evaluation if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs: _SCREAMING_SNAKE_CASE = {"""Training Loss""": logs["""loss"""]} # First column is necessarily Step sine we're not in epoch eval strategy _SCREAMING_SNAKE_CASE = state.global_step self.training_tracker.write_line(A__ ) def UpperCamelCase ( self , A__ , A__ , A__ , A__=None , **A__ ) -> Dict: if self.training_tracker is not None: _SCREAMING_SNAKE_CASE = {"""Training Loss""": """No log""", """Validation Loss""": """No log"""} for log in reversed(state.log_history ): if "loss" in log: _SCREAMING_SNAKE_CASE = log["""loss"""] break if self.first_column == "Epoch": _SCREAMING_SNAKE_CASE = int(state.epoch ) else: _SCREAMING_SNAKE_CASE = state.global_step _SCREAMING_SNAKE_CASE = """eval""" for k in metrics: if k.endswith("""_loss""" ): _SCREAMING_SNAKE_CASE = re.sub(R"""\_loss$""" , """""" , A__ ) _SCREAMING_SNAKE_CASE = metrics.pop("""total_flos""" , A__ ) _SCREAMING_SNAKE_CASE = metrics.pop("""epoch""" , A__ ) _SCREAMING_SNAKE_CASE = metrics.pop(F"{metric_key_prefix}_runtime" , A__ ) _SCREAMING_SNAKE_CASE = metrics.pop(F"{metric_key_prefix}_samples_per_second" , A__ ) _SCREAMING_SNAKE_CASE = metrics.pop(F"{metric_key_prefix}_steps_per_second" , A__ ) _SCREAMING_SNAKE_CASE = metrics.pop(F"{metric_key_prefix}_jit_compilation_time" , A__ ) for k, v in metrics.items(): if k == F"{metric_key_prefix}_loss": _SCREAMING_SNAKE_CASE = v else: _SCREAMING_SNAKE_CASE = k.split("""_""" ) _SCREAMING_SNAKE_CASE = """ """.join([part.capitalize() for part in splits[1:]] ) _SCREAMING_SNAKE_CASE = v self.training_tracker.write_line(A__ ) self.training_tracker.remove_child() _SCREAMING_SNAKE_CASE = None # Evaluation takes a long time so we should force the next update. _SCREAMING_SNAKE_CASE = True def UpperCamelCase ( self , A__ , A__ , A__ , **A__ ) -> Dict: self.training_tracker.update( state.global_step , comment=F"Epoch {int(state.epoch )}/{state.num_train_epochs}" , force_update=A__ ) _SCREAMING_SNAKE_CASE = None
712
'''simple docstring''' import os import unittest from transformers import MobileBertTokenizer, MobileBertTokenizerFast from transformers.models.bert.tokenization_bert import ( VOCAB_FILES_NAMES, BasicTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english @require_tokenizers class _a (_lowerCamelCase , unittest.TestCase): """simple docstring""" SCREAMING_SNAKE_CASE = MobileBertTokenizer SCREAMING_SNAKE_CASE = MobileBertTokenizerFast SCREAMING_SNAKE_CASE = True SCREAMING_SNAKE_CASE = True SCREAMING_SNAKE_CASE = filter_non_english SCREAMING_SNAKE_CASE = 'google/mobilebert-uncased' def UpperCamelCase ( self ) -> Any: super().setUp() _SCREAMING_SNAKE_CASE = [ """[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing""", """,""", """low""", """lowest""", ] _SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) ) _SCREAMING_SNAKE_CASE = [ (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped for tokenizer_def in self.tokenizers_list ] def UpperCamelCase ( self , A__ ) -> List[str]: _SCREAMING_SNAKE_CASE = """UNwant\u00E9d,running""" _SCREAMING_SNAKE_CASE = """unwanted, running""" return input_text, output_text def UpperCamelCase ( self ) -> Any: _SCREAMING_SNAKE_CASE = self.tokenizer_class(self.vocab_file ) _SCREAMING_SNAKE_CASE = tokenizer.tokenize("""UNwant\u00E9d,running""" ) self.assertListEqual(A__ , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(A__ ) , [9, 6, 7, 12, 10, 11] ) def UpperCamelCase ( self ) -> Optional[int]: if not self.test_rust_tokenizer: return _SCREAMING_SNAKE_CASE = self.get_tokenizer() _SCREAMING_SNAKE_CASE = self.get_rust_tokenizer() _SCREAMING_SNAKE_CASE = """UNwant\u00E9d,running""" _SCREAMING_SNAKE_CASE = tokenizer.tokenize(A__ ) _SCREAMING_SNAKE_CASE = rust_tokenizer.tokenize(A__ ) self.assertListEqual(A__ , A__ ) _SCREAMING_SNAKE_CASE = tokenizer.encode(A__ , add_special_tokens=A__ ) _SCREAMING_SNAKE_CASE = rust_tokenizer.encode(A__ , add_special_tokens=A__ ) self.assertListEqual(A__ , A__ ) _SCREAMING_SNAKE_CASE = self.get_rust_tokenizer() _SCREAMING_SNAKE_CASE = tokenizer.encode(A__ ) _SCREAMING_SNAKE_CASE = rust_tokenizer.encode(A__ ) self.assertListEqual(A__ , A__ ) # With lower casing _SCREAMING_SNAKE_CASE = self.get_tokenizer(do_lower_case=A__ ) _SCREAMING_SNAKE_CASE = self.get_rust_tokenizer(do_lower_case=A__ ) _SCREAMING_SNAKE_CASE = """UNwant\u00E9d,running""" _SCREAMING_SNAKE_CASE = tokenizer.tokenize(A__ ) _SCREAMING_SNAKE_CASE = rust_tokenizer.tokenize(A__ ) self.assertListEqual(A__ , A__ ) _SCREAMING_SNAKE_CASE = tokenizer.encode(A__ , add_special_tokens=A__ ) _SCREAMING_SNAKE_CASE = rust_tokenizer.encode(A__ , add_special_tokens=A__ ) self.assertListEqual(A__ , A__ ) _SCREAMING_SNAKE_CASE = self.get_rust_tokenizer() _SCREAMING_SNAKE_CASE = tokenizer.encode(A__ ) _SCREAMING_SNAKE_CASE = rust_tokenizer.encode(A__ ) self.assertListEqual(A__ , A__ ) def UpperCamelCase ( self ) -> Tuple: _SCREAMING_SNAKE_CASE = BasicTokenizer() self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""" ) , 
["""ah""", """\u535A""", """\u63A8""", """zz"""] ) def UpperCamelCase ( self ) -> List[Any]: _SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=A__ ) self.assertListEqual( tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""hello""", """!""", """how""", """are""", """you""", """?"""] ) self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] ) def UpperCamelCase ( self ) -> Optional[Any]: _SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=A__ , strip_accents=A__ ) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hällo""", """!""", """how""", """are""", """you""", """?"""] ) self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""h\u00E9llo"""] ) def UpperCamelCase ( self ) -> Any: _SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=A__ , strip_accents=A__ ) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] ) self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] ) def UpperCamelCase ( self ) -> Any: _SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=A__ ) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] ) self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] ) def UpperCamelCase ( self ) -> str: _SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=A__ ) self.assertListEqual( tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] ) def UpperCamelCase ( self ) -> Dict: _SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=A__ , strip_accents=A__ ) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""] ) def UpperCamelCase ( self ) -> Union[str, Any]: _SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=A__ , strip_accents=A__ ) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""] ) def UpperCamelCase ( self ) -> str: _SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=A__ , never_split=["""[UNK]"""] ) self.assertListEqual( tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? 
[UNK]""" ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""] ) def UpperCamelCase ( self ) -> Tuple: _SCREAMING_SNAKE_CASE = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""] _SCREAMING_SNAKE_CASE = {} for i, token in enumerate(A__ ): _SCREAMING_SNAKE_CASE = i _SCREAMING_SNAKE_CASE = WordpieceTokenizer(vocab=A__ , unk_token="""[UNK]""" ) self.assertListEqual(tokenizer.tokenize("""""" ) , [] ) self.assertListEqual(tokenizer.tokenize("""unwanted running""" ) , ["""un""", """##want""", """##ed""", """runn""", """##ing"""] ) self.assertListEqual(tokenizer.tokenize("""unwantedX running""" ) , ["""[UNK]""", """runn""", """##ing"""] ) def UpperCamelCase ( self ) -> str: self.assertTrue(_is_whitespace(""" """ ) ) self.assertTrue(_is_whitespace("""\t""" ) ) self.assertTrue(_is_whitespace("""\r""" ) ) self.assertTrue(_is_whitespace("""\n""" ) ) self.assertTrue(_is_whitespace("""\u00A0""" ) ) self.assertFalse(_is_whitespace("""A""" ) ) self.assertFalse(_is_whitespace("""-""" ) ) def UpperCamelCase ( self ) -> Union[str, Any]: self.assertTrue(_is_control("""\u0005""" ) ) self.assertFalse(_is_control("""A""" ) ) self.assertFalse(_is_control(""" """ ) ) self.assertFalse(_is_control("""\t""" ) ) self.assertFalse(_is_control("""\r""" ) ) def UpperCamelCase ( self ) -> Dict: self.assertTrue(_is_punctuation("""-""" ) ) self.assertTrue(_is_punctuation("""$""" ) ) self.assertTrue(_is_punctuation("""`""" ) ) self.assertTrue(_is_punctuation(""".""" ) ) self.assertFalse(_is_punctuation("""A""" ) ) self.assertFalse(_is_punctuation(""" """ ) ) def UpperCamelCase ( self ) -> str: _SCREAMING_SNAKE_CASE = self.get_tokenizer() _SCREAMING_SNAKE_CASE = self.get_rust_tokenizer() # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340 self.assertListEqual([tokenizer.tokenize(A__ ) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] ) self.assertListEqual( [rust_tokenizer.tokenize(A__ ) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] ) @slow def UpperCamelCase ( self ) -> Any: _SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained("""google/mobilebert-uncased""" ) _SCREAMING_SNAKE_CASE = tokenizer.encode("""sequence builders""" , add_special_tokens=A__ ) _SCREAMING_SNAKE_CASE = tokenizer.encode("""multi-sequence build""" , add_special_tokens=A__ ) _SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(A__ ) _SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(A__ , A__ ) assert encoded_sentence == [1_01] + text + [1_02] assert encoded_pair == [1_01] + text + [1_02] + text_a + [1_02] def UpperCamelCase ( self ) -> List[str]: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ): _SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(A__ , **A__ ) _SCREAMING_SNAKE_CASE = F"A, naïve {tokenizer_r.mask_token} AllenNLP sentence." 
_SCREAMING_SNAKE_CASE = tokenizer_r.encode_plus( A__ , return_attention_mask=A__ , return_token_type_ids=A__ , return_offsets_mapping=A__ , add_special_tokens=A__ , ) _SCREAMING_SNAKE_CASE = tokenizer_r.do_lower_case if hasattr(A__ , """do_lower_case""" ) else False _SCREAMING_SNAKE_CASE = ( [ ((0, 0), tokenizer_r.cls_token), ((0, 1), """A"""), ((1, 2), ""","""), ((3, 5), """na"""), ((5, 6), """##ï"""), ((6, 8), """##ve"""), ((9, 15), tokenizer_r.mask_token), ((16, 21), """Allen"""), ((21, 23), """##NL"""), ((23, 24), """##P"""), ((25, 33), """sentence"""), ((33, 34), """."""), ((0, 0), tokenizer_r.sep_token), ] if not do_lower_case else [ ((0, 0), tokenizer_r.cls_token), ((0, 1), """a"""), ((1, 2), ""","""), ((3, 8), """naive"""), ((9, 15), tokenizer_r.mask_token), ((16, 21), """allen"""), ((21, 23), """##nl"""), ((23, 24), """##p"""), ((25, 33), """sentence"""), ((33, 34), """."""), ((0, 0), tokenizer_r.sep_token), ] ) self.assertEqual( [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["""input_ids"""] ) ) self.assertEqual([e[0] for e in expected_results] , tokens["""offset_mapping"""] ) def UpperCamelCase ( self ) -> Any: _SCREAMING_SNAKE_CASE = ["""的""", """人""", """有"""] _SCREAMING_SNAKE_CASE = """""".join(A__ ) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ): _SCREAMING_SNAKE_CASE = True _SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained(A__ , **A__ ) _SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(A__ , **A__ ) _SCREAMING_SNAKE_CASE = tokenizer_p.encode(A__ , add_special_tokens=A__ ) _SCREAMING_SNAKE_CASE = tokenizer_r.encode(A__ , add_special_tokens=A__ ) _SCREAMING_SNAKE_CASE = tokenizer_r.convert_ids_to_tokens(A__ ) _SCREAMING_SNAKE_CASE = tokenizer_p.convert_ids_to_tokens(A__ ) # it is expected that each Chinese character is not preceded by "##" self.assertListEqual(A__ , A__ ) self.assertListEqual(A__ , A__ ) _SCREAMING_SNAKE_CASE = False _SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(A__ , **A__ ) _SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained(A__ , **A__ ) _SCREAMING_SNAKE_CASE = tokenizer_r.encode(A__ , add_special_tokens=A__ ) _SCREAMING_SNAKE_CASE = tokenizer_p.encode(A__ , add_special_tokens=A__ ) _SCREAMING_SNAKE_CASE = tokenizer_r.convert_ids_to_tokens(A__ ) _SCREAMING_SNAKE_CASE = tokenizer_p.convert_ids_to_tokens(A__ ) # it is expected that only the first Chinese character is not preceded by "##". _SCREAMING_SNAKE_CASE = [ F"##{token}" if idx != 0 else token for idx, token in enumerate(A__ ) ] self.assertListEqual(A__ , A__ ) self.assertListEqual(A__ , A__ )
0
0
'''simple docstring''' import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import CLIPSegProcessor, ViTImageProcessor @require_vision class _a (unittest.TestCase): """simple docstring""" def UpperCamelCase ( self ) -> List[Any]: _SCREAMING_SNAKE_CASE = tempfile.mkdtemp() # fmt: off _SCREAMING_SNAKE_CASE = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""] # fmt: on _SCREAMING_SNAKE_CASE = dict(zip(A__ , range(len(A__ ) ) ) ) _SCREAMING_SNAKE_CASE = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""] _SCREAMING_SNAKE_CASE = {"""unk_token""": """<unk>"""} _SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) _SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(A__ ) + """\n""" ) with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp: fp.write("""\n""".join(A__ ) ) _SCREAMING_SNAKE_CASE = { """do_resize""": True, """size""": 20, """do_center_crop""": True, """crop_size""": 18, """do_normalize""": True, """image_mean""": [0.4814_5466, 0.457_8275, 0.4082_1073], """image_std""": [0.2686_2954, 0.2613_0258, 0.2757_7711], } _SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , A__ ) with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp: json.dump(A__ , A__ ) def UpperCamelCase ( self , **A__ ) -> Any: return CLIPTokenizer.from_pretrained(self.tmpdirname , **A__ ) def UpperCamelCase ( self , **A__ ) -> str: return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **A__ ) def UpperCamelCase ( self , **A__ ) -> Dict: return ViTImageProcessor.from_pretrained(self.tmpdirname , **A__ ) def UpperCamelCase ( self ) -> Any: shutil.rmtree(self.tmpdirname ) def UpperCamelCase ( self ) -> List[Any]: _SCREAMING_SNAKE_CASE = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )] _SCREAMING_SNAKE_CASE = [Image.fromarray(np.moveaxis(A__ , 0 , -1 ) ) for x in image_inputs] return image_inputs def UpperCamelCase ( self ) -> Union[str, Any]: _SCREAMING_SNAKE_CASE = self.get_tokenizer() _SCREAMING_SNAKE_CASE = self.get_rust_tokenizer() _SCREAMING_SNAKE_CASE = self.get_image_processor() _SCREAMING_SNAKE_CASE = CLIPSegProcessor(tokenizer=A__ , image_processor=A__ ) processor_slow.save_pretrained(self.tmpdirname ) _SCREAMING_SNAKE_CASE = CLIPSegProcessor.from_pretrained(self.tmpdirname , use_fast=A__ ) _SCREAMING_SNAKE_CASE = CLIPSegProcessor(tokenizer=A__ , image_processor=A__ ) processor_fast.save_pretrained(self.tmpdirname ) _SCREAMING_SNAKE_CASE = CLIPSegProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , 
A__ ) self.assertIsInstance(processor_fast.tokenizer , A__ ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , A__ ) self.assertIsInstance(processor_fast.image_processor , A__ ) def UpperCamelCase ( self ) -> Optional[Any]: _SCREAMING_SNAKE_CASE = CLIPSegProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) _SCREAMING_SNAKE_CASE = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" ) _SCREAMING_SNAKE_CASE = self.get_image_processor(do_normalize=A__ , padding_value=1.0 ) _SCREAMING_SNAKE_CASE = CLIPSegProcessor.from_pretrained( self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=A__ , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , A__ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , A__ ) def UpperCamelCase ( self ) -> Dict: _SCREAMING_SNAKE_CASE = self.get_image_processor() _SCREAMING_SNAKE_CASE = self.get_tokenizer() _SCREAMING_SNAKE_CASE = CLIPSegProcessor(tokenizer=A__ , image_processor=A__ ) _SCREAMING_SNAKE_CASE = self.prepare_image_inputs() _SCREAMING_SNAKE_CASE = image_processor(A__ , return_tensors="""np""" ) _SCREAMING_SNAKE_CASE = processor(images=A__ , return_tensors="""np""" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) def UpperCamelCase ( self ) -> Tuple: _SCREAMING_SNAKE_CASE = self.get_image_processor() _SCREAMING_SNAKE_CASE = self.get_tokenizer() _SCREAMING_SNAKE_CASE = CLIPSegProcessor(tokenizer=A__ , image_processor=A__ ) _SCREAMING_SNAKE_CASE = """lower newer""" _SCREAMING_SNAKE_CASE = processor(text=A__ ) _SCREAMING_SNAKE_CASE = tokenizer(A__ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def UpperCamelCase ( self ) -> Any: _SCREAMING_SNAKE_CASE = self.get_image_processor() _SCREAMING_SNAKE_CASE = self.get_tokenizer() _SCREAMING_SNAKE_CASE = CLIPSegProcessor(tokenizer=A__ , image_processor=A__ ) _SCREAMING_SNAKE_CASE = """lower newer""" _SCREAMING_SNAKE_CASE = self.prepare_image_inputs() _SCREAMING_SNAKE_CASE = processor(text=A__ , images=A__ ) self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """pixel_values"""] ) # test if it raises when no input is passed with pytest.raises(A__ ): processor() def UpperCamelCase ( self ) -> int: _SCREAMING_SNAKE_CASE = self.get_image_processor() _SCREAMING_SNAKE_CASE = self.get_tokenizer() _SCREAMING_SNAKE_CASE = CLIPSegProcessor(tokenizer=A__ , image_processor=A__ ) _SCREAMING_SNAKE_CASE = self.prepare_image_inputs() _SCREAMING_SNAKE_CASE = self.prepare_image_inputs() _SCREAMING_SNAKE_CASE = processor(images=A__ , visual_prompt=A__ ) self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """conditional_pixel_values"""] ) # test if it raises when no input is passed with pytest.raises(A__ ): processor() def UpperCamelCase ( self ) -> Optional[Any]: _SCREAMING_SNAKE_CASE = self.get_image_processor() _SCREAMING_SNAKE_CASE = self.get_tokenizer() _SCREAMING_SNAKE_CASE = CLIPSegProcessor(tokenizer=A__ , image_processor=A__ ) 
_SCREAMING_SNAKE_CASE = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] _SCREAMING_SNAKE_CASE = processor.batch_decode(A__ ) _SCREAMING_SNAKE_CASE = tokenizer.batch_decode(A__ ) self.assertListEqual(A__ , A__ )
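# --- Added example (not part of the test suite): a minimal usage sketch of
# CLIPSegProcessor, mirroring what the tests above exercise. The checkpoint
# name is an assumption; any CLIPSeg repo with a saved processor should work.
from PIL import Image
import numpy as np
from transformers import CLIPSegProcessor

clipseg_processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
demo_image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))

# Text + image inputs yield input_ids, attention_mask and pixel_values.
encoded = clipseg_processor(text=["a cat"], images=[demo_image], return_tensors="pt")
print(sorted(encoded.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']

# Visual prompts come back as conditional_pixel_values, as tested above.
prompted = clipseg_processor(images=[demo_image], visual_prompt=[demo_image], return_tensors="pt")
print(sorted(prompted.keys()))  # ['conditional_pixel_values', 'pixel_values']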
'''simple docstring''' import logging import os import quant_trainer import torch from torch.utils.data import DataLoader from transformers import Trainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput UpperCamelCase__ : Tuple = logging.getLogger(__name__) if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class _a (_lowerCamelCase): """simple docstring""" def __init__( self , *A__ , A__=None , A__=None , A__=None , **A__ ) -> Optional[int]: super().__init__(*A__ , **A__ ) _SCREAMING_SNAKE_CASE = eval_examples _SCREAMING_SNAKE_CASE = post_process_function _SCREAMING_SNAKE_CASE = quant_trainer_args _SCREAMING_SNAKE_CASE = 1_28 # default number of calibration samples def UpperCamelCase ( self , A__=None ) -> Union[str, Any]: if calib_dataset is None and self.calib_dataset is None: raise ValueError("""Trainer: calibration requires an calib_dataset.""" ) _SCREAMING_SNAKE_CASE = calib_dataset if calib_dataset is not None else self.calib_dataset _SCREAMING_SNAKE_CASE = self._remove_unused_columns(A__ , description="""Calibration""" ) return DataLoader( A__ , batch_size=self.args.eval_batch_size , collate_fn=self.data_collator , drop_last=self.args.dataloader_drop_last , num_workers=self.args.dataloader_num_workers , pin_memory=self.args.dataloader_pin_memory , shuffle=A__ , ) def UpperCamelCase ( self , A__=None ) -> str: _SCREAMING_SNAKE_CASE = self.train_dataset if calib_dataset is None else calib_dataset _SCREAMING_SNAKE_CASE = self.get_calib_dataloader(A__ ) _SCREAMING_SNAKE_CASE = self.model quant_trainer.configure_model(A__ , self.quant_trainer_args , calib=A__ ) model.eval() quant_trainer.enable_calibration(A__ ) logger.info("""***** Running calibration *****""" ) logger.info(F" Num examples = {self.calib_num}" ) logger.info(F" Batch size = {calib_dataloader.batch_size}" ) for step, inputs in enumerate(A__ ): # Prediction step _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.prediction_step(A__ , A__ , prediction_loss_only=A__ ) if (step + 1) * calib_dataloader.batch_size >= self.calib_num: break quant_trainer.finish_calibration(A__ , self.quant_trainer_args ) _SCREAMING_SNAKE_CASE = model def UpperCamelCase ( self , A__=None , A__=None , A__=None , A__ = "eval" ) -> List[Any]: _SCREAMING_SNAKE_CASE = self.eval_dataset if eval_dataset is None else eval_dataset _SCREAMING_SNAKE_CASE = self.get_eval_dataloader(A__ ) _SCREAMING_SNAKE_CASE = self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. 
_SCREAMING_SNAKE_CASE = self.compute_metrics _SCREAMING_SNAKE_CASE = None _SCREAMING_SNAKE_CASE = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: _SCREAMING_SNAKE_CASE = eval_loop( A__ , description="""Evaluation""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=A__ , ) finally: _SCREAMING_SNAKE_CASE = compute_metrics if self.post_process_function is not None and self.compute_metrics is not None: _SCREAMING_SNAKE_CASE = self.post_process_function(A__ , A__ , output.predictions ) _SCREAMING_SNAKE_CASE = self.compute_metrics(A__ ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(F"{metric_key_prefix}_" ): _SCREAMING_SNAKE_CASE = metrics.pop(A__ ) self.log(A__ ) else: _SCREAMING_SNAKE_CASE = {} if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) xm.master_print(met.metrics_report() ) _SCREAMING_SNAKE_CASE = self.callback_handler.on_evaluate(self.args , self.state , self.control , A__ ) return metrics def UpperCamelCase ( self , A__ , A__ , A__=None , A__ = "test" ) -> List[str]: _SCREAMING_SNAKE_CASE = self.get_test_dataloader(A__ ) # Temporarily disable metric computation, we will do it in the loop here. _SCREAMING_SNAKE_CASE = self.compute_metrics _SCREAMING_SNAKE_CASE = None _SCREAMING_SNAKE_CASE = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: _SCREAMING_SNAKE_CASE = eval_loop( A__ , description="""Prediction""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=A__ , ) finally: _SCREAMING_SNAKE_CASE = compute_metrics if self.post_process_function is None or self.compute_metrics is None: return output _SCREAMING_SNAKE_CASE = self.post_process_function(A__ , A__ , output.predictions , """predict""" ) _SCREAMING_SNAKE_CASE = self.compute_metrics(A__ ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(F"{metric_key_prefix}_" ): _SCREAMING_SNAKE_CASE = metrics.pop(A__ ) return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=A__ ) def UpperCamelCase ( self , A__="./" ) -> Tuple: _SCREAMING_SNAKE_CASE = self.eval_dataset _SCREAMING_SNAKE_CASE = self.get_eval_dataloader(A__ ) _SCREAMING_SNAKE_CASE = next(iter(A__ ) ) # saving device - to make it consistent _SCREAMING_SNAKE_CASE = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" ) # convert to tuple _SCREAMING_SNAKE_CASE = tuple(v.to(A__ ) for k, v in batch.items() ) logger.info("""Converting model to be onnx compatible""" ) from pytorch_quantization.nn import TensorQuantizer _SCREAMING_SNAKE_CASE = True _SCREAMING_SNAKE_CASE = self.model.to(A__ ) model.eval() model.float() _SCREAMING_SNAKE_CASE = model.module if hasattr(A__ , """module""" ) else model quant_trainer.configure_model(A__ , self.quant_trainer_args ) _SCREAMING_SNAKE_CASE = os.path.join(A__ , """model.onnx""" ) logger.info(F"exporting model to {output_model_file}" ) _SCREAMING_SNAKE_CASE = {0: """batch_size""", 1: """seq_len"""} torch.onnx.export( A__ , A__ , A__ , export_params=A__ , opset_version=13 , do_constant_folding=A__ , input_names=["""input_ids""", """attention_mask""", """token_type_ids"""] , output_names=["""output_start_logits""", """output_end_logits"""] , dynamic_axes={ """input_ids""": axes, """attention_mask""": axes, """token_type_ids""": axes, 
"""output_start_logits""": axes, """output_end_logits""": axes, } , verbose=A__ , ) logger.info("""onnx export finished""" )
import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD torch.set_grad_enabled(False) def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False ) -> Any: """simple docstring""" _SCREAMING_SNAKE_CASE = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F"module.blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight") ) rename_keys.append((F"module.blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias") ) rename_keys.append( (F"module.blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight") ) rename_keys.append((F"module.blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias") ) rename_keys.append((F"module.blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight") ) rename_keys.append((F"module.blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias") ) rename_keys.append((F"module.blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight") ) rename_keys.append((F"module.blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias") ) rename_keys.append((F"module.blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight") ) rename_keys.append((F"module.blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias") ) # projection layer + position embeddings rename_keys.extend( [ ("""module.cls_token""", """vit.embeddings.cls_token"""), ("""module.patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""), ("""module.patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""), ("""module.pos_embed""", """vit.embeddings.position_embeddings"""), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ("""module.norm.weight""", """layernorm.weight"""), ("""module.norm.bias""", """layernorm.bias"""), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" _SCREAMING_SNAKE_CASE = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ("""norm.weight""", """vit.layernorm.weight"""), ("""norm.bias""", """vit.layernorm.bias"""), ("""head.weight""", """classifier.weight"""), ("""head.bias""", """classifier.bias"""), ] ) return rename_keys def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False ) -> int: """simple docstring""" for i in range(config.num_hidden_layers ): if base_model: _SCREAMING_SNAKE_CASE = """""" else: _SCREAMING_SNAKE_CASE = """vit.""" # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) _SCREAMING_SNAKE_CASE = state_dict.pop(F"module.blocks.{i}.attn.qkv.weight" ) _SCREAMING_SNAKE_CASE = state_dict.pop(F"module.blocks.{i}.attn.qkv.bias" ) # next, add query, keys and values (in that order) to the state dict _SCREAMING_SNAKE_CASE = in_proj_weight[ : config.hidden_size, : ] _SCREAMING_SNAKE_CASE = in_proj_bias[: config.hidden_size] _SCREAMING_SNAKE_CASE = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] _SCREAMING_SNAKE_CASE = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] _SCREAMING_SNAKE_CASE = 
in_proj_weight[ -config.hidden_size :, : ] _SCREAMING_SNAKE_CASE = in_proj_bias[-config.hidden_size :] def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]: """simple docstring""" _SCREAMING_SNAKE_CASE = ["""head.weight""", """head.bias"""] for k in ignore_keys: state_dict.pop(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> int: """simple docstring""" _SCREAMING_SNAKE_CASE = [ """module.fc.fc1.weight""", """module.fc.fc1.bias""", """module.fc.bn1.weight""", """module.fc.bn1.bias""", """module.fc.bn1.running_mean""", """module.fc.bn1.running_var""", """module.fc.bn1.num_batches_tracked""", """module.fc.fc2.weight""", """module.fc.fc2.bias""", """module.fc.bn2.weight""", """module.fc.bn2.bias""", """module.fc.bn2.running_mean""", """module.fc.bn2.running_var""", """module.fc.bn2.num_batches_tracked""", """module.fc.fc3.weight""", """module.fc.fc3.bias""", ] for k in ignore_keys: state_dict.pop(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> str: """simple docstring""" _SCREAMING_SNAKE_CASE = dct.pop(SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = val def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]: """simple docstring""" _SCREAMING_SNAKE_CASE = ViTMSNConfig() _SCREAMING_SNAKE_CASE = 10_00 _SCREAMING_SNAKE_CASE = """datasets/huggingface/label-files""" _SCREAMING_SNAKE_CASE = """imagenet-1k-id2label.json""" _SCREAMING_SNAKE_CASE = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , """r""" ) ) _SCREAMING_SNAKE_CASE = {int(SCREAMING_SNAKE_CASE_ ): v for k, v in idalabel.items()} _SCREAMING_SNAKE_CASE = idalabel _SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()} if "s16" in checkpoint_url: _SCREAMING_SNAKE_CASE = 3_84 _SCREAMING_SNAKE_CASE = 15_36 _SCREAMING_SNAKE_CASE = 6 elif "l16" in checkpoint_url: _SCREAMING_SNAKE_CASE = 10_24 _SCREAMING_SNAKE_CASE = 40_96 _SCREAMING_SNAKE_CASE = 24 _SCREAMING_SNAKE_CASE = 16 _SCREAMING_SNAKE_CASE = 0.1 elif "b4" in checkpoint_url: _SCREAMING_SNAKE_CASE = 4 elif "l7" in checkpoint_url: _SCREAMING_SNAKE_CASE = 7 _SCREAMING_SNAKE_CASE = 10_24 _SCREAMING_SNAKE_CASE = 40_96 _SCREAMING_SNAKE_CASE = 24 _SCREAMING_SNAKE_CASE = 16 _SCREAMING_SNAKE_CASE = 0.1 _SCREAMING_SNAKE_CASE = ViTMSNModel(SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE_ , map_location="""cpu""" )["""target_encoder"""] _SCREAMING_SNAKE_CASE = ViTImageProcessor(size=config.image_size ) remove_projection_head(SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = create_rename_keys(SCREAMING_SNAKE_CASE_ , base_model=SCREAMING_SNAKE_CASE_ ) for src, dest in rename_keys: rename_key(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) read_in_q_k_v(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , base_model=SCREAMING_SNAKE_CASE_ ) model.load_state_dict(SCREAMING_SNAKE_CASE_ ) model.eval() _SCREAMING_SNAKE_CASE = """http://images.cocodataset.org/val2017/000000039769.jpg""" _SCREAMING_SNAKE_CASE = Image.open(requests.get(SCREAMING_SNAKE_CASE_ , stream=SCREAMING_SNAKE_CASE_ ).raw ) _SCREAMING_SNAKE_CASE = ViTImageProcessor( size=config.image_size , image_mean=SCREAMING_SNAKE_CASE_ , image_std=SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors="""pt""" ) # forward pass torch.manual_seed(2 ) _SCREAMING_SNAKE_CASE = model(**SCREAMING_SNAKE_CASE_ ) 
_SCREAMING_SNAKE_CASE = outputs.last_hidden_state # The following Colab Notebook was used to generate these outputs: # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb if "s16" in checkpoint_url: _SCREAMING_SNAKE_CASE = torch.tensor([[-1.0915, -1.4876, -1.1809]] ) elif "b16" in checkpoint_url: _SCREAMING_SNAKE_CASE = torch.tensor([[14.2889, -18.9045, 11.7281]] ) elif "l16" in checkpoint_url: _SCREAMING_SNAKE_CASE = torch.tensor([[41.5028, -22.8681, 45.6475]] ) elif "b4" in checkpoint_url: _SCREAMING_SNAKE_CASE = torch.tensor([[-4.3868, 5.2932, -0.4137]] ) else: _SCREAMING_SNAKE_CASE = torch.tensor([[-0.1792, -0.6465, 2.4263]] ) # verify logits assert torch.allclose(last_hidden_state[:, 0, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4 ) print(F"Saving model to {pytorch_dump_folder_path}" ) model.save_pretrained(SCREAMING_SNAKE_CASE_ ) print(F"Saving image processor to {pytorch_dump_folder_path}" ) image_processor.save_pretrained(SCREAMING_SNAKE_CASE_ ) if __name__ == "__main__": UpperCamelCase__ : int = argparse.ArgumentParser() # Required parameters parser.add_argument( "--checkpoint_url", default="https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar", type=str, help="URL of the checkpoint you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) UpperCamelCase__ : List[Any] = parser.parse_args() convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
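# --- Added example: once converted, the dump folder loads back like any Hub
# checkpoint (the path below is an assumed value of --pytorch_dump_folder_path).
import torch
from transformers import ViTImageProcessor, ViTMSNModel

msn_model = ViTMSNModel.from_pretrained("./vit-msn-converted")
msn_processor = ViTImageProcessor.from_pretrained("./vit-msn-converted")
dummy_pixels = torch.randn(1, 3, msn_model.config.image_size, msn_model.config.image_size)
with torch.no_grad():
    hidden = msn_model(pixel_values=dummy_pixels).last_hidden_state
print(hidden.shape)  # (1, num_patches + 1, hidden_size)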
def base16_encode(data: bytes) -> str:
    """Encode bytes as an uppercase base16 (hex) string."""
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    """Decode an uppercase base16 string back to bytes."""
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid: Data does not have an even number of hex digits."
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid: Data is not uppercase hex or it contains invalid characters."
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
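# --- Added round-trip check for the two helpers above:
assert base16_encode(b"Hello World!") == "48656C6C6F20576F726C6421"
assert base16_decode("48656C6C6F20576F726C6421") == b"Hello World!"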
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_canine": ["CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP", "CanineConfig"],
    "tokenization_canine": ["CanineTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_canine"] = [
        "CANINE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CanineForMultipleChoice",
        "CanineForQuestionAnswering",
        "CanineForSequenceClassification",
        "CanineForTokenClassification",
        "CanineLayer",
        "CanineModel",
        "CaninePreTrainedModel",
        "load_tf_weights_in_canine",
    ]

if TYPE_CHECKING:
    from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
    from .tokenization_canine import CanineTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_canine import (
            CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
            CanineForMultipleChoice,
            CanineForQuestionAnswering,
            CanineForSequenceClassification,
            CanineForTokenClassification,
            CanineLayer,
            CanineModel,
            CaninePreTrainedModel,
            load_tf_weights_in_canine,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import pytest
import requests

from datasets.utils.file_utils import http_head

from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline


@pytest.mark.integration
def test_offline_with_timeout():
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request("GET", "https://huggingface.co")
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request("GET", "https://huggingface.co", timeout=1.0)


@pytest.mark.integration
def test_offline_with_connection_error():
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request("GET", "https://huggingface.co")


def test_offline_with_datasets_offline_mode_enabled():
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(ConnectionError):
            http_head("https://huggingface.co")
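# --- Added summary of the three simulation modes exercised above (`offline`
# lives in the local tests/utils.py, not in the public datasets API):
#
#   CONNECTION_TIMES_OUT          -> calls hang -> RequestWouldHangIndefinitelyError,
#                                    unless an explicit timeout= turns it into ConnectTimeout
#   CONNECTION_FAILS              -> requests.exceptions.ConnectionError immediately
#   HF_DATASETS_OFFLINE_SET_TO_1  -> datasets' own helpers (e.g. http_head) raise
#                                    ConnectionError without touching the network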
from math import isqrt


def calculate_prime_numbers(max_number: int) -> list[int]:
    """Return all primes strictly below max_number (sieve of Eratosthenes)."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(max_number: int = 10**8) -> int:
    """Count the composites below max_number with exactly two (not necessarily
    distinct) prime factors, using a two-pointer sweep over the primes
    (Project Euler problem 187)."""
    prime_numbers = calculate_prime_numbers(max_number // 2)

    semiprimes_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left <= right:
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1

    return semiprimes_count


if __name__ == "__main__":
    print(f"{solution() = }")
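# --- Added brute-force cross-check on a small bound:
def _count_semiprimes_naive(limit: int) -> int:
    primes = calculate_prime_numbers(limit)
    return sum(1 for i, p in enumerate(primes) for q in primes[i:] if p * q < limit)


# Semiprimes below 30: 4, 6, 9, 10, 14, 15, 21, 22, 25, 26
assert solution(30) == _count_semiprimes_naive(30) == 10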
import math
from collections.abc import Iterator
from itertools import takewhile


def is_prime(number: int) -> bool:
    """Trial division using the fact that every prime > 3 is of the form 6k +/- 1."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers and all multiples of 3 are not primes
        return False
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator() -> Iterator[int]:
    """Yield the primes in increasing order, indefinitely."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(n: int = 2_000_000) -> int:
    """Return the sum of all primes below n (Project Euler problem 10)."""
    return sum(takewhile(lambda x: x < n, prime_generator()))


if __name__ == "__main__":
    print(f"{solution() = }")
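# --- Added sanity checks: the primes below 10 are 2, 3, 5, 7, summing to 17.
assert solution(10) == 17
assert solution(2) == 0  # no primes below 2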
'''simple docstring''' import argparse import os import jax as jnp import numpy as onp import torch import torch.nn as nn from music_spectrogram_diffusion import inference from tax import checkpoints from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder UpperCamelCase__ : Optional[int] = "base_with_context" def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[str]: """simple docstring""" _SCREAMING_SNAKE_CASE = nn.Parameter(torch.FloatTensor(weights["""token_embedder"""]["""embedding"""] ) ) _SCREAMING_SNAKE_CASE = nn.Parameter( torch.FloatTensor(weights["""Embed_0"""]["""embedding"""] ) , requires_grad=SCREAMING_SNAKE_CASE_ ) for lyr_num, lyr in enumerate(model.encoders ): _SCREAMING_SNAKE_CASE = weights[F"layers_{lyr_num}"] _SCREAMING_SNAKE_CASE = nn.Parameter( torch.FloatTensor(ly_weight["""pre_attention_layer_norm"""]["""scale"""] ) ) _SCREAMING_SNAKE_CASE = ly_weight["""attention"""] _SCREAMING_SNAKE_CASE = nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) ) _SCREAMING_SNAKE_CASE = nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) ) _SCREAMING_SNAKE_CASE = nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) ) _SCREAMING_SNAKE_CASE = nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) ) _SCREAMING_SNAKE_CASE = nn.Parameter(torch.FloatTensor(ly_weight["""pre_mlp_layer_norm"""]["""scale"""] ) ) _SCREAMING_SNAKE_CASE = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_0"""]["""kernel"""].T ) ) _SCREAMING_SNAKE_CASE = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_1"""]["""kernel"""].T ) ) _SCREAMING_SNAKE_CASE = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wo"""]["""kernel"""].T ) ) _SCREAMING_SNAKE_CASE = nn.Parameter(torch.FloatTensor(weights["""encoder_norm"""]["""scale"""] ) ) return model def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[Any]: """simple docstring""" _SCREAMING_SNAKE_CASE = nn.Parameter(torch.FloatTensor(weights["""input_proj"""]["""kernel"""].T ) ) _SCREAMING_SNAKE_CASE = nn.Parameter( torch.FloatTensor(weights["""Embed_0"""]["""embedding"""] ) , requires_grad=SCREAMING_SNAKE_CASE_ ) for lyr_num, lyr in enumerate(model.encoders ): _SCREAMING_SNAKE_CASE = weights[F"layers_{lyr_num}"] _SCREAMING_SNAKE_CASE = ly_weight["""attention"""] _SCREAMING_SNAKE_CASE = nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) ) _SCREAMING_SNAKE_CASE = nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) ) _SCREAMING_SNAKE_CASE = nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) ) _SCREAMING_SNAKE_CASE = nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) ) _SCREAMING_SNAKE_CASE = nn.Parameter( torch.FloatTensor(ly_weight["""pre_attention_layer_norm"""]["""scale"""] ) ) _SCREAMING_SNAKE_CASE = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_0"""]["""kernel"""].T ) ) _SCREAMING_SNAKE_CASE = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_1"""]["""kernel"""].T ) ) _SCREAMING_SNAKE_CASE = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wo"""]["""kernel"""].T ) ) _SCREAMING_SNAKE_CASE = nn.Parameter(torch.FloatTensor(ly_weight["""pre_mlp_layer_norm"""]["""scale"""] ) ) _SCREAMING_SNAKE_CASE = 
nn.Parameter(torch.FloatTensor(weights["""encoder_norm"""]["""scale"""] ) ) return model def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]: """simple docstring""" _SCREAMING_SNAKE_CASE = nn.Parameter(torch.FloatTensor(weights["""time_emb_dense0"""]["""kernel"""].T ) ) _SCREAMING_SNAKE_CASE = nn.Parameter(torch.FloatTensor(weights["""time_emb_dense1"""]["""kernel"""].T ) ) _SCREAMING_SNAKE_CASE = nn.Parameter( torch.FloatTensor(weights["""Embed_0"""]["""embedding"""] ) , requires_grad=SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = nn.Parameter( torch.FloatTensor(weights["""continuous_inputs_projection"""]["""kernel"""].T ) ) for lyr_num, lyr in enumerate(model.decoders ): _SCREAMING_SNAKE_CASE = weights[F"layers_{lyr_num}"] _SCREAMING_SNAKE_CASE = nn.Parameter( torch.FloatTensor(ly_weight["""pre_self_attention_layer_norm"""]["""scale"""] ) ) _SCREAMING_SNAKE_CASE = nn.Parameter( torch.FloatTensor(ly_weight["""FiLMLayer_0"""]["""DenseGeneral_0"""]["""kernel"""].T ) ) _SCREAMING_SNAKE_CASE = ly_weight["""self_attention"""] _SCREAMING_SNAKE_CASE = nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) ) _SCREAMING_SNAKE_CASE = nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) ) _SCREAMING_SNAKE_CASE = nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) ) _SCREAMING_SNAKE_CASE = nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) ) _SCREAMING_SNAKE_CASE = ly_weight["""MultiHeadDotProductAttention_0"""] _SCREAMING_SNAKE_CASE = nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) ) _SCREAMING_SNAKE_CASE = nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) ) _SCREAMING_SNAKE_CASE = nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) ) _SCREAMING_SNAKE_CASE = nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) ) _SCREAMING_SNAKE_CASE = nn.Parameter( torch.FloatTensor(ly_weight["""pre_cross_attention_layer_norm"""]["""scale"""] ) ) _SCREAMING_SNAKE_CASE = nn.Parameter(torch.FloatTensor(ly_weight["""pre_mlp_layer_norm"""]["""scale"""] ) ) _SCREAMING_SNAKE_CASE = nn.Parameter( torch.FloatTensor(ly_weight["""FiLMLayer_1"""]["""DenseGeneral_0"""]["""kernel"""].T ) ) _SCREAMING_SNAKE_CASE = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_0"""]["""kernel"""].T ) ) _SCREAMING_SNAKE_CASE = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_1"""]["""kernel"""].T ) ) _SCREAMING_SNAKE_CASE = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wo"""]["""kernel"""].T ) ) _SCREAMING_SNAKE_CASE = nn.Parameter(torch.FloatTensor(weights["""decoder_norm"""]["""scale"""] ) ) _SCREAMING_SNAKE_CASE = nn.Parameter(torch.FloatTensor(weights["""spec_out_dense"""]["""kernel"""].T ) ) return model def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]: """simple docstring""" _SCREAMING_SNAKE_CASE = checkpoints.load_tax_checkpoint(args.checkpoint_path ) _SCREAMING_SNAKE_CASE = jnp.tree_util.tree_map(onp.array , SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = [ """from __gin__ import dynamic_registration""", """from music_spectrogram_diffusion.models.diffusion import diffusion_utils""", """diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0""", """diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()""", ] _SCREAMING_SNAKE_CASE = os.path.join(args.checkpoint_path , """..""" , 
"""config.gin""" ) _SCREAMING_SNAKE_CASE = inference.parse_training_gin_file(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = inference.InferenceModel(args.checkpoint_path , SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = DDPMScheduler(beta_schedule="""squaredcos_cap_v2""" , variance_type="""fixed_large""" ) _SCREAMING_SNAKE_CASE = SpectrogramNotesEncoder( max_length=synth_model.sequence_length["""inputs"""] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj="""gated-gelu""" , ) _SCREAMING_SNAKE_CASE = SpectrogramContEncoder( input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length["""targets_context"""] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj="""gated-gelu""" , ) _SCREAMING_SNAKE_CASE = TaFilmDecoder( input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length["""targets_context"""] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , ) _SCREAMING_SNAKE_CASE = load_notes_encoder(ta_checkpoint["""target"""]["""token_encoder"""] , SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = load_continuous_encoder(ta_checkpoint["""target"""]["""continuous_encoder"""] , SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = load_decoder(ta_checkpoint["""target"""]["""decoder"""] , SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = OnnxRuntimeModel.from_pretrained("""kashif/soundstream_mel_decoder""" ) _SCREAMING_SNAKE_CASE = SpectrogramDiffusionPipeline( notes_encoder=SCREAMING_SNAKE_CASE_ , continuous_encoder=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ , melgan=SCREAMING_SNAKE_CASE_ , ) if args.save: pipe.save_pretrained(args.output_path ) if __name__ == "__main__": UpperCamelCase__ : List[Any] = argparse.ArgumentParser() parser.add_argument("--output_path", default=None, type=str, required=True, help="Path to the converted model.") parser.add_argument( "--save", default=True, type=bool, required=False, help="Whether to save the converted model or not." ) parser.add_argument( "--checkpoint_path", default=f"""{MODEL}/checkpoint_500000""", type=str, required=False, help="Path to the original jax model checkpoint.", ) UpperCamelCase__ : List[Any] = parser.parse_args() main(args)
'''simple docstring''' import unittest import numpy as np from transformers.testing_utils import require_flax, require_tf, require_torch from transformers.utils import ( expand_dims, flatten_dict, is_flax_available, is_tf_available, is_torch_available, reshape, squeeze, transpose, ) if is_flax_available(): import jax.numpy as jnp if is_tf_available(): import tensorflow as tf if is_torch_available(): import torch class _a (unittest.TestCase): """simple docstring""" def UpperCamelCase ( self ) -> Optional[Any]: _SCREAMING_SNAKE_CASE = { """task_specific_params""": { """summarization""": {"""length_penalty""": 1.0, """max_length""": 1_28, """min_length""": 12, """num_beams""": 4}, """summarization_cnn""": {"""length_penalty""": 2.0, """max_length""": 1_42, """min_length""": 56, """num_beams""": 4}, """summarization_xsum""": {"""length_penalty""": 1.0, """max_length""": 62, """min_length""": 11, """num_beams""": 6}, } } _SCREAMING_SNAKE_CASE = { """task_specific_params.summarization.length_penalty""": 1.0, """task_specific_params.summarization.max_length""": 1_28, """task_specific_params.summarization.min_length""": 12, """task_specific_params.summarization.num_beams""": 4, """task_specific_params.summarization_cnn.length_penalty""": 2.0, """task_specific_params.summarization_cnn.max_length""": 1_42, """task_specific_params.summarization_cnn.min_length""": 56, """task_specific_params.summarization_cnn.num_beams""": 4, """task_specific_params.summarization_xsum.length_penalty""": 1.0, """task_specific_params.summarization_xsum.max_length""": 62, """task_specific_params.summarization_xsum.min_length""": 11, """task_specific_params.summarization_xsum.num_beams""": 6, } self.assertEqual(flatten_dict(A__ ) , A__ ) def UpperCamelCase ( self ) -> int: _SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 ) self.assertTrue(np.allclose(transpose(A__ ) , x.transpose() ) ) _SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 , 5 ) self.assertTrue(np.allclose(transpose(A__ , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) ) @require_torch def UpperCamelCase ( self ) -> Optional[Any]: _SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 ) _SCREAMING_SNAKE_CASE = torch.tensor(A__ ) self.assertTrue(np.allclose(transpose(A__ ) , transpose(A__ ).numpy() ) ) _SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 , 5 ) _SCREAMING_SNAKE_CASE = torch.tensor(A__ ) self.assertTrue(np.allclose(transpose(A__ , axes=(1, 2, 0) ) , transpose(A__ , axes=(1, 2, 0) ).numpy() ) ) @require_tf def UpperCamelCase ( self ) -> Optional[int]: _SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 ) _SCREAMING_SNAKE_CASE = tf.constant(A__ ) self.assertTrue(np.allclose(transpose(A__ ) , transpose(A__ ).numpy() ) ) _SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 , 5 ) _SCREAMING_SNAKE_CASE = tf.constant(A__ ) self.assertTrue(np.allclose(transpose(A__ , axes=(1, 2, 0) ) , transpose(A__ , axes=(1, 2, 0) ).numpy() ) ) @require_flax def UpperCamelCase ( self ) -> List[str]: _SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 ) _SCREAMING_SNAKE_CASE = jnp.array(A__ ) self.assertTrue(np.allclose(transpose(A__ ) , np.asarray(transpose(A__ ) ) ) ) _SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 , 5 ) _SCREAMING_SNAKE_CASE = jnp.array(A__ ) self.assertTrue(np.allclose(transpose(A__ , axes=(1, 2, 0) ) , np.asarray(transpose(A__ , axes=(1, 2, 0) ) ) ) ) def UpperCamelCase ( self ) -> Optional[int]: _SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 ) self.assertTrue(np.allclose(reshape(A__ , (4, 3) ) , np.reshape(A__ , (4, 3) ) ) ) _SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 , 5 ) 
self.assertTrue(np.allclose(reshape(A__ , (12, 5) ) , np.reshape(A__ , (12, 5) ) ) ) @require_torch def UpperCamelCase ( self ) -> Optional[int]: _SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 ) _SCREAMING_SNAKE_CASE = torch.tensor(A__ ) self.assertTrue(np.allclose(reshape(A__ , (4, 3) ) , reshape(A__ , (4, 3) ).numpy() ) ) _SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 , 5 ) _SCREAMING_SNAKE_CASE = torch.tensor(A__ ) self.assertTrue(np.allclose(reshape(A__ , (12, 5) ) , reshape(A__ , (12, 5) ).numpy() ) ) @require_tf def UpperCamelCase ( self ) -> Tuple: _SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 ) _SCREAMING_SNAKE_CASE = tf.constant(A__ ) self.assertTrue(np.allclose(reshape(A__ , (4, 3) ) , reshape(A__ , (4, 3) ).numpy() ) ) _SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 , 5 ) _SCREAMING_SNAKE_CASE = tf.constant(A__ ) self.assertTrue(np.allclose(reshape(A__ , (12, 5) ) , reshape(A__ , (12, 5) ).numpy() ) ) @require_flax def UpperCamelCase ( self ) -> List[Any]: _SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 ) _SCREAMING_SNAKE_CASE = jnp.array(A__ ) self.assertTrue(np.allclose(reshape(A__ , (4, 3) ) , np.asarray(reshape(A__ , (4, 3) ) ) ) ) _SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 , 5 ) _SCREAMING_SNAKE_CASE = jnp.array(A__ ) self.assertTrue(np.allclose(reshape(A__ , (12, 5) ) , np.asarray(reshape(A__ , (12, 5) ) ) ) ) def UpperCamelCase ( self ) -> Any: _SCREAMING_SNAKE_CASE = np.random.randn(1 , 3 , 4 ) self.assertTrue(np.allclose(squeeze(A__ ) , np.squeeze(A__ ) ) ) _SCREAMING_SNAKE_CASE = np.random.randn(1 , 4 , 1 , 5 ) self.assertTrue(np.allclose(squeeze(A__ , axis=2 ) , np.squeeze(A__ , axis=2 ) ) ) @require_torch def UpperCamelCase ( self ) -> Any: _SCREAMING_SNAKE_CASE = np.random.randn(1 , 3 , 4 ) _SCREAMING_SNAKE_CASE = torch.tensor(A__ ) self.assertTrue(np.allclose(squeeze(A__ ) , squeeze(A__ ).numpy() ) ) _SCREAMING_SNAKE_CASE = np.random.randn(1 , 4 , 1 , 5 ) _SCREAMING_SNAKE_CASE = torch.tensor(A__ ) self.assertTrue(np.allclose(squeeze(A__ , axis=2 ) , squeeze(A__ , axis=2 ).numpy() ) ) @require_tf def UpperCamelCase ( self ) -> List[str]: _SCREAMING_SNAKE_CASE = np.random.randn(1 , 3 , 4 ) _SCREAMING_SNAKE_CASE = tf.constant(A__ ) self.assertTrue(np.allclose(squeeze(A__ ) , squeeze(A__ ).numpy() ) ) _SCREAMING_SNAKE_CASE = np.random.randn(1 , 4 , 1 , 5 ) _SCREAMING_SNAKE_CASE = tf.constant(A__ ) self.assertTrue(np.allclose(squeeze(A__ , axis=2 ) , squeeze(A__ , axis=2 ).numpy() ) ) @require_flax def UpperCamelCase ( self ) -> Optional[int]: _SCREAMING_SNAKE_CASE = np.random.randn(1 , 3 , 4 ) _SCREAMING_SNAKE_CASE = jnp.array(A__ ) self.assertTrue(np.allclose(squeeze(A__ ) , np.asarray(squeeze(A__ ) ) ) ) _SCREAMING_SNAKE_CASE = np.random.randn(1 , 4 , 1 , 5 ) _SCREAMING_SNAKE_CASE = jnp.array(A__ ) self.assertTrue(np.allclose(squeeze(A__ , axis=2 ) , np.asarray(squeeze(A__ , axis=2 ) ) ) ) def UpperCamelCase ( self ) -> Optional[Any]: _SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 ) self.assertTrue(np.allclose(expand_dims(A__ , axis=1 ) , np.expand_dims(A__ , axis=1 ) ) ) @require_torch def UpperCamelCase ( self ) -> Optional[int]: _SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 ) _SCREAMING_SNAKE_CASE = torch.tensor(A__ ) self.assertTrue(np.allclose(expand_dims(A__ , axis=1 ) , expand_dims(A__ , axis=1 ).numpy() ) ) @require_tf def UpperCamelCase ( self ) -> str: _SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 ) _SCREAMING_SNAKE_CASE = tf.constant(A__ ) self.assertTrue(np.allclose(expand_dims(A__ , axis=1 ) , expand_dims(A__ , axis=1 ).numpy() ) ) @require_flax def 
UpperCamelCase ( self ) -> Any: _SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 ) _SCREAMING_SNAKE_CASE = jnp.array(A__ ) self.assertTrue(np.allclose(expand_dims(A__ , axis=1 ) , np.asarray(expand_dims(A__ , axis=1 ) ) ) )
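# --- Added one-screen demo of what the tests above verify: the generic
# helpers dispatch on the input's framework and mirror numpy semantics.
import numpy as np
from transformers.utils import expand_dims, reshape, squeeze, transpose

arr = np.random.randn(1, 3, 4)
assert transpose(arr, axes=(1, 2, 0)).shape == (3, 4, 1)
assert reshape(arr, (4, 3)).shape == (4, 3)
assert squeeze(arr).shape == (3, 4)
assert expand_dims(arr, axis=0).shape == (1, 1, 3, 4)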
'''simple docstring''' import unittest from transformers import ( MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TextaTextGenerationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, require_tf, require_torch from transformers.utils import is_torch_available from .test_pipelines_common import ANY if is_torch_available(): import torch @is_pipeline_test class _a (unittest.TestCase): """simple docstring""" SCREAMING_SNAKE_CASE = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING SCREAMING_SNAKE_CASE = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING def UpperCamelCase ( self , A__ , A__ , A__ ) -> Optional[Any]: _SCREAMING_SNAKE_CASE = TextaTextGenerationPipeline(model=A__ , tokenizer=A__ ) return generator, ["Something to write", "Something else"] def UpperCamelCase ( self , A__ , A__ ) -> Optional[int]: _SCREAMING_SNAKE_CASE = generator("""Something there""" ) self.assertEqual(A__ , [{"""generated_text""": ANY(A__ )}] ) # These are encoder decoder, they don't just append to incoming string self.assertFalse(outputs[0]["""generated_text"""].startswith("""Something there""" ) ) _SCREAMING_SNAKE_CASE = generator(["""This is great !""", """Something else"""] , num_return_sequences=2 , do_sample=A__ ) self.assertEqual( A__ , [ [{"""generated_text""": ANY(A__ )}, {"""generated_text""": ANY(A__ )}], [{"""generated_text""": ANY(A__ )}, {"""generated_text""": ANY(A__ )}], ] , ) _SCREAMING_SNAKE_CASE = generator( ["""This is great !""", """Something else"""] , num_return_sequences=2 , batch_size=2 , do_sample=A__ ) self.assertEqual( A__ , [ [{"""generated_text""": ANY(A__ )}, {"""generated_text""": ANY(A__ )}], [{"""generated_text""": ANY(A__ )}, {"""generated_text""": ANY(A__ )}], ] , ) with self.assertRaises(A__ ): generator(4 ) @require_torch def UpperCamelCase ( self ) -> Optional[int]: _SCREAMING_SNAKE_CASE = pipeline("""text2text-generation""" , model="""patrickvonplaten/t5-tiny-random""" , framework="""pt""" ) # do_sample=False necessary for reproducibility _SCREAMING_SNAKE_CASE = generator("""Something there""" , do_sample=A__ ) self.assertEqual(A__ , [{"""generated_text""": """"""}] ) _SCREAMING_SNAKE_CASE = 3 _SCREAMING_SNAKE_CASE = generator( """Something there""" , num_return_sequences=A__ , num_beams=A__ , ) _SCREAMING_SNAKE_CASE = [ {"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide Beide"""}, {"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide"""}, {"""generated_text""": """"""}, ] self.assertEqual(A__ , A__ ) _SCREAMING_SNAKE_CASE = generator("""This is a test""" , do_sample=A__ , num_return_sequences=2 , return_tensors=A__ ) self.assertEqual( A__ , [ {"""generated_token_ids""": ANY(torch.Tensor )}, {"""generated_token_ids""": ANY(torch.Tensor )}, ] , ) _SCREAMING_SNAKE_CASE = generator.model.config.eos_token_id _SCREAMING_SNAKE_CASE = """<pad>""" _SCREAMING_SNAKE_CASE = generator( ["""This is a test""", """This is a second test"""] , do_sample=A__ , num_return_sequences=2 , batch_size=2 , return_tensors=A__ , ) self.assertEqual( A__ , [ [ {"""generated_token_ids""": ANY(torch.Tensor )}, {"""generated_token_ids""": ANY(torch.Tensor )}, ], [ {"""generated_token_ids""": ANY(torch.Tensor )}, {"""generated_token_ids""": ANY(torch.Tensor )}, ], ] , ) @require_tf def UpperCamelCase ( self ) -> str: _SCREAMING_SNAKE_CASE = pipeline("""text2text-generation""" , model="""patrickvonplaten/t5-tiny-random""" , framework="""tf""" ) # do_sample=False necessary for reproducibility _SCREAMING_SNAKE_CASE = 
generator("""Something there""" , do_sample=A__ ) self.assertEqual(A__ , [{"""generated_text""": """"""}] )
'''simple docstring''' from pathlib import PurePosixPath from typing import Optional import fsspec from fsspec import AbstractFileSystem from huggingface_hub.hf_api import DatasetInfo from ..utils.file_utils import get_authentication_headers_for_url from ..utils.hub import hf_hub_url class _a (_lowerCamelCase): """simple docstring""" SCREAMING_SNAKE_CASE = '' SCREAMING_SNAKE_CASE = 'hf-legacy' # "hf://"" is reserved for hffs def __init__( self , A__ = None , A__ = None , **A__ , ) -> Optional[int]: super().__init__(self , **A__ ) _SCREAMING_SNAKE_CASE = repo_info _SCREAMING_SNAKE_CASE = token _SCREAMING_SNAKE_CASE = None def UpperCamelCase ( self ) -> Tuple: if self.dir_cache is None: _SCREAMING_SNAKE_CASE = {} for hf_file in self.repo_info.siblings: # TODO(QL): add sizes _SCREAMING_SNAKE_CASE = { """name""": hf_file.rfilename, """size""": None, """type""": """file""", } self.dir_cache.update( { str(A__ ): {"""name""": str(A__ ), """size""": None, """type""": """directory"""} for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1] } ) def UpperCamelCase ( self , A__ , A__ = "rb" , **A__ , ) -> Optional[int]: if not isinstance(self.repo_info , A__ ): raise NotImplementedError(F"Open is only implemented for dataset repositories, but got {self.repo_info}" ) _SCREAMING_SNAKE_CASE = hf_hub_url(self.repo_info.id , A__ , revision=self.repo_info.sha ) return fsspec.open( A__ , mode=A__ , headers=get_authentication_headers_for_url(A__ , use_auth_token=self.token ) , client_kwargs={"""trust_env""": True} , ).open() def UpperCamelCase ( self , A__ , **A__ ) -> str: self._get_dirs() _SCREAMING_SNAKE_CASE = self._strip_protocol(A__ ) if path in self.dir_cache: return self.dir_cache[path] else: raise FileNotFoundError(A__ ) def UpperCamelCase ( self , A__ , A__=False , **A__ ) -> List[Any]: self._get_dirs() _SCREAMING_SNAKE_CASE = PurePosixPath(path.strip("""/""" ) ) _SCREAMING_SNAKE_CASE = {} for p, f in self.dir_cache.items(): _SCREAMING_SNAKE_CASE = PurePosixPath(p.strip("""/""" ) ) _SCREAMING_SNAKE_CASE = p.parent if root == path: _SCREAMING_SNAKE_CASE = f _SCREAMING_SNAKE_CASE = list(paths.values() ) if detail: return out else: return sorted(f["""name"""] for f in out )
import argparse import math import os from copy import deepcopy import torch from audio_diffusion.models import DiffusionAttnUnetaD from diffusion import sampling from torch import nn from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel UpperCamelCase__ : Any = { "gwf-440k": { "url": "https://model-server.zqevans2.workers.dev/gwf-440k.ckpt", "sample_rate": 48_000, "sample_size": 65_536, }, "jmann-small-190k": { "url": "https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt", "sample_rate": 48_000, "sample_size": 65_536, }, "jmann-large-580k": { "url": "https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt", "sample_rate": 48_000, "sample_size": 131_072, }, "maestro-uncond-150k": { "url": "https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt", "sample_rate": 16_000, "sample_size": 65_536, }, "unlocked-uncond-250k": { "url": "https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt", "sample_rate": 16_000, "sample_size": 65_536, }, "honk-140k": { "url": "https://model-server.zqevans2.workers.dev/honk-140k.ckpt", "sample_rate": 16_000, "sample_size": 65_536, }, } def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[str]: """simple docstring""" return torch.atana(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) / math.pi * 2 def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> List[Any]: """simple docstring""" _SCREAMING_SNAKE_CASE = torch.sin(t * math.pi / 2 ) ** 2 _SCREAMING_SNAKE_CASE = (1 - sigma**2) ** 0.5 return alpha_sigma_to_t(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) class _a (_lowerCamelCase): """simple docstring""" pass class _a (nn.Module): """simple docstring""" def __init__( self , A__ ) -> Dict: super().__init__() _SCREAMING_SNAKE_CASE = DiffusionAttnUnetaD(A__ , n_attn_layers=4 ) _SCREAMING_SNAKE_CASE = deepcopy(self.diffusion ) _SCREAMING_SNAKE_CASE = torch.quasirandom.SobolEngine(1 , scramble=A__ ) def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> str: """simple docstring""" _SCREAMING_SNAKE_CASE = MODELS_MAP[model_name]["""url"""] os.system(F"wget {url} ./" ) return F"./{model_name}.ckpt" UpperCamelCase__ : Optional[int] = { "1": "resnets.0", "2": "attentions.0", "3": "resnets.1", "4": "attentions.1", "5": "resnets.2", "6": "attentions.2", } UpperCamelCase__ : List[str] = { "8": "resnets.0", "9": "attentions.0", "10": "resnets.1", "11": "attentions.1", "12": "resnets.2", "13": "attentions.2", } UpperCamelCase__ : str = { "1": "resnets.0", "2": "attentions.0", "3": "resnets.1", "4": "attentions.1", "5": "resnets.2", "6": "attentions.2", "8": "resnets.3", "9": "attentions.3", "10": "resnets.4", "11": "attentions.4", "12": "resnets.5", "13": "attentions.5", } UpperCamelCase__ : Union[str, Any] = { "0": "resnets.0", "1": "resnets.1", "2": "resnets.2", "4": "resnets.0", "5": "resnets.1", "6": "resnets.2", } UpperCamelCase__ : Optional[int] = { "skip": "conv_skip", "main.0": "conv_1", "main.1": "group_norm_1", "main.3": "conv_2", "main.4": "group_norm_2", } UpperCamelCase__ : Any = { "norm": "group_norm", "qkv_proj": ["query", "key", "value"], "out_proj": ["proj_attn"], } def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> Dict: """simple docstring""" if name.startswith("""skip""" ): return name.replace("""skip""" , RES_CONV_MAP["""skip"""] ) # name has to be of format main.{digit} if not name.startswith("""main.""" ): raise ValueError(F"ResConvBlock error with {name}" ) return name.replace(name[:6] , RES_CONV_MAP[name[:6]] ) def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> Any: """simple 
docstring""" for key, value in ATTN_MAP.items(): if name.startswith(SCREAMING_SNAKE_CASE_ ) and not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): return name.replace(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) elif name.startswith(SCREAMING_SNAKE_CASE_ ): return [name.replace(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for v in value] raise ValueError(F"Attn error with {name}" ) def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 ) -> Any: """simple docstring""" _SCREAMING_SNAKE_CASE = input_string if string.split(""".""" )[0] == "timestep_embed": return string.replace("""timestep_embed""" , """time_proj""" ) _SCREAMING_SNAKE_CASE = 0 if string.startswith("""net.3.""" ): depth += 1 _SCREAMING_SNAKE_CASE = string[6:] elif string.startswith("""net.""" ): _SCREAMING_SNAKE_CASE = string[4:] while string.startswith("""main.7.""" ): depth += 1 _SCREAMING_SNAKE_CASE = string[7:] if string.startswith("""main.""" ): _SCREAMING_SNAKE_CASE = string[5:] # mid block if string[:2].isdigit(): _SCREAMING_SNAKE_CASE = string[:2] _SCREAMING_SNAKE_CASE = string[2:] else: _SCREAMING_SNAKE_CASE = string[0] _SCREAMING_SNAKE_CASE = string[1:] if depth == max_depth: _SCREAMING_SNAKE_CASE = MID_NUM_TO_LAYER[layer_num] _SCREAMING_SNAKE_CASE = """mid_block""" elif depth > 0 and int(SCREAMING_SNAKE_CASE_ ) < 7: _SCREAMING_SNAKE_CASE = DOWN_NUM_TO_LAYER[layer_num] _SCREAMING_SNAKE_CASE = F"down_blocks.{depth}" elif depth > 0 and int(SCREAMING_SNAKE_CASE_ ) > 7: _SCREAMING_SNAKE_CASE = UP_NUM_TO_LAYER[layer_num] _SCREAMING_SNAKE_CASE = F"up_blocks.{max_depth - depth - 1}" elif depth == 0: _SCREAMING_SNAKE_CASE = DEPTH_0_TO_LAYER[layer_num] _SCREAMING_SNAKE_CASE = F"up_blocks.{max_depth - 1}" if int(SCREAMING_SNAKE_CASE_ ) > 3 else """down_blocks.0""" if not string_left.startswith(""".""" ): raise ValueError(F"Naming error with {input_string} and string_left: {string_left}." 
) _SCREAMING_SNAKE_CASE = string_left[1:] if "resnets" in new_layer: _SCREAMING_SNAKE_CASE = convert_resconv_naming(SCREAMING_SNAKE_CASE_ ) elif "attentions" in new_layer: _SCREAMING_SNAKE_CASE = convert_attn_naming(SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = new_string_left if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): _SCREAMING_SNAKE_CASE = prefix + """.""" + new_layer + """.""" + string_left else: _SCREAMING_SNAKE_CASE = [prefix + """.""" + new_layer + """.""" + s for s in string_left] return new_string def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> Optional[int]: """simple docstring""" _SCREAMING_SNAKE_CASE = {} for k, v in state_dict.items(): if k.endswith("""kernel""" ): # up- and downsample layers, don't have trainable weights continue _SCREAMING_SNAKE_CASE = rename(SCREAMING_SNAKE_CASE_ ) # check if we need to transform from Conv => Linear for attention if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): _SCREAMING_SNAKE_CASE = transform_conv_attns(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else: _SCREAMING_SNAKE_CASE = v return new_state_dict def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]: """simple docstring""" if len(SCREAMING_SNAKE_CASE_ ) == 1: if len(v.shape ) == 3: # weight _SCREAMING_SNAKE_CASE = v[:, :, 0] else: # bias _SCREAMING_SNAKE_CASE = v else: # qkv matrices _SCREAMING_SNAKE_CASE = v.shape[0] _SCREAMING_SNAKE_CASE = trippled_shape // 3 for i in range(3 ): if len(v.shape ) == 3: _SCREAMING_SNAKE_CASE = v[i * single_shape : (i + 1) * single_shape, :, 0] else: _SCREAMING_SNAKE_CASE = v[i * single_shape : (i + 1) * single_shape] return new_state_dict def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> List[Any]: """simple docstring""" _SCREAMING_SNAKE_CASE = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" ) _SCREAMING_SNAKE_CASE = args.model_path.split("""/""" )[-1].split(""".""" )[0] if not os.path.isfile(args.model_path ): assert ( model_name == args.model_path ), F"Make sure to provide one of the official model names {MODELS_MAP.keys()}" _SCREAMING_SNAKE_CASE = download(SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = MODELS_MAP[model_name]["""sample_rate"""] _SCREAMING_SNAKE_CASE = MODELS_MAP[model_name]["""sample_size"""] _SCREAMING_SNAKE_CASE = Object() _SCREAMING_SNAKE_CASE = sample_size _SCREAMING_SNAKE_CASE = sample_rate _SCREAMING_SNAKE_CASE = 0 _SCREAMING_SNAKE_CASE = UNetaDModel(sample_size=SCREAMING_SNAKE_CASE_ , sample_rate=SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = diffusers_model.state_dict() _SCREAMING_SNAKE_CASE = DiffusionUncond(SCREAMING_SNAKE_CASE_ ) orig_model.load_state_dict(torch.load(args.model_path , map_location=SCREAMING_SNAKE_CASE_ )["""state_dict"""] ) _SCREAMING_SNAKE_CASE = orig_model.diffusion_ema.eval() _SCREAMING_SNAKE_CASE = orig_model.state_dict() _SCREAMING_SNAKE_CASE = rename_orig_weights(SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() ) _SCREAMING_SNAKE_CASE = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() ) assert len(SCREAMING_SNAKE_CASE_ ) == 0, F"Problem with {renamed_minus_diffusers}" assert all(k.endswith("""kernel""" ) for k in list(SCREAMING_SNAKE_CASE_ ) ), F"Problem with {diffusers_minus_renamed}" for key, value in renamed_state_dict.items(): assert ( diffusers_state_dict[key].squeeze().shape == value.squeeze().shape ), F"Shape for {key} doesn't match. 
Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}" if key == "time_proj.weight": _SCREAMING_SNAKE_CASE = value.squeeze() _SCREAMING_SNAKE_CASE = value diffusers_model.load_state_dict(SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = 1_00 _SCREAMING_SNAKE_CASE = 33 _SCREAMING_SNAKE_CASE = IPNDMScheduler(num_train_timesteps=SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = torch.manual_seed(SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = torch.randn([1, 2, config.sample_size] , generator=SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = torch.linspace(1 , 0 , steps + 1 , device=SCREAMING_SNAKE_CASE_ )[:-1] _SCREAMING_SNAKE_CASE = get_crash_schedule(SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = DanceDiffusionPipeline(unet=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = torch.manual_seed(33 ) _SCREAMING_SNAKE_CASE = pipe(num_inference_steps=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ ).audios _SCREAMING_SNAKE_CASE = sampling.iplms_sample(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , {} ) _SCREAMING_SNAKE_CASE = generated.clamp(-1 , 1 ) _SCREAMING_SNAKE_CASE = (generated - audio).abs().sum() _SCREAMING_SNAKE_CASE = (generated - audio).abs().max() if args.save: pipe.save_pretrained(args.checkpoint_path ) print("""Diff sum""" , SCREAMING_SNAKE_CASE_ ) print("""Diff max""" , SCREAMING_SNAKE_CASE_ ) assert diff_max < 1e-3, F"Diff max: {diff_max} is too much :-/" print(F"Conversion for {model_name} successful!" ) if __name__ == "__main__": UpperCamelCase__ : Union[str, Any] = argparse.ArgumentParser() parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.") parser.add_argument( "--save", default=True, type=bool, required=False, help="Whether to save the converted model or not." ) parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.") UpperCamelCase__ : int = parser.parse_args() main(args)
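# --- Added smoke test for a converted checkpoint (the path is an assumed
# value of --checkpoint_path after running this script with --save):
import torch
from diffusers import DanceDiffusionPipeline

dance_pipe = DanceDiffusionPipeline.from_pretrained("./converted-dance-diffusion")
audios = dance_pipe(num_inference_steps=100, generator=torch.manual_seed(33)).audios
print(audios.shape)  # (batch, channels, sample_size)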
'''simple docstring''' import pyarrow.parquet as pq import pytest from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config from datasets.features.image import Image from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]: """simple docstring""" assert isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""keep_in_memory""" , [False, True] ) def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[Any]: """simple docstring""" _SCREAMING_SNAKE_CASE = tmp_path / """cache""" _SCREAMING_SNAKE_CASE = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): _SCREAMING_SNAKE_CASE = ParquetDatasetReader(SCREAMING_SNAKE_CASE_ , cache_dir=SCREAMING_SNAKE_CASE_ , keep_in_memory=SCREAMING_SNAKE_CASE_ ).read() _check_parquet_dataset(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) @pytest.mark.parametrize( """features""" , [ None, {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}, {"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""}, {"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""}, {"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""}, ] , ) def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]: """simple docstring""" _SCREAMING_SNAKE_CASE = tmp_path / """cache""" _SCREAMING_SNAKE_CASE = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _SCREAMING_SNAKE_CASE = features.copy() if features else default_expected_features _SCREAMING_SNAKE_CASE = ( Features({feature: Value(SCREAMING_SNAKE_CASE_ ) for feature, dtype in features.items()} ) if features is not None else None ) _SCREAMING_SNAKE_CASE = ParquetDatasetReader(SCREAMING_SNAKE_CASE_ , features=SCREAMING_SNAKE_CASE_ , cache_dir=SCREAMING_SNAKE_CASE_ ).read() _check_parquet_dataset(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) @pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] ) def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[str]: """simple docstring""" _SCREAMING_SNAKE_CASE = tmp_path / """cache""" _SCREAMING_SNAKE_CASE = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _SCREAMING_SNAKE_CASE = ParquetDatasetReader(SCREAMING_SNAKE_CASE_ , cache_dir=SCREAMING_SNAKE_CASE_ , split=SCREAMING_SNAKE_CASE_ ).read() _check_parquet_dataset(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) assert dataset.split == split if split else "train" @pytest.mark.parametrize("""path_type""" , [str, list] ) def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]: """simple docstring""" if issubclass(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): _SCREAMING_SNAKE_CASE = parquet_path elif 
issubclass(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): _SCREAMING_SNAKE_CASE = [parquet_path] _SCREAMING_SNAKE_CASE = tmp_path / """cache""" _SCREAMING_SNAKE_CASE = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _SCREAMING_SNAKE_CASE = ParquetDatasetReader(SCREAMING_SNAKE_CASE_ , cache_dir=SCREAMING_SNAKE_CASE_ ).read() _check_parquet_dataset(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=("train",) ) -> List[str]: """simple docstring""" assert isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for split in splits: _SCREAMING_SNAKE_CASE = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""keep_in_memory""" , [False, True] ) def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[Any]: """simple docstring""" _SCREAMING_SNAKE_CASE = tmp_path / """cache""" _SCREAMING_SNAKE_CASE = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): _SCREAMING_SNAKE_CASE = ParquetDatasetReader( {"""train""": parquet_path} , cache_dir=SCREAMING_SNAKE_CASE_ , keep_in_memory=SCREAMING_SNAKE_CASE_ ).read() _check_parquet_datasetdict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) @pytest.mark.parametrize( """features""" , [ None, {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}, {"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""}, {"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""}, {"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""}, ] , ) def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[Any]: """simple docstring""" _SCREAMING_SNAKE_CASE = tmp_path / """cache""" _SCREAMING_SNAKE_CASE = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _SCREAMING_SNAKE_CASE = features.copy() if features else default_expected_features _SCREAMING_SNAKE_CASE = ( Features({feature: Value(SCREAMING_SNAKE_CASE_ ) for feature, dtype in features.items()} ) if features is not None else None ) _SCREAMING_SNAKE_CASE = ParquetDatasetReader({"""train""": parquet_path} , features=SCREAMING_SNAKE_CASE_ , cache_dir=SCREAMING_SNAKE_CASE_ ).read() _check_parquet_datasetdict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) @pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] ) def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[Any]: """simple docstring""" if split: _SCREAMING_SNAKE_CASE = {split: parquet_path} else: _SCREAMING_SNAKE_CASE = """train""" _SCREAMING_SNAKE_CASE = {"""train""": parquet_path, """test""": parquet_path} _SCREAMING_SNAKE_CASE = tmp_path / """cache""" _SCREAMING_SNAKE_CASE = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _SCREAMING_SNAKE_CASE = ParquetDatasetReader(SCREAMING_SNAKE_CASE_ , cache_dir=SCREAMING_SNAKE_CASE_ ).read() _check_parquet_datasetdict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , splits=list(path.keys() ) ) assert 
all(dataset[split].split == split for split in path.keys() ) def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Dict: """simple docstring""" _SCREAMING_SNAKE_CASE = ParquetDatasetWriter(SCREAMING_SNAKE_CASE_ , tmp_path / """foo.parquet""" ) assert writer.write() > 0 _SCREAMING_SNAKE_CASE = pq.ParquetFile(tmp_path / """foo.parquet""" ) _SCREAMING_SNAKE_CASE = pf.read() assert dataset.data.table == output_table def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[Any]: """simple docstring""" _SCREAMING_SNAKE_CASE = str(shared_datadir / """test_image_rgb.jpg""" ) _SCREAMING_SNAKE_CASE = {"""image""": [image_path]} _SCREAMING_SNAKE_CASE = Features({"""image""": Image()} ) _SCREAMING_SNAKE_CASE = Dataset.from_dict(SCREAMING_SNAKE_CASE_ , features=SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = ParquetDatasetWriter(SCREAMING_SNAKE_CASE_ , tmp_path / """foo.parquet""" ) assert writer.write() > 0 _SCREAMING_SNAKE_CASE = Dataset.from_parquet(str(tmp_path / """foo.parquet""" ) ) assert dataset.features == reloaded_dataset.features _SCREAMING_SNAKE_CASE = ParquetDatasetReader(str(tmp_path / """foo.parquet""" ) , streaming=SCREAMING_SNAKE_CASE_ ).read() assert dataset.features == reloaded_iterable_dataset.features @pytest.mark.parametrize( """feature, expected""" , [ (Features({"""foo""": Value("""int32""" )} ), None), (Features({"""image""": Image(), """foo""": Value("""int32""" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS), (Features({"""nested""": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS), ] , ) def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> str: """simple docstring""" assert get_writer_batch_size(SCREAMING_SNAKE_CASE_ ) == expected
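# A minimal round-trip sketch of the writer/reader pair exercised above, using
# the imports at the top of this test module; the column names and the pytest
# `tmp_path` fixture are illustrative assumptions.
def _parquet_roundtrip_sketch(tmp_path):
    dataset = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2]})
    # write() returns the number of bytes written, as the tests above rely on
    assert ParquetDatasetWriter(dataset, tmp_path / "roundtrip.parquet").write() > 0
    reloaded = ParquetDatasetReader(str(tmp_path / "roundtrip.parquet")).read()
    assert reloaded.column_names == ["col_1", "col_2"]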
'''simple docstring''' def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> int: """simple docstring""" while b: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = b, a % b return a def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> int: """simple docstring""" return a if b == 0 else euclidean_gcd_recursive(SCREAMING_SNAKE_CASE_ , a % b ) def lowerCAmelCase_ ( ) -> Tuple: """simple docstring""" print(F"euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}" ) print(F"euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}" ) print(F"euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}" ) print(F"euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}" ) print(F"euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}" ) print(F"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}" ) print(F"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}" ) print(F"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}" ) print(F"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}" ) print(F"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}" ) if __name__ == "__main__": main()
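# A minimal sketch of the iterative gcd above, renamed here because every
# function in this file carries the placeholder name lowerCAmelCase_; the
# example pair (48, 18) is arbitrary.
def _gcd_sketch(a: int, b: int) -> int:
    while b:
        a, b = b, a % b
    return a


assert _gcd_sketch(48, 18) == 6  # (48, 18) -> (18, 12) -> (12, 6) -> (6, 0)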
'''simple docstring''' def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> int: """simple docstring""" if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): raise ValueError("""multiplicative_persistence() only accepts integral values""" ) if num < 0: raise ValueError("""multiplicative_persistence() does not accept negative values""" ) _SCREAMING_SNAKE_CASE = 0 _SCREAMING_SNAKE_CASE = str(SCREAMING_SNAKE_CASE_ ) while len(SCREAMING_SNAKE_CASE_ ) != 1: _SCREAMING_SNAKE_CASE = [int(SCREAMING_SNAKE_CASE_ ) for i in num_string] _SCREAMING_SNAKE_CASE = 1 for i in range(0 , len(SCREAMING_SNAKE_CASE_ ) ): total *= numbers[i] _SCREAMING_SNAKE_CASE = str(SCREAMING_SNAKE_CASE_ ) steps += 1 return steps def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> int: """simple docstring""" if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): raise ValueError("""additive_persistence() only accepts integral values""" ) if num < 0: raise ValueError("""additive_persistence() does not accept negative values""" ) _SCREAMING_SNAKE_CASE = 0 _SCREAMING_SNAKE_CASE = str(SCREAMING_SNAKE_CASE_ ) while len(SCREAMING_SNAKE_CASE_ ) != 1: _SCREAMING_SNAKE_CASE = [int(SCREAMING_SNAKE_CASE_ ) for i in num_string] _SCREAMING_SNAKE_CASE = 0 for i in range(0 , len(SCREAMING_SNAKE_CASE_ ) ): total += numbers[i] _SCREAMING_SNAKE_CASE = str(SCREAMING_SNAKE_CASE_ ) steps += 1 return steps if __name__ == "__main__": import doctest doctest.testmod()
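# Sketch of the multiplicative variant above under a distinct name (both
# functions in this file share the placeholder name lowerCAmelCase_, so the
# second definition shadows the first); 39 is a classic example input.
def _mult_persistence_sketch(num: int) -> int:
    steps = 0
    while num >= 10:
        product = 1
        for digit in str(num):
            product *= int(digit)
        num = product
        steps += 1
    return steps


assert _mult_persistence_sketch(39) == 3  # 39 -> 27 -> 14 -> 4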
'''simple docstring''' import argparse import json import os import tensorstore as ts import torch from flax import serialization from flax.traverse_util import flatten_dict, unflatten_dict from tensorflow.io import gfile from transformers.modeling_utils import dtype_byte_size from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import ( rename_keys, ) from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME from transformers.utils.hub import convert_file_size_to_int def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Dict: """simple docstring""" if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3: # expert layer _SCREAMING_SNAKE_CASE = flax_key_tuple[:-1] + ("""weight""",) _SCREAMING_SNAKE_CASE = torch.permute(SCREAMING_SNAKE_CASE_ , (0, 2, 1) ) elif flax_key_tuple[-1] == "kernel" and ".".join(SCREAMING_SNAKE_CASE_ ): # linear layer _SCREAMING_SNAKE_CASE = flax_key_tuple[:-1] + ("""weight""",) _SCREAMING_SNAKE_CASE = flax_tensor.T elif flax_key_tuple[-1] in ["scale", "embedding"]: _SCREAMING_SNAKE_CASE = flax_key_tuple[:-1] + ("""weight""",) return flax_key_tuple, flax_tensor def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[str]: """simple docstring""" if "metadata" in layer: _SCREAMING_SNAKE_CASE = layer.split("""metadata""" ) _SCREAMING_SNAKE_CASE = """""".join(split_layer[0] )[:-1] _SCREAMING_SNAKE_CASE = [tuple(("""metadata""" + split_layer[1]).split("""/""" ) )] elif "kvstore" in layer: _SCREAMING_SNAKE_CASE = layer.split("""kvstore""" ) _SCREAMING_SNAKE_CASE = """""".join(split_layer[0] )[:-1] _SCREAMING_SNAKE_CASE = [tuple(("""kvstore""" + split_layer[1]).split("""/""" ) )] else: _SCREAMING_SNAKE_CASE = layer.split("""/""" ) _SCREAMING_SNAKE_CASE = """/""".join(split_layer[:-1] ) _SCREAMING_SNAKE_CASE = (split_layer[-1],) if "kvstore/path" in layer: _SCREAMING_SNAKE_CASE = F"{switch_checkpoint_path}/{checkpoint_info[layer]}" elif "kvstore/driver" in layer: _SCREAMING_SNAKE_CASE = """file""" else: _SCREAMING_SNAKE_CASE = checkpoint_info[layer] return curr_real_layer_name, split_layer, content def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Any: """simple docstring""" _SCREAMING_SNAKE_CASE = rename_keys(SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = {} for k, v in current_block.items(): _SCREAMING_SNAKE_CASE = v _SCREAMING_SNAKE_CASE = new_current_block torch.save(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = WEIGHTS_NAME ) -> Any: """simple docstring""" _SCREAMING_SNAKE_CASE = convert_file_size_to_int(SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = [] _SCREAMING_SNAKE_CASE = {} _SCREAMING_SNAKE_CASE = 0 _SCREAMING_SNAKE_CASE = 0 os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ ) with gfile.GFile(switch_checkpoint_path + """/checkpoint""" , """rb""" ) as fp: _SCREAMING_SNAKE_CASE = serialization.msgpack_restore(fp.read() )["""optimizer"""]["""target"""] _SCREAMING_SNAKE_CASE = flatten_dict(SCREAMING_SNAKE_CASE_ , sep="""/""" ) _SCREAMING_SNAKE_CASE = {} for layer in checkpoint_info.keys(): _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = get_key_and_tensorstore_dict( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if curr_real_layer_name in all_layers: _SCREAMING_SNAKE_CASE = content else: _SCREAMING_SNAKE_CASE 
= {split_layer[-1]: content} for key in all_layers.keys(): # open tensorstore file _SCREAMING_SNAKE_CASE = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result() _SCREAMING_SNAKE_CASE = torch.tensor(SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = raw_weights.numel() * dtype_byte_size(raw_weights.dtype ) # use the renaming pattern from the small conversion scripts _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = rename_base_flax_keys(tuple(key.split("""/""" ) ) , SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = """/""".join(SCREAMING_SNAKE_CASE_ ) # If this weight is going to tip up over the maximal size, we split. if current_block_size + weight_size > max_shard_size: _SCREAMING_SNAKE_CASE = os.path.join( SCREAMING_SNAKE_CASE_ , weights_name.replace(""".bin""" , F"-{len(SCREAMING_SNAKE_CASE_ )+1:05d}-of-???.bin" ) ) rename_and_save_block(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) sharded_state_dicts.append(current_block.keys() ) del current_block _SCREAMING_SNAKE_CASE = {} _SCREAMING_SNAKE_CASE = 0 _SCREAMING_SNAKE_CASE = raw_weights.to(getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) current_block_size += weight_size total_size += weight_size # Add the last block _SCREAMING_SNAKE_CASE = os.path.join(SCREAMING_SNAKE_CASE_ , weights_name.replace(""".bin""" , F"-{len(SCREAMING_SNAKE_CASE_ )+1:05d}-of-???.bin" ) ) rename_and_save_block(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) sharded_state_dicts.append(current_block.keys() ) # If we only have one shard, we return it if len(SCREAMING_SNAKE_CASE_ ) == 1: return {weights_name: sharded_state_dicts[0]}, None # Otherwise, let's build the index _SCREAMING_SNAKE_CASE = {} _SCREAMING_SNAKE_CASE = {} for idx, shard in enumerate(SCREAMING_SNAKE_CASE_ ): _SCREAMING_SNAKE_CASE = weights_name.replace( """.bin""" , F"-{idx+1:05d}-of-{len(SCREAMING_SNAKE_CASE_ ):05d}.bin" ) # len(sharded_state_dicts):05d} _SCREAMING_SNAKE_CASE = os.path.join(SCREAMING_SNAKE_CASE_ , weights_name.replace(""".bin""" , F"-{idx+1:05d}-of-???.bin" ) ) os.rename(SCREAMING_SNAKE_CASE_ , os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) _SCREAMING_SNAKE_CASE = shard for key in shard: _SCREAMING_SNAKE_CASE = shard_file # Add the metadata _SCREAMING_SNAKE_CASE = {"""total_size""": total_size} _SCREAMING_SNAKE_CASE = {"""metadata""": metadata, """weight_map""": weight_map} with open(os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , """w""" , encoding="""utf-8""" ) as f: _SCREAMING_SNAKE_CASE = json.dumps(SCREAMING_SNAKE_CASE_ , indent=2 , sort_keys=SCREAMING_SNAKE_CASE_ ) + """\n""" f.write(SCREAMING_SNAKE_CASE_ ) return metadata, index if __name__ == "__main__": UpperCamelCase__ : Any = argparse.ArgumentParser() # Required parameters parser.add_argument( "--switch_t5x_checkpoint_path", default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600", type=str, required=False, help="Path to a directory containing a folder per layer. 
Follows the original Google format.", ) parser.add_argument("--max_shard_size", default="10GB", required=False, help="Max shard size") parser.add_argument("--dtype", default="bfloat16", type=str, required=False, help="dtype of the saved model") parser.add_argument( "--pytorch_dump_folder_path", default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted", type=str, required=False, help="Path to the output pytorch model.", ) UpperCamelCase__ : Optional[Any] = parser.parse_args() shard_on_the_fly( args.switch_tax_checkpoint_path, args.pytorch_dump_folder_path, args.max_shard_size, args.dtype, ) def lowerCAmelCase_ ( ) -> Tuple: """simple docstring""" from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer _SCREAMING_SNAKE_CASE = SwitchTransformersConfig.from_pretrained("""google/switch-base-8""" ) config.save_pretrained("""/home/arthur_huggingface_co/transformers/switch_converted""" ) _SCREAMING_SNAKE_CASE = SwitchTransformersForConditionalGeneration.from_pretrained( """/home/arthur_huggingface_co/transformers/switch_converted""" , device_map="""auto""" ) _SCREAMING_SNAKE_CASE = TaTokenizer.from_pretrained("""t5-small""" ) _SCREAMING_SNAKE_CASE = """A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.""" _SCREAMING_SNAKE_CASE = tokenizer(SCREAMING_SNAKE_CASE_ , return_tensors="""pt""" ).input_ids _SCREAMING_SNAKE_CASE = model.generate(SCREAMING_SNAKE_CASE_ , decoder_start_token_id=0 ) print(tokenizer.decode(out[0] ) )
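# Illustrative numbers for the shard bookkeeping above, assuming the helpers
# imported from transformers behave as in recent releases; the probe tensor
# shape is arbitrary.
_probe = torch.ones(10, 10, dtype=torch.float32)
assert _probe.numel() * dtype_byte_size(_probe.dtype) == 400  # 100 elements * 4 bytes
assert convert_file_size_to_int("10GB") == 10 * 10**9  # the shard cut-off in bytes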
'''simple docstring''' import math import os import re import sys import unittest from pathlib import Path from typing import Tuple from unittest.mock import patch from parameterized import parameterized from transformers.testing_utils import ( CaptureStderr, ExtendSysPath, TestCasePlus, execute_subprocess_async, get_gpu_count, get_torch_dist_unique_port, require_apex, require_bitsandbytes, require_fairscale, require_torch, require_torch_gpu, require_torch_multi_gpu, require_torch_non_multi_gpu, slow, ) from transformers.trainer_callback import TrainerState from transformers.trainer_utils import set_seed UpperCamelCase__ : Tuple = os.path.abspath(os.path.dirname(__file__)) with ExtendSysPath(f"""{bindir}/../../examples/pytorch/translation"""): from run_translation import main # noqa set_seed(42) UpperCamelCase__ : Union[str, Any] = "sshleifer/student_marian_en_ro_6_1" UpperCamelCase__ : str = "sshleifer/tiny-mbart" @require_torch class _a (_lowerCamelCase): """simple docstring""" def UpperCamelCase ( self , A__=False , A__=None , A__=True , A__=True , A__=True , A__=True , ) -> Optional[Any]: _SCREAMING_SNAKE_CASE = self.run_trainer( eval_steps=1 , max_len=12 , model_name=A__ , num_train_epochs=1 , distributed=A__ , extra_args_str=A__ , predict_with_generate=A__ , do_train=A__ , do_eval=A__ , do_predict=A__ , ) _SCREAMING_SNAKE_CASE = TrainerState.load_from_json(os.path.join(A__ , """trainer_state.json""" ) ).log_history if not do_eval: return _SCREAMING_SNAKE_CASE = [log for log in logs if """eval_loss""" in log.keys()] _SCREAMING_SNAKE_CASE = eval_metrics[0] if predict_with_generate: assert "eval_bleu" in first_step_stats _SCREAMING_SNAKE_CASE = eval_metrics[-1] assert isinstance(last_step_stats["""eval_bleu"""] , A__ ) assert not math.isnan(float(last_step_stats["""eval_loss"""] ) ), "eval_loss must not be `nan`" @require_torch_non_multi_gpu def UpperCamelCase ( self ) -> Optional[int]: self.run_seqaseq_quick() @require_torch_multi_gpu def UpperCamelCase ( self ) -> Optional[Any]: self.run_seqaseq_quick(distributed=A__ ) @require_torch_multi_gpu def UpperCamelCase ( self ) -> Union[str, Any]: self.run_seqaseq_quick(distributed=A__ ) @unittest.skip("""Requires an update of the env running those tests""" ) @require_torch_multi_gpu @require_fairscale def UpperCamelCase ( self ) -> Any: self.run_seqaseq_quick(distributed=A__ , extra_args_str="""--sharded_ddp simple""" ) @unittest.skip("""Requires an update of the env running those tests""" ) @require_torch_multi_gpu @require_fairscale def UpperCamelCase ( self ) -> Tuple: self.run_seqaseq_quick(distributed=A__ , extra_args_str="""--sharded_ddp simple --fp16""" ) @unittest.skip("""Requires an update of the env running those tests""" ) @require_torch_multi_gpu @require_fairscale def UpperCamelCase ( self ) -> str: self.run_seqaseq_quick(distributed=A__ , extra_args_str="""--sharded_ddp zero_dp_2""" , predict_with_generate=A__ ) @unittest.skip("""Requires an update of the env running those tests""" ) @require_torch_multi_gpu @require_fairscale def UpperCamelCase ( self ) -> List[str]: self.run_seqaseq_quick( distributed=A__ , extra_args_str="""--sharded_ddp zero_dp_2 --fp16""" , predict_with_generate=A__ ) @require_apex @require_torch_gpu def UpperCamelCase ( self ) -> Optional[Any]: # XXX: apex breaks the trainer if it's run twice e.g. 
run_seq2seq.main() from the same # program and it breaks other tests that run from the same pytest worker, therefore until this is # sorted out it must be run only in an external program, that is distributed=True in this # test and only under one or more gpus - if we want cpu will need to make a special test # # specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via # 2nd main() call it botches the future eval. # self.run_seqaseq_quick(distributed=A__ , extra_args_str="""--fp16 --fp16_backend=apex""" ) # test 2nd time - was getting eval_loss': nan' # to reproduce the problem set distributed=False self.run_seqaseq_quick(distributed=A__ , extra_args_str="""--fp16 --fp16_backend=apex""" ) @parameterized.expand(["""base""", """low""", """high""", """mixed"""] ) @require_torch_multi_gpu def UpperCamelCase ( self , A__ ) -> List[Any]: # as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout _SCREAMING_SNAKE_CASE = { # test with the default log_level - should be info and thus log info once """base""": {"""extra_args_str""": """""", """n_matches""": 1}, # test with low log_level and log_level_replica - should be noisy on all processes # now the info string should appear twice on 2 processes """low""": {"""extra_args_str""": """--log_level debug --log_level_replica debug""", """n_matches""": 2}, # test with high log_level and low log_level_replica # now the info string should appear once only on the replica """high""": {"""extra_args_str""": """--log_level error --log_level_replica debug""", """n_matches""": 1}, # test with high log_level and log_level_replica - should be quiet on all processes """mixed""": {"""extra_args_str""": """--log_level error --log_level_replica error""", """n_matches""": 0}, } _SCREAMING_SNAKE_CASE = experiments[experiment_id] _SCREAMING_SNAKE_CASE = {"""distributed""": True, """predict_with_generate""": False, """do_eval""": False, """do_predict""": False} _SCREAMING_SNAKE_CASE = """Running training""" with CaptureStderr() as cl: self.run_seqaseq_quick(**A__ , extra_args_str=data["""extra_args_str"""] ) _SCREAMING_SNAKE_CASE = len(re.findall(A__ , cl.err ) ) self.assertEqual(A__ , data["""n_matches"""] ) @slow def UpperCamelCase ( self ) -> Any: _SCREAMING_SNAKE_CASE = self.run_trainer( eval_steps=2 , max_len=1_28 , model_name=A__ , learning_rate=3E-4 , num_train_epochs=10 , distributed=A__ , ) # Check metrics _SCREAMING_SNAKE_CASE = TrainerState.load_from_json(os.path.join(A__ , """trainer_state.json""" ) ).log_history _SCREAMING_SNAKE_CASE = [log for log in logs if """eval_loss""" in log.keys()] _SCREAMING_SNAKE_CASE = eval_metrics[0] _SCREAMING_SNAKE_CASE = eval_metrics[-1] assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing" assert isinstance(last_step_stats["""eval_bleu"""] , A__ ) # test if do_predict saves generations and metrics _SCREAMING_SNAKE_CASE = os.listdir(A__ ) _SCREAMING_SNAKE_CASE = {os.path.basename(A__ ) for p in contents} assert "generated_predictions.txt" in contents assert "predict_results.json" in contents @slow @require_bitsandbytes def UpperCamelCase ( self ) -> Dict: from transformers.training_args import OptimizerNames def train_and_return_metrics(A__ ) -> Tuple[int, float]: _SCREAMING_SNAKE_CASE = """--skip_memory_metrics 0""" _SCREAMING_SNAKE_CASE = self.run_trainer( max_len=1_28 , model_name=A__ , learning_rate=3E-4 , num_train_epochs=1 , optim=A__ , distributed=A__ , extra_args_str=A__ , do_eval=A__ , do_predict=A__ , n_gpus_to_use=1 , ) # 
Check metrics _SCREAMING_SNAKE_CASE = TrainerState.load_from_json(Path(A__ , """trainer_state.json""" ) ).log_history _SCREAMING_SNAKE_CASE = int(logs[0]["""train_mem_gpu_peaked_delta"""] / 2**20 ) _SCREAMING_SNAKE_CASE = int(logs[0]["""train_mem_gpu_alloc_delta"""] / 2**20 ) _SCREAMING_SNAKE_CASE = logs[0]["""train_loss"""] return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value ) _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value ) _SCREAMING_SNAKE_CASE = gpu_alloc_mem_orig - gpu_alloc_mem_bnb _SCREAMING_SNAKE_CASE = gpu_peak_mem_orig + gpu_alloc_mem_orig _SCREAMING_SNAKE_CASE = gpu_peak_mem_bnb + gpu_alloc_mem_bnb _SCREAMING_SNAKE_CASE = gpu_total_mem_orig - gpu_total_mem_bnb # sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which # doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized # in 2 bytes and the diff in optim memory usage is derived as so: # # - normal 25*8=~200MB (8 bytes per param) # - bnb 25*2= ~50MB (2 bytes per param) # # Thus we should expect ~150MB total memory saved. # # Peak memory should be the same - the total should be different by about that same margin # # After leaving a small margin to accommodate for differences between gpus let's check # that we have at least 120MB in savings _SCREAMING_SNAKE_CASE = 1_20 # uncomment the following if this test starts failing - requires py38 for a new print feature # gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb # print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB") # print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB") # print(f"{gpu_alloc_mem_diff=}MB") # print(f"{gpu_peak_mem_diff=}MB") # print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB") # print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB") self.assertGreater( A__ , A__ , """should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got""" F" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and" F" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB" , ) self.assertGreater( A__ , A__ , """should use ~150MB less total gpu memory with BNB, compared to without it for this model but got""" F" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and" F" gpu_total_mem_bnb={gpu_total_mem_bnb}MB" , ) self.assertEqual( A__ , A__ , F"loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}" ) def UpperCamelCase ( self , A__ , A__ , A__ , A__ = 3E-3 , A__ = "adafactor" , A__ = False , A__ = None , A__ = 0 , A__ = True , A__ = True , A__ = True , A__ = True , A__ = None , ) -> Dict: _SCREAMING_SNAKE_CASE = self.test_file_dir / """../fixtures/tests_samples/wmt_en_ro""" _SCREAMING_SNAKE_CASE = self.get_auto_remove_tmp_dir() _SCREAMING_SNAKE_CASE = F"\n --model_name_or_path {model_name}\n --train_file {data_dir}/train.json\n --validation_file {data_dir}/val.json\n --test_file {data_dir}/test.json\n --output_dir {output_dir}\n --overwrite_output_dir\n --max_train_samples 8\n --max_source_length {max_len}\n --max_target_length {max_len}\n --do_train\n --num_train_epochs {str(A__ )}\n --per_device_train_batch_size 4\n --learning_rate {learning_rate}\n --warmup_steps 8\n --logging_steps 0\n 
--logging_strategy no\n --save_steps {str(A__ )}\n --group_by_length\n --label_smoothing_factor 0.1\n --target_lang ro_RO\n --source_lang en_XX\n ".split() _SCREAMING_SNAKE_CASE = F"\n --do_eval\n --per_device_eval_batch_size 4\n --max_eval_samples 8\n --val_max_target_length {max_len}\n --evaluation_strategy steps\n --eval_steps {str(A__ )}\n ".split() _SCREAMING_SNAKE_CASE = """ --do_predict """.split() _SCREAMING_SNAKE_CASE = [] if do_train: args += args_train if do_eval: args += args_eval if do_predict: args += args_predict if predict_with_generate: args += "--predict_with_generate".split() if do_train: if optim == "adafactor": args += "--adafactor".split() else: args += F"--optim {optim}".split() if extra_args_str is not None: args += extra_args_str.split() if distributed: if n_gpus_to_use is None: _SCREAMING_SNAKE_CASE = get_gpu_count() _SCREAMING_SNAKE_CASE = get_torch_dist_unique_port() _SCREAMING_SNAKE_CASE = F"\n -m torch.distributed.run\n --nproc_per_node={n_gpus_to_use}\n --master_port={master_port}\n {self.examples_dir_str}/pytorch/translation/run_translation.py\n ".split() _SCREAMING_SNAKE_CASE = [sys.executable] + distributed_args + args # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die execute_subprocess_async(A__ , env=self.get_env() ) else: _SCREAMING_SNAKE_CASE = ["""run_translation.py"""] + args with patch.object(A__ , """argv""" , A__ ): main() return output_dir
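# Worked form of the memory arithmetic in the comments above: roughly 25M
# quantizable parameters at 8 bytes/param of fp32 Adam state versus 2 bytes
# under bnb 8-bit gives an expected optimizer saving of about 143 MB, which is
# why the test uses a conservative 120 MB threshold.
assert int(25_000_000 * (8 - 2) / 2**20) == 143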
'''simple docstring''' import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase__ : Tuple = logging.get_logger(__name__) UpperCamelCase__ : str = { "microsoft/unispeech-sat-base-100h-libri-ft": ( "https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json" ), # See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat } class _a (_lowerCamelCase): SCREAMING_SNAKE_CASE = 'unispeech-sat' def __init__( self , A__=32 , A__=7_68 , A__=12 , A__=12 , A__=30_72 , A__="gelu" , A__=0.1 , A__=0.1 , A__=0.1 , A__=0.0 , A__=0.0 , A__=0.1 , A__=0.1 , A__=0.02 , A__=1E-5 , A__="group" , A__="gelu" , A__=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , A__=(5, 2, 2, 2, 2, 2, 2) , A__=(10, 3, 3, 3, 3, 2, 2) , A__=False , A__=1_28 , A__=16 , A__=False , A__=True , A__=0.05 , A__=10 , A__=2 , A__=0.0 , A__=10 , A__=0 , A__=3_20 , A__=2 , A__=0.1 , A__=1_00 , A__=2_56 , A__=2_56 , A__=0.1 , A__="mean" , A__=False , A__=False , A__=2_56 , A__=(5_12, 5_12, 5_12, 5_12, 15_00) , A__=(5, 3, 3, 1, 1) , A__=(1, 2, 3, 1, 1) , A__=5_12 , A__=0 , A__=1 , A__=2 , A__=5_04 , **A__ , ) -> List[str]: super().__init__(**A__ , pad_token_id=A__ , bos_token_id=A__ , eos_token_id=A__ ) _SCREAMING_SNAKE_CASE = hidden_size _SCREAMING_SNAKE_CASE = feat_extract_norm _SCREAMING_SNAKE_CASE = feat_extract_activation _SCREAMING_SNAKE_CASE = list(A__ ) _SCREAMING_SNAKE_CASE = list(A__ ) _SCREAMING_SNAKE_CASE = list(A__ ) _SCREAMING_SNAKE_CASE = conv_bias _SCREAMING_SNAKE_CASE = num_conv_pos_embeddings _SCREAMING_SNAKE_CASE = num_conv_pos_embedding_groups _SCREAMING_SNAKE_CASE = len(self.conv_dim ) _SCREAMING_SNAKE_CASE = num_hidden_layers _SCREAMING_SNAKE_CASE = intermediate_size _SCREAMING_SNAKE_CASE = hidden_act _SCREAMING_SNAKE_CASE = num_attention_heads _SCREAMING_SNAKE_CASE = hidden_dropout _SCREAMING_SNAKE_CASE = attention_dropout _SCREAMING_SNAKE_CASE = activation_dropout _SCREAMING_SNAKE_CASE = feat_proj_dropout _SCREAMING_SNAKE_CASE = final_dropout _SCREAMING_SNAKE_CASE = layerdrop _SCREAMING_SNAKE_CASE = layer_norm_eps _SCREAMING_SNAKE_CASE = initializer_range _SCREAMING_SNAKE_CASE = vocab_size _SCREAMING_SNAKE_CASE = num_clusters _SCREAMING_SNAKE_CASE = do_stable_layer_norm _SCREAMING_SNAKE_CASE = use_weighted_layer_sum if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( """Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` ==""" """ `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =""" F" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`," F" `len(config.conv_kernel) = {len(self.conv_kernel )}`." 
) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 _SCREAMING_SNAKE_CASE = apply_spec_augment _SCREAMING_SNAKE_CASE = mask_time_prob _SCREAMING_SNAKE_CASE = mask_time_length _SCREAMING_SNAKE_CASE = mask_time_min_masks _SCREAMING_SNAKE_CASE = mask_feature_prob _SCREAMING_SNAKE_CASE = mask_feature_length _SCREAMING_SNAKE_CASE = mask_feature_min_masks # parameters for pretraining with codevector quantized representations _SCREAMING_SNAKE_CASE = num_codevectors_per_group _SCREAMING_SNAKE_CASE = num_codevector_groups _SCREAMING_SNAKE_CASE = contrastive_logits_temperature _SCREAMING_SNAKE_CASE = feat_quantizer_dropout _SCREAMING_SNAKE_CASE = num_negatives _SCREAMING_SNAKE_CASE = codevector_dim _SCREAMING_SNAKE_CASE = proj_codevector_dim _SCREAMING_SNAKE_CASE = diversity_loss_weight # ctc loss _SCREAMING_SNAKE_CASE = ctc_loss_reduction _SCREAMING_SNAKE_CASE = ctc_zero_infinity # SequenceClassification-specific parameter. Feel free to ignore for other classes. _SCREAMING_SNAKE_CASE = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. _SCREAMING_SNAKE_CASE = list(A__ ) _SCREAMING_SNAKE_CASE = list(A__ ) _SCREAMING_SNAKE_CASE = list(A__ ) _SCREAMING_SNAKE_CASE = xvector_output_dim @property def UpperCamelCase ( self ) -> Dict: return functools.reduce(operator.mul , self.conv_stride , 1 )
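# Illustrative check of the downsampling property above: the default strides
# (5, 2, 2, 2, 2, 2, 2) multiply out to 5 * 2**6 == 320, i.e. one encoder frame
# per 320 input samples (20 ms of 16 kHz audio).
assert functools.reduce(operator.mul, (5, 2, 2, 2, 2, 2, 2), 1) == 320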
'''simple docstring''' import sys UpperCamelCase__ : int = ( "73167176531330624919225119674426574742355349194934" "96983520312774506326239578318016984801869478851843" "85861560789112949495459501737958331952853208805511" "12540698747158523863050715693290963295227443043557" "66896648950445244523161731856403098711121722383113" "62229893423380308135336276614282806444486645238749" "30358907296290491560440772390713810515859307960866" "70172427121883998797908792274921901699720888093776" "65727333001053367881220235421809751254540594752243" "52584907711670556013604839586446706324415722155397" "53697817977846174064955149290862569321978468622482" "83972241375657056057490261407972968652414535100474" "82166370484403199890008895243450658541227588666881" "16427171479924442928230863465674813919123162824586" "17866458359124566529476545682848912883142607690042" "24219022671055626321111109370544217506941658960408" "07198403850962455444362981230987879927244284909188" "84580156166097919133875499200524063689912560717606" "05886116467109405077541002256983155200055935729725" "71636269561882670428252483600823257530420752963450" ) def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ = N ) -> int: """simple docstring""" _SCREAMING_SNAKE_CASE = -sys.maxsize - 1 for i in range(len(SCREAMING_SNAKE_CASE_ ) - 12 ): _SCREAMING_SNAKE_CASE = 1 for j in range(13 ): product *= int(n[i + j] ) if product > largest_product: _SCREAMING_SNAKE_CASE = product return largest_product if __name__ == "__main__": print(f"""{solution() = }""")
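# Self-contained sketch of the sliding-window scan above on a short digit
# string with window size 3; the best window in "12345" is "345", with
# product 60.
_digits = "12345"
assert max(
    int(_digits[i]) * int(_digits[i + 1]) * int(_digits[i + 2])
    for i in range(len(_digits) - 2)
) == 60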
'''simple docstring''' from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import ( ImageTextPipelineOutput, UniDiffuserPipeline, ) else: from .modeling_text_decoder import UniDiffuserTextDecoder from .modeling_uvit import UniDiffuserModel, UTransformeraDModel from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
'''simple docstring''' UpperCamelCase__ : Dict = { "a": "AAAAA", "b": "AAAAB", "c": "AAABA", "d": "AAABB", "e": "AABAA", "f": "AABAB", "g": "AABBA", "h": "AABBB", "i": "ABAAA", "j": "BBBAA", "k": "ABAAB", "l": "ABABA", "m": "ABABB", "n": "ABBAA", "o": "ABBAB", "p": "ABBBA", "q": "ABBBB", "r": "BAAAA", "s": "BAAAB", "t": "BAABA", "u": "BAABB", "v": "BBBAB", "w": "BABAA", "x": "BABAB", "y": "BABBA", "z": "BABBB", " ": " ", } UpperCamelCase__ : str = {value: key for key, value in encode_dict.items()} def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> str: """simple docstring""" _SCREAMING_SNAKE_CASE = """""" for letter in word.lower(): if letter.isalpha() or letter == " ": encoded += encode_dict[letter] else: raise Exception("""encode() accepts only letters of the alphabet and spaces""" ) return encoded def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> str: """simple docstring""" if set(SCREAMING_SNAKE_CASE_ ) - {"A", "B", " "} != set(): raise Exception("""decode() accepts only 'A', 'B' and spaces""" ) _SCREAMING_SNAKE_CASE = """""" for word in coded.split(): while len(SCREAMING_SNAKE_CASE_ ) != 0: decoded += decode_dict[word[:5]] _SCREAMING_SNAKE_CASE = word[5:] decoded += " " return decoded.strip() if __name__ == "__main__": from doctest import testmod testmod()
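# Round-trip sketch for the Baconian cipher above under distinct names (the
# encoder and decoder both carry the placeholder name lowerCAmelCase_, and the
# lookup tables are bound to UpperCamelCase__); _BACON is just the subset of
# the table needed for this example.
_BACON = {"h": "AABBB", "e": "AABAA", "l": "ABABA", "o": "ABBAB"}
_REVERSE_BACON = {value: key for key, value in _BACON.items()}


def _bacon_encode_sketch(word: str) -> str:
    return "".join(_BACON[letter] for letter in word.lower())


def _bacon_decode_sketch(coded: str) -> str:
    # each letter occupies a fixed five-character A/B group
    return "".join(_REVERSE_BACON[coded[i : i + 5]] for i in range(0, len(coded), 5))


assert _bacon_encode_sketch("hello") == "AABBBAABAAABABAABABAABBAB"
assert _bacon_decode_sketch("AABBBAABAAABABAABABAABBAB") == "hello"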
'''simple docstring''' def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> str: """simple docstring""" return "".join([hex(SCREAMING_SNAKE_CASE_ )[2:].zfill(2 ).upper() for byte in list(SCREAMING_SNAKE_CASE_ )] ) def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> bytes: """simple docstring""" # Check data validity, following RFC3548 # https://www.ietf.org/rfc/rfc3548.txt if (len(SCREAMING_SNAKE_CASE_ ) % 2) != 0: raise ValueError( """Base16 encoded data is invalid: Data does not have an even number of hex digits.""" ) # Check the character set - the standard base16 alphabet # is uppercase according to RFC3548 section 6 if not set(SCREAMING_SNAKE_CASE_ ) <= set("""0123456789ABCDEF""" ): raise ValueError( """Base16 encoded data is invalid: Data is not uppercase hex or it contains invalid characters.""" ) # For every two hexadecimal digits (= a byte), turn it into an integer. # Then, string the result together into bytes, and return it. return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(SCREAMING_SNAKE_CASE_ ) , 2 ) ) if __name__ == "__main__": import doctest doctest.testmod()
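# Round-trip sketch for the base16 helpers above, given distinct names here
# since both originals share the placeholder name lowerCAmelCase_; b"Hello" is
# an arbitrary payload.
def _b16_encode_sketch(data: bytes) -> str:
    return "".join(hex(byte)[2:].zfill(2).upper() for byte in data)


def _b16_decode_sketch(data: str) -> bytes:
    # every two hex digits form one byte
    return bytes(int(data[i : i + 2], 16) for i in range(0, len(data), 2))


assert _b16_encode_sketch(b"Hello") == "48656C6C6F"
assert _b16_decode_sketch("48656C6C6F") == b"Hello"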
'''simple docstring''' import argparse import torch from torch import nn from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> Any: """simple docstring""" _SCREAMING_SNAKE_CASE = [ """encoder.version""", """decoder.version""", """model.encoder.version""", """model.decoder.version""", """decoder.output_projection.weight""", """_float_tensor""", """encoder.embed_positions._float_tensor""", """decoder.embed_positions._float_tensor""", ] for k in ignore_keys: state_dict.pop(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> str: """simple docstring""" _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = emb.weight.shape _SCREAMING_SNAKE_CASE = nn.Linear(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , bias=SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = emb.weight.data return lin_layer def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> List[Any]: """simple docstring""" _SCREAMING_SNAKE_CASE = torch.load(SCREAMING_SNAKE_CASE_ , map_location="""cpu""" ) _SCREAMING_SNAKE_CASE = mam_aaa["""args"""] or mam_aaa["""cfg"""]["""model"""] _SCREAMING_SNAKE_CASE = mam_aaa["""model"""] remove_ignore_keys_(SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = state_dict["""encoder.embed_tokens.weight"""].shape[0] _SCREAMING_SNAKE_CASE = MaMaaaConfig( vocab_size=SCREAMING_SNAKE_CASE_ , max_position_embeddings=10_24 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="""relu""" , ) _SCREAMING_SNAKE_CASE = state_dict["""decoder.embed_tokens.weight"""] _SCREAMING_SNAKE_CASE = MaMaaaForConditionalGeneration(SCREAMING_SNAKE_CASE_ ) model.model.load_state_dict(SCREAMING_SNAKE_CASE_ , strict=SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = make_linear_from_emb(model.model.shared ) return model if __name__ == "__main__": UpperCamelCase__ : Dict = argparse.ArgumentParser() # Required parameters parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.") parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") UpperCamelCase__ : List[str] = parser.parse_args() UpperCamelCase__ : Any = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path) model.save_pretrained(args.pytorch_dump_folder_path)
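# Hedged usage sketch for the converter above; the script filename is a
# placeholder, while the two positional arguments match the argparse
# definition in this file.
#
#   python convert_m2m100_checkpoint.py /path/to/model.pt /path/to/output_dir
#
# The decoder output projection is rebuilt from the shared embedding matrix via
# make_linear_from_emb before the model is saved.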
'''simple docstring''' import json import os import shutil import tempfile import unittest from transformers import BatchEncoding, CanineTokenizer from transformers.testing_utils import require_tokenizers, require_torch from transformers.tokenization_utils import AddedToken from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin class _a (_lowerCamelCase , unittest.TestCase): """simple docstring""" SCREAMING_SNAKE_CASE = CanineTokenizer SCREAMING_SNAKE_CASE = False def UpperCamelCase ( self ) -> str: super().setUp() _SCREAMING_SNAKE_CASE = CanineTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def UpperCamelCase ( self ) -> str: return CanineTokenizer.from_pretrained("""google/canine-s""" ) def UpperCamelCase ( self , **A__ ) -> CanineTokenizer: _SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained(self.tmpdirname , **A__ ) _SCREAMING_SNAKE_CASE = 10_24 return tokenizer @require_torch def UpperCamelCase ( self ) -> Union[str, Any]: _SCREAMING_SNAKE_CASE = self.canine_tokenizer _SCREAMING_SNAKE_CASE = ["""Life is like a box of chocolates.""", """You never know what you're gonna get."""] # fmt: off _SCREAMING_SNAKE_CASE = [5_73_44, 76, 1_05, 1_02, 1_01, 32, 1_05, 1_15, 32, 1_08, 1_05, 1_07, 1_01, 32, 97, 32, 98, 1_11, 1_20, 32, 1_11, 1_02, 32, 99, 1_04, 1_11, 99, 1_11, 1_08, 97, 1_16, 1_01, 1_15, 46, 5_73_45, 0, 0, 0, 0] # fmt: on _SCREAMING_SNAKE_CASE = tokenizer(A__ , padding=A__ , return_tensors="""pt""" ) self.assertIsInstance(A__ , A__ ) _SCREAMING_SNAKE_CASE = list(batch.input_ids.numpy()[0] ) self.assertListEqual(A__ , A__ ) self.assertEqual((2, 39) , batch.input_ids.shape ) self.assertEqual((2, 39) , batch.attention_mask.shape ) @require_torch def UpperCamelCase ( self ) -> List[str]: _SCREAMING_SNAKE_CASE = self.canine_tokenizer _SCREAMING_SNAKE_CASE = ["""Once there was a man.""", """He wrote a test in HuggingFace Tranformers."""] _SCREAMING_SNAKE_CASE = tokenizer(A__ , padding=A__ , return_tensors="""pt""" ) # check if input_ids, attention_mask and token_type_ids are returned self.assertIn("""input_ids""" , A__ ) self.assertIn("""attention_mask""" , A__ ) self.assertIn("""token_type_ids""" , A__ ) @require_torch def UpperCamelCase ( self ) -> Union[str, Any]: _SCREAMING_SNAKE_CASE = self.canine_tokenizer _SCREAMING_SNAKE_CASE = [ """What's the weater?""", """It's about 25 degrees.""", ] _SCREAMING_SNAKE_CASE = tokenizer( text_target=A__ , max_length=32 , padding="""max_length""" , truncation=A__ , return_tensors="""pt""" ) self.assertEqual(32 , targets["""input_ids"""].shape[1] ) def UpperCamelCase ( self ) -> Optional[Any]: # safety check on max_len default value so we are sure the test works _SCREAMING_SNAKE_CASE = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F"{tokenizer.__class__.__name__}" ): self.assertNotEqual(tokenizer.model_max_length , 42 ) # Now let's start the test _SCREAMING_SNAKE_CASE = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F"{tokenizer.__class__.__name__}" ): # Isolate this from the other tests because we save additional tokens/etc _SCREAMING_SNAKE_CASE = tempfile.mkdtemp() _SCREAMING_SNAKE_CASE = """ He is very happy, UNwant\u00E9d,running""" _SCREAMING_SNAKE_CASE = tokenizer.encode(A__ , add_special_tokens=A__ ) tokenizer.save_pretrained(A__ ) _SCREAMING_SNAKE_CASE = tokenizer.__class__.from_pretrained(A__ ) _SCREAMING_SNAKE_CASE = after_tokenizer.encode(A__ , add_special_tokens=A__ ) self.assertListEqual(A__ , A__ ) 
shutil.rmtree(A__ ) _SCREAMING_SNAKE_CASE = self.get_tokenizers(model_max_length=42 ) for tokenizer in tokenizers: with self.subTest(F"{tokenizer.__class__.__name__}" ): # Isolate this from the other tests because we save additional tokens/etc _SCREAMING_SNAKE_CASE = tempfile.mkdtemp() _SCREAMING_SNAKE_CASE = """ He is very happy, UNwant\u00E9d,running""" _SCREAMING_SNAKE_CASE = tokenizer.additional_special_tokens # We can add a new special token for Canine as follows: _SCREAMING_SNAKE_CASE = chr(0Xe_0_0_7 ) additional_special_tokens.append(A__ ) tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} ) _SCREAMING_SNAKE_CASE = tokenizer.encode(A__ , add_special_tokens=A__ ) tokenizer.save_pretrained(A__ ) _SCREAMING_SNAKE_CASE = tokenizer.__class__.from_pretrained(A__ ) _SCREAMING_SNAKE_CASE = after_tokenizer.encode(A__ , add_special_tokens=A__ ) self.assertListEqual(A__ , A__ ) self.assertIn(A__ , after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length , 42 ) _SCREAMING_SNAKE_CASE = tokenizer.__class__.from_pretrained(A__ , model_max_length=43 ) self.assertEqual(tokenizer.model_max_length , 43 ) shutil.rmtree(A__ ) def UpperCamelCase ( self ) -> Optional[Any]: _SCREAMING_SNAKE_CASE = self.get_tokenizers(do_lower_case=A__ ) for tokenizer in tokenizers: with self.subTest(F"{tokenizer.__class__.__name__}" ): _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.get_clean_sequence(A__ ) # a special token for Canine can be defined as follows: _SCREAMING_SNAKE_CASE = 0Xe_0_0_5 _SCREAMING_SNAKE_CASE = chr(A__ ) tokenizer.add_special_tokens({"""cls_token""": special_token} ) _SCREAMING_SNAKE_CASE = tokenizer.encode(A__ , add_special_tokens=A__ ) self.assertEqual(len(A__ ) , 1 ) _SCREAMING_SNAKE_CASE = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=A__ ) _SCREAMING_SNAKE_CASE = tokenizer.encode(A__ , add_special_tokens=A__ ) _SCREAMING_SNAKE_CASE = tokenizer.encode(A__ , add_special_tokens=A__ ) _SCREAMING_SNAKE_CASE = tokenizer.encode(A__ , add_special_tokens=A__ ) self.assertEqual(A__ , input_encoded + special_token_id ) _SCREAMING_SNAKE_CASE = tokenizer.decode(A__ , skip_special_tokens=A__ ) self.assertTrue(special_token not in decoded ) def UpperCamelCase ( self ) -> Union[str, Any]: _SCREAMING_SNAKE_CASE = self.get_tokenizers(do_lower_case=A__ ) for tokenizer in tokenizers: with self.subTest(F"{tokenizer.__class__.__name__}" ): _SCREAMING_SNAKE_CASE = chr(0Xe_0_0_5 ) _SCREAMING_SNAKE_CASE = chr(0Xe_0_0_6 ) # `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py) tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=A__ ) # `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`, # which also occur in `tokenizer.all_special_tokens`. 
(in tokenization_utils_base.py) tokenizer.add_special_tokens({"""additional_special_tokens""": [SPECIAL_TOKEN_2]} ) _SCREAMING_SNAKE_CASE = tokenizer.tokenize(A__ ) _SCREAMING_SNAKE_CASE = tokenizer.tokenize(A__ ) self.assertEqual(len(A__ ) , 1 ) self.assertEqual(len(A__ ) , 1 ) self.assertEqual(token_a[0] , A__ ) self.assertEqual(token_a[0] , A__ ) @require_tokenizers def UpperCamelCase ( self ) -> int: _SCREAMING_SNAKE_CASE = self.get_tokenizers(do_lower_case=A__ ) for tokenizer in tokenizers: with self.subTest(F"{tokenizer.__class__.__name__}" ): # a special token for Canine can be defined as follows: _SCREAMING_SNAKE_CASE = 0Xe_0_0_6 _SCREAMING_SNAKE_CASE = chr(A__ ) _SCREAMING_SNAKE_CASE = AddedToken(A__ , lstrip=A__ ) tokenizer.add_special_tokens({"""additional_special_tokens""": [new_token]} ) with tempfile.TemporaryDirectory() as tmp_dir_name: tokenizer.save_pretrained(A__ ) tokenizer.from_pretrained(A__ ) def UpperCamelCase ( self ) -> List[Any]: _SCREAMING_SNAKE_CASE = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(A__ ) with open(os.path.join(A__ , """special_tokens_map.json""" ) , encoding="""utf-8""" ) as json_file: _SCREAMING_SNAKE_CASE = json.load(A__ ) with open(os.path.join(A__ , """tokenizer_config.json""" ) , encoding="""utf-8""" ) as json_file: _SCREAMING_SNAKE_CASE = json.load(A__ ) # a special token for Canine can be defined as follows: _SCREAMING_SNAKE_CASE = 0Xe_0_0_6 _SCREAMING_SNAKE_CASE = chr(A__ ) _SCREAMING_SNAKE_CASE = [new_token_a] _SCREAMING_SNAKE_CASE = [new_token_a] with open(os.path.join(A__ , """special_tokens_map.json""" ) , """w""" , encoding="""utf-8""" ) as outfile: json.dump(A__ , A__ ) with open(os.path.join(A__ , """tokenizer_config.json""" ) , """w""" , encoding="""utf-8""" ) as outfile: json.dump(A__ , A__ ) # the following checks allow us to verify that our test works as expected, i.e. 
that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files _SCREAMING_SNAKE_CASE = tokenizer_class.from_pretrained(A__ , extra_ids=0 ) self.assertIn(A__ , tokenizer_without_change_in_init.additional_special_tokens ) # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( [new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , ) _SCREAMING_SNAKE_CASE = 0Xe_0_0_7 _SCREAMING_SNAKE_CASE = chr(A__ ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained _SCREAMING_SNAKE_CASE = [AddedToken(A__ , lstrip=A__ )] _SCREAMING_SNAKE_CASE = tokenizer_class.from_pretrained( A__ , additional_special_tokens=A__ , extra_ids=0 ) self.assertIn(A__ , tokenizer.additional_special_tokens ) # self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( [new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) ) @require_tokenizers def UpperCamelCase ( self ) -> List[str]: _SCREAMING_SNAKE_CASE = self.get_tokenizers(do_lower_case=A__ ) for tokenizer in tokenizers: with self.subTest(F"{tokenizer.__class__.__name__}" ): _SCREAMING_SNAKE_CASE = """hello world""" if self.space_between_special_tokens: _SCREAMING_SNAKE_CASE = """[CLS] hello world [SEP]""" else: _SCREAMING_SNAKE_CASE = input _SCREAMING_SNAKE_CASE = tokenizer.encode(A__ , add_special_tokens=A__ ) _SCREAMING_SNAKE_CASE = tokenizer.decode(A__ , spaces_between_special_tokens=self.space_between_special_tokens ) self.assertIn(A__ , [output, output.lower()] ) def UpperCamelCase ( self ) -> Union[str, Any]: _SCREAMING_SNAKE_CASE = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F"{tokenizer.__class__.__name__}" ): _SCREAMING_SNAKE_CASE = [ """bos_token""", """eos_token""", """unk_token""", """sep_token""", """pad_token""", """cls_token""", """mask_token""", ] _SCREAMING_SNAKE_CASE = """a""" _SCREAMING_SNAKE_CASE = ord(A__ ) for attr in attributes_list: setattr(A__ , attr + """_id""" , A__ ) self.assertEqual(getattr(A__ , A__ ) , A__ ) self.assertEqual(getattr(A__ , attr + """_id""" ) , A__ ) setattr(A__ , attr + """_id""" , A__ ) self.assertEqual(getattr(A__ , A__ ) , A__ ) self.assertEqual(getattr(A__ , attr + """_id""" ) , A__ ) setattr(A__ , """additional_special_tokens_ids""" , [] ) self.assertListEqual(getattr(A__ , """additional_special_tokens""" ) , [] ) self.assertListEqual(getattr(A__ , """additional_special_tokens_ids""" ) , [] ) _SCREAMING_SNAKE_CASE = 0Xe_0_0_6 _SCREAMING_SNAKE_CASE = chr(A__ ) setattr(A__ , """additional_special_tokens_ids""" , [additional_special_token_id] ) self.assertListEqual(getattr(A__ , """additional_special_tokens""" ) , [additional_special_token] ) self.assertListEqual(getattr(A__ , """additional_special_tokens_ids""" ) , [additional_special_token_id] ) def UpperCamelCase ( self ) -> Union[str, Any]: pass def UpperCamelCase ( self ) -> List[str]: pass def UpperCamelCase ( self ) -> Optional[Any]: pass def UpperCamelCase ( self ) -> List[Any]: pass def UpperCamelCase ( self ) -> Optional[int]: pass def UpperCamelCase ( self ) -> List[Any]: pass def UpperCamelCase ( self ) -> int: pass def UpperCamelCase ( self ) -> Optional[int]: pass
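# Illustrative note on the id space exercised above: CANINE tokenizes text
# straight to Unicode code points and parks its special tokens in the
# private-use area, which is why the expected ids start with 57344 (0xE000,
# [CLS]) and close the sentence with 57345 (0xE001, [SEP]).
assert ord("L") == 76 and 0xE000 == 57344 and 0xE001 == 57345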
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available UpperCamelCase__ : str = { "configuration_canine": ["CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP", "CanineConfig"], "tokenization_canine": ["CanineTokenizer"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ : List[Any] = [ "CANINE_PRETRAINED_MODEL_ARCHIVE_LIST", "CanineForMultipleChoice", "CanineForQuestionAnswering", "CanineForSequenceClassification", "CanineForTokenClassification", "CanineLayer", "CanineModel", "CaninePreTrainedModel", "load_tf_weights_in_canine", ] if TYPE_CHECKING: from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig from .tokenization_canine import CanineTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_canine import ( CANINE_PRETRAINED_MODEL_ARCHIVE_LIST, CanineForMultipleChoice, CanineForQuestionAnswering, CanineForSequenceClassification, CanineForTokenClassification, CanineLayer, CanineModel, CaninePreTrainedModel, load_tf_weights_in_canine, ) else: import sys UpperCamelCase__ : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)