Columns (type, value range):
code: string, lengths 82 to 54.1k
code_codestyle: int64, 0 to 699
style_context: string, lengths 111 to 35.6k
style_context_codestyle: int64, 0 to 699
label: int64, 0 to 1
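Each row below pairs a code cell with style metadata under the schema above. As a minimal sketch of how rows with this schema can be loaded and inspected with the Hugging Face `datasets` library (the dataset id "user/code-style-pairs" is a hypothetical placeholder, not the actual source of this dump):

from datasets import load_dataset

# "user/code-style-pairs" is a hypothetical placeholder id for a dataset
# exposing the columns listed above.
ds = load_dataset("user/code-style-pairs", split="train")
print(ds.features)  # expect: code (string), code_codestyle (int64), style_context (string), ...

row = ds[0]
print(len(row["code"]))       # string length, within 82 .. ~54,100 per the stats above
print(row["code_codestyle"])  # style id, 0 .. 699
print(row["label"])           # binary label, 0 or 1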
from __future__ import annotations import inspect import unittest import numpy as np from transformers import ResNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFResNetForImageClassification, TFResNetModel from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class lowerCamelCase_ : def __init__( self , __lowerCAmelCase , __lowerCAmelCase=3 , __lowerCAmelCase=3_2 , __lowerCAmelCase=3 , __lowerCAmelCase=1_0 , __lowerCAmelCase=[1_0, 2_0, 3_0, 4_0] , __lowerCAmelCase=[1, 1, 2, 1] , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase="relu" , __lowerCAmelCase=3 , __lowerCAmelCase=None , ): """simple docstring""" __magic_name__ :int = parent __magic_name__ :Any = batch_size __magic_name__ :List[str] = image_size __magic_name__ :str = num_channels __magic_name__ :List[str] = embeddings_size __magic_name__ :Union[str, Any] = hidden_sizes __magic_name__ :List[str] = depths __magic_name__ :int = is_training __magic_name__ :Dict = use_labels __magic_name__ :str = hidden_act __magic_name__ :List[str] = num_labels __magic_name__ :int = scope __magic_name__ :List[str] = len(__lowerCAmelCase ) def A ( self ): """simple docstring""" __magic_name__ :Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __magic_name__ :Optional[int] = None if self.use_labels: __magic_name__ :Union[str, Any] = ids_tensor([self.batch_size] , self.num_labels ) __magic_name__ :List[Any] = self.get_config() return config, pixel_values, labels def A ( self ): """simple docstring""" return ResNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , ) def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): """simple docstring""" __magic_name__ :List[Any] = TFResNetModel(config=__lowerCAmelCase ) __magic_name__ :List[Any] = model(__lowerCAmelCase ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , ) def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): """simple docstring""" __magic_name__ :List[str] = self.num_labels __magic_name__ :Optional[int] = TFResNetForImageClassification(__lowerCAmelCase ) __magic_name__ :Optional[Any] = model(__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def A ( self ): """simple docstring""" __magic_name__ :int = self.prepare_config_and_inputs() __magic_name__ , __magic_name__ , __magic_name__ :Optional[int] = config_and_inputs __magic_name__ :List[Any] = {'''pixel_values''': pixel_values} return config, inputs_dict @require_tf class lowerCamelCase_ ( lowerCamelCase , lowerCamelCase , unittest.TestCase ): a__ = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else () a__ = ( 
{'''feature-extraction''': TFResNetModel, '''image-classification''': TFResNetForImageClassification} if is_tf_available() else {} ) a__ = False a__ = False a__ = False a__ = False a__ = False def A ( self ): """simple docstring""" __magic_name__ :str = TFResNetModelTester(self ) __magic_name__ :List[str] = ConfigTester(self , config_class=__lowerCAmelCase , has_text_modality=__lowerCAmelCase ) def A ( self ): """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def A ( self ): """simple docstring""" return @unittest.skip(reason='''ResNet does not use inputs_embeds''' ) def A ( self ): """simple docstring""" pass @unittest.skip(reason='''ResNet does not support input and output embeddings''' ) def A ( self ): """simple docstring""" pass def A ( self ): """simple docstring""" __magic_name__ , __magic_name__ :List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __magic_name__ :List[str] = model_class(__lowerCAmelCase ) __magic_name__ :List[str] = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __magic_name__ :Dict = [*signature.parameters.keys()] __magic_name__ :Optional[Any] = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , __lowerCAmelCase ) def A ( self ): """simple docstring""" __magic_name__ :Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCAmelCase ) def A ( self ): """simple docstring""" def check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): __magic_name__ :List[Any] = model_class(__lowerCAmelCase ) __magic_name__ :int = model(**self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) ) __magic_name__ :List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states __magic_name__ :Tuple = self.model_tester.num_stages self.assertEqual(len(__lowerCAmelCase ) , expected_num_stages + 1 ) # ResNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) __magic_name__ , __magic_name__ :List[str] = self.model_tester.prepare_config_and_inputs_for_common() __magic_name__ :List[str] = ['''basic''', '''bottleneck'''] for model_class in self.all_model_classes: for layer_type in layers_type: __magic_name__ :Optional[int] = layer_type __magic_name__ :List[Any] = True check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __magic_name__ :str = True check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) def A ( self ): """simple docstring""" __magic_name__ :Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__lowerCAmelCase ) @slow def A ( self ): """simple docstring""" for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __magic_name__ :Tuple = TFResNetModel.from_pretrained(__lowerCAmelCase ) 
self.assertIsNotNone(__lowerCAmelCase ) def __lowercase ( ): """simple docstring""" __magic_name__ :int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_tf @require_vision class lowerCamelCase_ ( unittest.TestCase ): @cached_property def A ( self ): """simple docstring""" return ( AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def A ( self ): """simple docstring""" __magic_name__ :List[Any] = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) __magic_name__ :Any = self.default_image_processor __magic_name__ :List[str] = prepare_img() __magic_name__ :str = image_processor(images=__lowerCAmelCase , return_tensors='''tf''' ) # forward pass __magic_name__ :List[Any] = model(**__lowerCAmelCase ) # verify the logits __magic_name__ :Tuple = tf.TensorShape((1, 1_0_0_0) ) self.assertEqual(outputs.logits.shape , __lowerCAmelCase ) __magic_name__ :Tuple = tf.constant([-11.1069, -9.7877, -8.3777] ) self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , __lowerCAmelCase , atol=1E-4 ) )
0
import argparse import torch from torch import nn from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration def __lowercase ( snake_case ): """simple docstring""" __magic_name__ :Optional[Any] = [ '''encoder.version''', '''decoder.version''', '''model.encoder.version''', '''model.decoder.version''', '''decoder.output_projection.weight''', '''_float_tensor''', '''encoder.embed_positions._float_tensor''', '''decoder.embed_positions._float_tensor''', ] for k in ignore_keys: state_dict.pop(snake_case, snake_case ) def __lowercase ( snake_case ): """simple docstring""" __magic_name__ , __magic_name__ :Tuple = emb.weight.shape __magic_name__ :int = nn.Linear(snake_case, snake_case, bias=snake_case ) __magic_name__ :str = emb.weight.data return lin_layer def __lowercase ( snake_case ): """simple docstring""" __magic_name__ :int = torch.load(snake_case, map_location='''cpu''' ) __magic_name__ :Optional[Any] = mam_aaa['''args'''] or mam_aaa['''cfg''']['''model'''] __magic_name__ :List[Any] = mam_aaa['''model'''] remove_ignore_keys_(snake_case ) __magic_name__ :Tuple = state_dict['''encoder.embed_tokens.weight'''].shape[0] __magic_name__ :List[str] = MaMaaaConfig( vocab_size=snake_case, max_position_embeddings=1_0_2_4, encoder_layers=args.encoder_layers, decoder_layers=args.decoder_layers, encoder_attention_heads=args.encoder_attention_heads, decoder_attention_heads=args.decoder_attention_heads, encoder_ffn_dim=args.encoder_ffn_embed_dim, decoder_ffn_dim=args.decoder_ffn_embed_dim, d_model=args.encoder_embed_dim, encoder_layerdrop=args.encoder_layerdrop, decoder_layerdrop=args.decoder_layerdrop, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function='''relu''', ) __magic_name__ :int = state_dict['''decoder.embed_tokens.weight'''] __magic_name__ :List[str] = MaMaaaForConditionalGeneration(snake_case ) model.model.load_state_dict(snake_case, strict=snake_case ) __magic_name__ :List[str] = make_linear_from_emb(model.model.shared ) return model if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument("""fairseq_path""", type=str, help="""path to a model.pt on local filesystem.""") parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") SCREAMING_SNAKE_CASE__ : int = parser.parse_args() SCREAMING_SNAKE_CASE__ : Any = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path) model.save_pretrained(args.pytorch_dump_folder_path)
0
1
import copy import inspect import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import VideoMAEConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEForPreTraining, VideoMAEForVideoClassification, VideoMAEModel, ) from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from transformers import VideoMAEImageProcessor class lowerCamelCase_ : def __init__( self , __lowerCAmelCase , __lowerCAmelCase=1_3 , __lowerCAmelCase=1_0 , __lowerCAmelCase=3 , __lowerCAmelCase=2 , __lowerCAmelCase=2 , __lowerCAmelCase=2 , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=3_2 , __lowerCAmelCase=5 , __lowerCAmelCase=4 , __lowerCAmelCase=3_7 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=1_0 , __lowerCAmelCase=0.02 , __lowerCAmelCase=0.9 , __lowerCAmelCase=None , ): """simple docstring""" __magic_name__ :Tuple = parent __magic_name__ :Dict = batch_size __magic_name__ :Any = image_size __magic_name__ :List[str] = num_channels __magic_name__ :List[str] = patch_size __magic_name__ :Tuple = tubelet_size __magic_name__ :Dict = num_frames __magic_name__ :Dict = is_training __magic_name__ :Tuple = use_labels __magic_name__ :str = hidden_size __magic_name__ :Any = num_hidden_layers __magic_name__ :int = num_attention_heads __magic_name__ :str = intermediate_size __magic_name__ :int = hidden_act __magic_name__ :Dict = hidden_dropout_prob __magic_name__ :Dict = attention_probs_dropout_prob __magic_name__ :Optional[int] = type_sequence_label_size __magic_name__ :int = initializer_range __magic_name__ :Optional[Any] = mask_ratio __magic_name__ :Optional[int] = scope # in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame __magic_name__ :str = (image_size // patch_size) ** 2 __magic_name__ :Optional[Any] = (num_frames // tubelet_size) * self.num_patches_per_frame # use this variable to define bool_masked_pos __magic_name__ :Optional[Any] = int(mask_ratio * self.seq_length ) def A ( self ): """simple docstring""" __magic_name__ :Dict = floats_tensor( [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] ) __magic_name__ :Tuple = None if self.use_labels: __magic_name__ :Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __magic_name__ :Dict = self.get_config() return config, pixel_values, labels def A ( self ): """simple docstring""" return VideoMAEConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , tubelet_size=self.tubelet_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__lowerCAmelCase , 
initializer_range=self.initializer_range , ) def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): """simple docstring""" __magic_name__ :Union[str, Any] = VideoMAEModel(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() __magic_name__ :Tuple = model(__lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): """simple docstring""" __magic_name__ :Any = VideoMAEForPreTraining(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() # important: each video needs to have the same number of masked patches # hence we define a single mask, which we then repeat for each example in the batch __magic_name__ :Any = torch.ones((self.num_masks,) ) __magic_name__ :int = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] ) __magic_name__ :List[str] = mask.expand(self.batch_size , -1 ).bool() __magic_name__ :Tuple = model(__lowerCAmelCase , __lowerCAmelCase ) # model only returns predictions for masked patches __magic_name__ :Tuple = mask.sum().item() __magic_name__ :List[str] = 3 * self.tubelet_size * self.patch_size**2 self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels) ) def A ( self ): """simple docstring""" __magic_name__ :Union[str, Any] = self.prepare_config_and_inputs() __magic_name__ , __magic_name__ , __magic_name__ :List[Any] = config_and_inputs __magic_name__ :Optional[int] = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class lowerCamelCase_ ( lowerCamelCase , lowerCamelCase , unittest.TestCase ): a__ = ( (VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else () ) a__ = ( {'''feature-extraction''': VideoMAEModel, '''video-classification''': VideoMAEForVideoClassification} if is_torch_available() else {} ) a__ = False a__ = False a__ = False a__ = False def A ( self ): """simple docstring""" __magic_name__ :str = VideoMAEModelTester(self ) __magic_name__ :List[Any] = ConfigTester(self , config_class=__lowerCAmelCase , has_text_modality=__lowerCAmelCase , hidden_size=3_7 ) def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=False ): """simple docstring""" __magic_name__ :Optional[Any] = copy.deepcopy(__lowerCAmelCase ) if model_class == VideoMAEForPreTraining: # important: each video needs to have the same number of masked patches # hence we define a single mask, which we then repeat for each example in the batch __magic_name__ :Optional[int] = torch.ones((self.model_tester.num_masks,) ) __magic_name__ :str = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] ) __magic_name__ :List[str] = mask.expand(self.model_tester.batch_size , -1 ).bool() __magic_name__ :List[str] = bool_masked_pos.to(__lowerCAmelCase ) if return_labels: if model_class in [ *get_values(__lowerCAmelCase ), ]: __magic_name__ :Tuple = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__lowerCAmelCase ) return inputs_dict def A ( self ): """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason='''VideoMAE does not use inputs_embeds''' ) def A ( self ): """simple docstring""" pass def A ( self ): """simple docstring""" __magic_name__ , __magic_name__ :str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __magic_name__ :Any = model_class(__lowerCAmelCase ) 
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) __magic_name__ :Any = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__lowerCAmelCase , nn.Linear ) ) def A ( self ): """simple docstring""" __magic_name__ , __magic_name__ :Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __magic_name__ :Union[str, Any] = model_class(__lowerCAmelCase ) __magic_name__ :Any = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __magic_name__ :List[str] = [*signature.parameters.keys()] __magic_name__ :Any = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , __lowerCAmelCase ) def A ( self ): """simple docstring""" __magic_name__ :Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCAmelCase ) def A ( self ): """simple docstring""" __magic_name__ :Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*__lowerCAmelCase ) @slow def A ( self ): """simple docstring""" for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __magic_name__ :List[Any] = VideoMAEModel.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) def A ( self ): """simple docstring""" if not self.has_attentions: pass else: __magic_name__ , __magic_name__ :List[str] = self.model_tester.prepare_config_and_inputs_for_common() __magic_name__ :Tuple = True for model_class in self.all_model_classes: __magic_name__ :Tuple = self.model_tester.seq_length - self.model_tester.num_masks __magic_name__ :int = ( num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length ) __magic_name__ :Optional[int] = True __magic_name__ :Any = False __magic_name__ :Union[str, Any] = True __magic_name__ :Tuple = model_class(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() with torch.no_grad(): __magic_name__ :str = model(**self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) ) __magic_name__ :List[Any] = outputs.attentions self.assertEqual(len(__lowerCAmelCase ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] __magic_name__ :Tuple = True __magic_name__ :Any = model_class(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() with torch.no_grad(): __magic_name__ :Any = model(**self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) ) __magic_name__ :Optional[Any] = outputs.attentions self.assertEqual(len(__lowerCAmelCase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , ) __magic_name__ :Any = len(__lowerCAmelCase ) # Check attention is always last and order is fine __magic_name__ :List[str] = True __magic_name__ :Dict = True __magic_name__ :Dict = model_class(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() with torch.no_grad(): __magic_name__ :str = model(**self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) ) self.assertEqual(out_len + 1 , len(__lowerCAmelCase ) ) __magic_name__ :Optional[int] = outputs.attentions self.assertEqual(len(__lowerCAmelCase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , ) def A ( self ): """simple docstring""" def check_hidden_states_output(__lowerCAmelCase 
, __lowerCAmelCase , __lowerCAmelCase ): __magic_name__ :Tuple = model_class(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() with torch.no_grad(): __magic_name__ :Optional[Any] = model(**self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) ) __magic_name__ :Optional[int] = outputs.hidden_states __magic_name__ :Optional[int] = self.model_tester.num_hidden_layers + 1 self.assertEqual(len(__lowerCAmelCase ) , __lowerCAmelCase ) __magic_name__ :Tuple = self.model_tester.seq_length - self.model_tester.num_masks __magic_name__ :int = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) __magic_name__ , __magic_name__ :Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __magic_name__ :List[str] = True check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __magic_name__ :Any = True check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def A ( self ): """simple docstring""" pass def __lowercase ( ): """simple docstring""" __magic_name__ :Union[str, Any] = hf_hub_download( repo_id='''hf-internal-testing/spaghetti-video''', filename='''eating_spaghetti.npy''', repo_type='''dataset''' ) __magic_name__ :Tuple = np.load(snake_case ) return list(snake_case ) @require_torch @require_vision class lowerCamelCase_ ( unittest.TestCase ): @cached_property def A ( self ): """simple docstring""" # logits were tested with a different mean and std, so we use the same here return ( VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] ) if is_vision_available() else None ) @slow def A ( self ): """simple docstring""" __magic_name__ :Tuple = VideoMAEForVideoClassification.from_pretrained('''MCG-NJU/videomae-base-finetuned-kinetics''' ).to( __lowerCAmelCase ) __magic_name__ :List[str] = self.default_image_processor __magic_name__ :Optional[int] = prepare_video() __magic_name__ :int = image_processor(__lowerCAmelCase , return_tensors='''pt''' ).to(__lowerCAmelCase ) # forward pass with torch.no_grad(): __magic_name__ :List[Any] = model(**__lowerCAmelCase ) # verify the logits __magic_name__ :Dict = torch.Size((1, 4_0_0) ) self.assertEqual(outputs.logits.shape , __lowerCAmelCase ) __magic_name__ :Union[str, Any] = torch.tensor([0.3669, -0.0688, -0.2421] ).to(__lowerCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCAmelCase , atol=1E-4 ) ) @slow def A ( self ): """simple docstring""" __magic_name__ :int = VideoMAEForPreTraining.from_pretrained('''MCG-NJU/videomae-base-short''' ).to(__lowerCAmelCase ) __magic_name__ :Tuple = self.default_image_processor __magic_name__ :List[str] = prepare_video() __magic_name__ :Any = image_processor(__lowerCAmelCase , return_tensors='''pt''' ).to(__lowerCAmelCase ) # add boolean mask, indicating which patches to mask __magic_name__ :Any = hf_hub_download(repo_id='''hf-internal-testing/bool-masked-pos''' , filename='''bool_masked_pos.pt''' ) __magic_name__ :Tuple = torch.load(__lowerCAmelCase ) # forward pass with torch.no_grad(): __magic_name__ :Tuple = model(**__lowerCAmelCase ) # verify the logits __magic_name__ :List[str] = torch.Size([1, 1_4_0_8, 1_5_3_6] ) 
__magic_name__ :str = torch.tensor( [[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]] , device=__lowerCAmelCase ) self.assertEqual(outputs.logits.shape , __lowerCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , __lowerCAmelCase , atol=1E-4 ) ) # verify the loss (`config.norm_pix_loss` = `True`) __magic_name__ :Optional[Any] = torch.tensor([0.5142] , device=__lowerCAmelCase ) self.assertTrue(torch.allclose(outputs.loss , __lowerCAmelCase , atol=1E-4 ) ) # verify the loss (`config.norm_pix_loss` = `False`) __magic_name__ :Dict = VideoMAEForPreTraining.from_pretrained('''MCG-NJU/videomae-base-short''' , norm_pix_loss=__lowerCAmelCase ).to( __lowerCAmelCase ) with torch.no_grad(): __magic_name__ :Optional[Any] = model(**__lowerCAmelCase ) __magic_name__ :List[str] = torch.tensor(torch.tensor([0.6469] ) , device=__lowerCAmelCase ) self.assertTrue(torch.allclose(outputs.loss , __lowerCAmelCase , atol=1E-4 ) )
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available SCREAMING_SNAKE_CASE__ : Dict = { """configuration_canine""": ["""CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CanineConfig"""], """tokenization_canine""": ["""CanineTokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ : str = [ """CANINE_PRETRAINED_MODEL_ARCHIVE_LIST""", """CanineForMultipleChoice""", """CanineForQuestionAnswering""", """CanineForSequenceClassification""", """CanineForTokenClassification""", """CanineLayer""", """CanineModel""", """CaninePreTrainedModel""", """load_tf_weights_in_canine""", ] if TYPE_CHECKING: from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig from .tokenization_canine import CanineTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_canine import ( CANINE_PRETRAINED_MODEL_ARCHIVE_LIST, CanineForMultipleChoice, CanineForQuestionAnswering, CanineForSequenceClassification, CanineForTokenClassification, CanineLayer, CanineModel, CaninePreTrainedModel, load_tf_weights_in_canine, ) else: import sys SCREAMING_SNAKE_CASE__ : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
0
1
import argparse import datetime def __lowercase ( snake_case ): """simple docstring""" __magic_name__ :Dict = { '''0''': '''Sunday''', '''1''': '''Monday''', '''2''': '''Tuesday''', '''3''': '''Wednesday''', '''4''': '''Thursday''', '''5''': '''Friday''', '''6''': '''Saturday''', } __magic_name__ :Dict = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0} # Validate if not 0 < len(snake_case ) < 1_1: raise ValueError('''Must be 10 characters long''' ) # Get month __magic_name__ :int = int(date_input[0] + date_input[1] ) # Validate if not 0 < m < 1_3: raise ValueError('''Month must be between 1 - 12''' ) __magic_name__ :str = date_input[2] # Validate if sep_a not in ["-", "/"]: raise ValueError('''Date separator must be \'-\' or \'/\'''' ) # Get day __magic_name__ :int = int(date_input[3] + date_input[4] ) # Validate if not 0 < d < 3_2: raise ValueError('''Date must be between 1 - 31''' ) # Get second separator __magic_name__ :str = date_input[5] # Validate if sep_a not in ["-", "/"]: raise ValueError('''Date separator must be \'-\' or \'/\'''' ) # Get year __magic_name__ :int = int(date_input[6] + date_input[7] + date_input[8] + date_input[9] ) # Arbitrary year range if not 4_5 < y < 8_5_0_0: raise ValueError( '''Year out of range. There has to be some sort of limit...right?''' ) # Get datetime obj for validation __magic_name__ :List[str] = datetime.date(int(snake_case ), int(snake_case ), int(snake_case ) ) # Start math if m <= 2: __magic_name__ :int = y - 1 __magic_name__ :int = m + 1_2 # maths var __magic_name__ :int = int(str(snake_case )[:2] ) __magic_name__ :int = int(str(snake_case )[2:] ) __magic_name__ :int = int(2.6 * m - 5.39 ) __magic_name__ :int = int(c / 4 ) __magic_name__ :int = int(k / 4 ) __magic_name__ :int = int(d + k ) __magic_name__ :int = int(t + u + v + x ) __magic_name__ :int = int(z - (2 * c) ) __magic_name__ :int = round(w % 7 ) # End math # Validate math if f != convert_datetime_days[dt_ck.weekday()]: raise AssertionError('''The date was evaluated incorrectly. Contact developer.''' ) # Response __magic_name__ :str = f'''Your date {date_input}, is a {days[str(snake_case )]}!''' return response if __name__ == "__main__": import doctest doctest.testmod() SCREAMING_SNAKE_CASE__ : Optional[Any] = argparse.ArgumentParser( description=( """Find out what day of the week nearly any date is or was. Enter """ """date as a string in the mm-dd-yyyy or mm/dd/yyyy format""" ) ) parser.add_argument( """date_input""", type=str, help="""Date as a string (mm-dd-yyyy or mm/dd/yyyy)""" ) SCREAMING_SNAKE_CASE__ : Dict = parser.parse_args() zeller(args.date_input)
0
import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class lowerCamelCase_ ( lowerCamelCase ): a__ = ['''image_processor''', '''tokenizer'''] a__ = '''ChineseCLIPImageProcessor''' a__ = ('''BertTokenizer''', '''BertTokenizerFast''') def __init__( self , __lowerCAmelCase=None , __lowerCAmelCase=None , **__lowerCAmelCase ): """simple docstring""" __magic_name__ :Tuple = None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''' , __lowerCAmelCase , ) __magic_name__ :Optional[Any] = kwargs.pop('''feature_extractor''' ) __magic_name__ :Tuple = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) super().__init__(__lowerCAmelCase , __lowerCAmelCase ) __magic_name__ :List[Any] = self.image_processor def __call__( self , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , **__lowerCAmelCase ): """simple docstring""" if text is None and images is None: raise ValueError('''You have to specify either text or images. Both cannot be none.''' ) if text is not None: __magic_name__ :int = self.tokenizer(__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase ) if images is not None: __magic_name__ :Dict = self.image_processor(__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase ) if text is not None and images is not None: __magic_name__ :Union[str, Any] = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**__lowerCAmelCase ) , tensor_type=__lowerCAmelCase ) def A ( self , *__lowerCAmelCase , **__lowerCAmelCase ): """simple docstring""" return self.tokenizer.batch_decode(*__lowerCAmelCase , **__lowerCAmelCase ) def A ( self , *__lowerCAmelCase , **__lowerCAmelCase ): """simple docstring""" return self.tokenizer.decode(*__lowerCAmelCase , **__lowerCAmelCase ) @property def A ( self ): """simple docstring""" __magic_name__ :List[Any] = self.tokenizer.model_input_names __magic_name__ :Any = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) @property def A ( self ): """simple docstring""" warnings.warn( '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , __lowerCAmelCase , ) return self.image_processor_class
0
1
from typing import Dict from .base import GenericTensor, Pipeline class lowerCamelCase_ ( lowerCamelCase ): def A ( self , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , **__lowerCAmelCase ): """simple docstring""" if tokenize_kwargs is None: __magic_name__ :List[str] = {} if truncation is not None: if "truncation" in tokenize_kwargs: raise ValueError( '''truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)''' ) __magic_name__ :List[Any] = truncation __magic_name__ :Dict = tokenize_kwargs __magic_name__ :str = {} if return_tensors is not None: __magic_name__ :Any = return_tensors return preprocess_params, {}, postprocess_params def A ( self , __lowerCAmelCase , **__lowerCAmelCase ): """simple docstring""" __magic_name__ :str = self.framework __magic_name__ :Optional[Any] = self.tokenizer(__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase ) return model_inputs def A ( self , __lowerCAmelCase ): """simple docstring""" __magic_name__ :Optional[Any] = self.model(**__lowerCAmelCase ) return model_outputs def A ( self , __lowerCAmelCase , __lowerCAmelCase=False ): """simple docstring""" # [0] is the first available tensor, logits or last_hidden_state. if return_tensors: return model_outputs[0] if self.framework == "pt": return model_outputs[0].tolist() elif self.framework == "tf": return model_outputs[0].numpy().tolist() def __call__( self , *__lowerCAmelCase , **__lowerCAmelCase ): """simple docstring""" return super().__call__(*__lowerCAmelCase , **__lowerCAmelCase )
0
from sklearn.metrics import matthews_corrcoef import datasets SCREAMING_SNAKE_CASE__ : Optional[Any] = """ Compute the Matthews correlation coefficient (MCC) The Matthews correlation coefficient is used in machine learning as a measure of the quality of binary and multiclass classifications. It takes into account true and false positives and negatives and is generally regarded as a balanced measure which can be used even if the classes are of very different sizes. The MCC is in essence a correlation coefficient value between -1 and +1. A coefficient of +1 represents a perfect prediction, 0 an average random prediction and -1 an inverse prediction. The statistic is also known as the phi coefficient. [source: Wikipedia] """ SCREAMING_SNAKE_CASE__ : Union[str, Any] = """ Args: predictions (list of int): Predicted labels, as returned by a model. references (list of int): Ground truth labels. sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`. Returns: matthews_correlation (dict containing float): Matthews correlation. Examples: Example 1, a basic example with only predictions and references as inputs: >>> matthews_metric = datasets.load_metric(\"matthews_correlation\") >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2], ... predictions=[1, 2, 2, 0, 3, 3]) >>> print(round(results['matthews_correlation'], 2)) 0.54 Example 2, the same example as above, but also including sample weights: >>> matthews_metric = datasets.load_metric(\"matthews_correlation\") >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2], ... predictions=[1, 2, 2, 0, 3, 3], ... sample_weight=[0.5, 3, 1, 1, 1, 2]) >>> print(round(results['matthews_correlation'], 2)) 0.1 Example 3, the same example as above, but with sample weights that cause a negative correlation: >>> matthews_metric = datasets.load_metric(\"matthews_correlation\") >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2], ... predictions=[1, 2, 2, 0, 3, 3], ... sample_weight=[0.5, 1, 0, 0, 0, 1]) >>> print(round(results['matthews_correlation'], 2)) -0.25 """ SCREAMING_SNAKE_CASE__ : int = """\ @article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011} } """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCamelCase_ ( datasets.Metric ): def A ( self ): """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''int32''' ), '''references''': datasets.Value('''int32''' ), } ) , reference_urls=[ '''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html''' ] , ) def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None ): """simple docstring""" return { "matthews_correlation": float(matthews_corrcoef(__lowerCAmelCase , __lowerCAmelCase , sample_weight=__lowerCAmelCase ) ), }
0
1
import math import os import re import sys import unittest from pathlib import Path from typing import Tuple from unittest.mock import patch from parameterized import parameterized from transformers.testing_utils import ( CaptureStderr, ExtendSysPath, TestCasePlus, execute_subprocess_async, get_gpu_count, get_torch_dist_unique_port, require_apex, require_bitsandbytes, require_fairscale, require_torch, require_torch_gpu, require_torch_multi_gpu, require_torch_non_multi_gpu, slow, ) from transformers.trainer_callback import TrainerState from transformers.trainer_utils import set_seed SCREAMING_SNAKE_CASE__ : List[Any] = os.path.abspath(os.path.dirname(__file__)) with ExtendSysPath(f"{bindir}/../../examples/pytorch/translation"): from run_translation import main # noqa set_seed(42) SCREAMING_SNAKE_CASE__ : Optional[Any] = """sshleifer/student_marian_en_ro_6_1""" SCREAMING_SNAKE_CASE__ : List[Any] = """sshleifer/tiny-mbart""" @require_torch class lowerCamelCase_ ( lowerCamelCase ): def A ( self , __lowerCAmelCase=False , __lowerCAmelCase=None , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , ): """simple docstring""" __magic_name__ :List[Any] = self.run_trainer( eval_steps=1 , max_len=1_2 , model_name=__lowerCAmelCase , num_train_epochs=1 , distributed=__lowerCAmelCase , extra_args_str=__lowerCAmelCase , predict_with_generate=__lowerCAmelCase , do_train=__lowerCAmelCase , do_eval=__lowerCAmelCase , do_predict=__lowerCAmelCase , ) __magic_name__ :Any = TrainerState.load_from_json(os.path.join(__lowerCAmelCase , '''trainer_state.json''' ) ).log_history if not do_eval: return __magic_name__ :Any = [log for log in logs if '''eval_loss''' in log.keys()] __magic_name__ :str = eval_metrics[0] if predict_with_generate: assert "eval_bleu" in first_step_stats __magic_name__ :Tuple = eval_metrics[-1] assert isinstance(last_step_stats['''eval_bleu'''] , __lowerCAmelCase ) assert not math.isnan(float(last_step_stats['''eval_loss'''] ) ), "eval_loss must not be `nan`" @require_torch_non_multi_gpu def A ( self ): """simple docstring""" self.run_seqaseq_quick() @require_torch_multi_gpu def A ( self ): """simple docstring""" self.run_seqaseq_quick(distributed=__lowerCAmelCase ) @require_torch_multi_gpu def A ( self ): """simple docstring""" self.run_seqaseq_quick(distributed=__lowerCAmelCase ) @unittest.skip('''Requires an update of the env running those tests''' ) @require_torch_multi_gpu @require_fairscale def A ( self ): """simple docstring""" self.run_seqaseq_quick(distributed=__lowerCAmelCase , extra_args_str='''--sharded_ddp simple''' ) @unittest.skip('''Requires an update of the env running those tests''' ) @require_torch_multi_gpu @require_fairscale def A ( self ): """simple docstring""" self.run_seqaseq_quick(distributed=__lowerCAmelCase , extra_args_str='''--sharded_ddp simple --fp16''' ) @unittest.skip('''Requires an update of the env running those tests''' ) @require_torch_multi_gpu @require_fairscale def A ( self ): """simple docstring""" self.run_seqaseq_quick(distributed=__lowerCAmelCase , extra_args_str='''--sharded_ddp zero_dp_2''' , predict_with_generate=__lowerCAmelCase ) @unittest.skip('''Requires an update of the env running those tests''' ) @require_torch_multi_gpu @require_fairscale def A ( self ): """simple docstring""" self.run_seqaseq_quick( distributed=__lowerCAmelCase , extra_args_str='''--sharded_ddp zero_dp_2 --fp16''' , predict_with_generate=__lowerCAmelCase ) @require_apex @require_torch_gpu def A ( self ): """simple docstring""" # 
XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same # program and it breaks other tests that run from the same pytest worker, therefore until this is # sorted out it must be run only in an external program, that is distributed=True in this # test and only under one or more gpus - if we want cpu will need to make a special test # # specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via # 2nd main() call it botches the future eval. # self.run_seqaseq_quick(distributed=__lowerCAmelCase , extra_args_str='''--fp16 --fp16_backend=apex''' ) # test 2nd time - was getting eval_loss': nan' # to reproduce the problem set distributed=False self.run_seqaseq_quick(distributed=__lowerCAmelCase , extra_args_str='''--fp16 --fp16_backend=apex''' ) @parameterized.expand(['''base''', '''low''', '''high''', '''mixed'''] ) @require_torch_multi_gpu def A ( self , __lowerCAmelCase ): """simple docstring""" # as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout __magic_name__ :Any = { # test with the default log_level - should be info and thus log info once '''base''': {'''extra_args_str''': '''''', '''n_matches''': 1}, # test with low log_level and log_level_replica - should be noisy on all processes # now the info string should appear twice on 2 processes '''low''': {'''extra_args_str''': '''--log_level debug --log_level_replica debug''', '''n_matches''': 2}, # test with high log_level and low log_level_replica # now the info string should appear once only on the replica '''high''': {'''extra_args_str''': '''--log_level error --log_level_replica debug''', '''n_matches''': 1}, # test with high log_level and log_level_replica - should be quiet on all processes '''mixed''': {'''extra_args_str''': '''--log_level error --log_level_replica error''', '''n_matches''': 0}, } __magic_name__ :Optional[Any] = experiments[experiment_id] __magic_name__ :List[Any] = {'''distributed''': True, '''predict_with_generate''': False, '''do_eval''': False, '''do_predict''': False} __magic_name__ :Optional[int] = '''Running training''' with CaptureStderr() as cl: self.run_seqaseq_quick(**__lowerCAmelCase , extra_args_str=data['''extra_args_str'''] ) __magic_name__ :int = len(re.findall(__lowerCAmelCase , cl.err ) ) self.assertEqual(__lowerCAmelCase , data['''n_matches'''] ) @slow def A ( self ): """simple docstring""" __magic_name__ :List[str] = self.run_trainer( eval_steps=2 , max_len=1_2_8 , model_name=__lowerCAmelCase , learning_rate=3E-4 , num_train_epochs=1_0 , distributed=__lowerCAmelCase , ) # Check metrics __magic_name__ :Optional[int] = TrainerState.load_from_json(os.path.join(__lowerCAmelCase , '''trainer_state.json''' ) ).log_history __magic_name__ :List[str] = [log for log in logs if '''eval_loss''' in log.keys()] __magic_name__ :Any = eval_metrics[0] __magic_name__ :int = eval_metrics[-1] assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing" assert isinstance(last_step_stats['''eval_bleu'''] , __lowerCAmelCase ) # test if do_predict saves generations and metrics __magic_name__ :List[Any] = os.listdir(__lowerCAmelCase ) __magic_name__ :List[str] = {os.path.basename(__lowerCAmelCase ) for p in contents} assert "generated_predictions.txt" in contents assert "predict_results.json" in contents @slow @require_bitsandbytes def A ( self ): """simple docstring""" from transformers.training_args import OptimizerNames def train_and_return_metrics(__lowerCAmelCase ) -> Tuple[int, float]: __magic_name__ 
:str = '''--skip_memory_metrics 0''' __magic_name__ :Dict = self.run_trainer( max_len=1_2_8 , model_name=__lowerCAmelCase , learning_rate=3E-4 , num_train_epochs=1 , optim=__lowerCAmelCase , distributed=__lowerCAmelCase , extra_args_str=__lowerCAmelCase , do_eval=__lowerCAmelCase , do_predict=__lowerCAmelCase , n_gpus_to_use=1 , ) # Check metrics __magic_name__ :Optional[Any] = TrainerState.load_from_json(Path(__lowerCAmelCase , '''trainer_state.json''' ) ).log_history __magic_name__ :int = int(logs[0]['''train_mem_gpu_peaked_delta'''] / 2**2_0 ) __magic_name__ :Optional[Any] = int(logs[0]['''train_mem_gpu_alloc_delta'''] / 2**2_0 ) __magic_name__ :Any = logs[0]['''train_loss'''] return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss __magic_name__ , __magic_name__ , __magic_name__ :int = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value ) __magic_name__ , __magic_name__ , __magic_name__ :Tuple = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value ) __magic_name__ :Tuple = gpu_alloc_mem_orig - gpu_alloc_mem_bnb __magic_name__ :Tuple = gpu_peak_mem_orig + gpu_alloc_mem_orig __magic_name__ :List[Any] = gpu_peak_mem_bnb + gpu_alloc_mem_bnb __magic_name__ :Optional[int] = gpu_total_mem_orig - gpu_total_mem_bnb # sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which # doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized # in 2 bytes and the diff in optim memory usage is derived as so: # # - normal 25*8=~200MB (8 bytes per param) # - bnb 25*2= ~50MB (2 bytes per param) # # Thus we should expect ~150MB total memory saved. # # Peak memory should be the same - the total should be different by about that same margin # # After leaving a small margin to accommodate for differences between gpus let's check # that we have at least 120MB in savings __magic_name__ :Optional[Any] = 1_2_0 # uncomment the following if this test starts failing - requires py38 for a new print feature # gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb # print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB") # print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB") # print(f"{gpu_alloc_mem_diff=}MB") # print(f"{gpu_peak_mem_diff=}MB") # print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB") # print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB") self.assertGreater( __lowerCAmelCase , __lowerCAmelCase , '''should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got''' F''' a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and''' F''' gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB''' , ) self.assertGreater( __lowerCAmelCase , __lowerCAmelCase , '''should use ~150MB less total gpu memory with BNB, compared to without it for this model but got''' F''' a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and''' F''' gpu_total_mem_bnb={gpu_total_mem_bnb}MB''' , ) self.assertEqual( __lowerCAmelCase , __lowerCAmelCase , F'''loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}''' ) def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = 3E-3 , __lowerCAmelCase = "adafactor" , __lowerCAmelCase = False , __lowerCAmelCase = None , __lowerCAmelCase = 0 , __lowerCAmelCase = True , __lowerCAmelCase = True , __lowerCAmelCase = True , __lowerCAmelCase = True , __lowerCAmelCase = None , ): """simple docstring""" 
__magic_name__ :int = self.test_file_dir / '''../fixtures/tests_samples/wmt_en_ro''' __magic_name__ :Dict = self.get_auto_remove_tmp_dir() __magic_name__ :Tuple = F''' --model_name_or_path {model_name} --train_file {data_dir}/train.json --validation_file {data_dir}/val.json --test_file {data_dir}/test.json --output_dir {output_dir} --overwrite_output_dir --max_train_samples 8 --max_source_length {max_len} --max_target_length {max_len} --do_train --num_train_epochs {str(__lowerCAmelCase )} --per_device_train_batch_size 4 --learning_rate {learning_rate} --warmup_steps 8 --logging_steps 0 --logging_strategy no --save_steps {str(__lowerCAmelCase )} --group_by_length --label_smoothing_factor 0.1 --target_lang ro_RO --source_lang en_XX '''.split() __magic_name__ :str = F''' --do_eval --per_device_eval_batch_size 4 --max_eval_samples 8 --val_max_target_length {max_len} --evaluation_strategy steps --eval_steps {str(__lowerCAmelCase )} '''.split() __magic_name__ :Dict = ''' --do_predict '''.split() __magic_name__ :Optional[int] = [] if do_train: args += args_train if do_eval: args += args_eval if do_predict: args += args_predict if predict_with_generate: args += "--predict_with_generate".split() if do_train: if optim == "adafactor": args += "--adafactor".split() else: args += F'''--optim {optim}'''.split() if extra_args_str is not None: args += extra_args_str.split() if distributed: if n_gpus_to_use is None: __magic_name__ :List[Any] = get_gpu_count() __magic_name__ :Tuple = get_torch_dist_unique_port() __magic_name__ :Union[str, Any] = F''' -m torch.distributed.run --nproc_per_node={n_gpus_to_use} --master_port={master_port} {self.examples_dir_str}/pytorch/translation/run_translation.py '''.split() __magic_name__ :Any = [sys.executable] + distributed_args + args # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die execute_subprocess_async(__lowerCAmelCase , env=self.get_env() ) else: __magic_name__ :List[Any] = ['''run_translation.py'''] + args with patch.object(__lowerCAmelCase , '''argv''' , __lowerCAmelCase ): main() return output_dir
0
from __future__ import annotations def __lowercase ( snake_case, snake_case ): """simple docstring""" print(f'''Vertex\tShortest Distance from vertex {src}''' ) for i, d in enumerate(snake_case ): print(f'''{i}\t\t{d}''' ) def __lowercase ( snake_case, snake_case, snake_case ): """simple docstring""" for j in range(snake_case ): __magic_name__ , __magic_name__ , __magic_name__ :Tuple = (graph[j][k] for k in ['''src''', '''dst''', '''weight''']) if distance[u] != float('''inf''' ) and distance[u] + w < distance[v]: return True return False def __lowercase ( snake_case, snake_case, snake_case, snake_case ): """simple docstring""" __magic_name__ :List[Any] = [float('''inf''' )] * vertex_count __magic_name__ :Tuple = 0.0 for _ in range(vertex_count - 1 ): for j in range(snake_case ): __magic_name__ , __magic_name__ , __magic_name__ :Dict = (graph[j][k] for k in ['''src''', '''dst''', '''weight''']) if distance[u] != float('''inf''' ) and distance[u] + w < distance[v]: __magic_name__ :Tuple = distance[u] + w __magic_name__ :Tuple = check_negative_cycle(snake_case, snake_case, snake_case ) if negative_cycle_exists: raise Exception('''Negative cycle found''' ) return distance if __name__ == "__main__": import doctest doctest.testmod() SCREAMING_SNAKE_CASE__ : Tuple = int(input("""Enter number of vertices: """).strip()) SCREAMING_SNAKE_CASE__ : Any = int(input("""Enter number of edges: """).strip()) SCREAMING_SNAKE_CASE__ : list[dict[str, int]] = [{} for _ in range(E)] for i in range(E): print("""Edge """, i + 1) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = ( int(x) for x in input("""Enter source, destination, weight: """).strip().split(""" """) ) SCREAMING_SNAKE_CASE__ : Dict = {"""src""": src, """dst""": dest, """weight""": weight} SCREAMING_SNAKE_CASE__ : List[Any] = int(input("""\nEnter shortest path source:""").strip()) SCREAMING_SNAKE_CASE__ : List[str] = bellman_ford(graph, V, E, source) print_distance(shortest_distance, 0)
0
1
import argparse import torch from transformers import GPTaLMHeadModel, RobertaForMaskedLM if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : Optional[int] = argparse.ArgumentParser( description=( """Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned""" """ Distillation""" ) ) parser.add_argument("""--model_type""", default="""roberta""", choices=["""roberta""", """gpt2"""]) parser.add_argument("""--model_name""", default="""roberta-large""", type=str) parser.add_argument("""--dump_checkpoint""", default="""serialization_dir/tf_roberta_048131723.pth""", type=str) parser.add_argument("""--vocab_transform""", action="""store_true""") SCREAMING_SNAKE_CASE__ : Tuple = parser.parse_args() if args.model_type == "roberta": SCREAMING_SNAKE_CASE__ : Optional[int] = RobertaForMaskedLM.from_pretrained(args.model_name) SCREAMING_SNAKE_CASE__ : List[str] = """roberta""" elif args.model_type == "gpt2": SCREAMING_SNAKE_CASE__ : Optional[int] = GPTaLMHeadModel.from_pretrained(args.model_name) SCREAMING_SNAKE_CASE__ : Any = """transformer""" SCREAMING_SNAKE_CASE__ : Tuple = model.state_dict() SCREAMING_SNAKE_CASE__ : List[str] = {} # Embeddings # if args.model_type == "gpt2": for param_name in ["wte.weight", "wpe.weight"]: SCREAMING_SNAKE_CASE__ : Union[str, Any] = state_dict[f"{prefix}.{param_name}"] else: for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]: SCREAMING_SNAKE_CASE__ : Optional[Any] = f"{prefix}.embeddings.{w}.weight" SCREAMING_SNAKE_CASE__ : int = state_dict[param_name] for w in ["weight", "bias"]: SCREAMING_SNAKE_CASE__ : Optional[int] = f"{prefix}.embeddings.LayerNorm.{w}" SCREAMING_SNAKE_CASE__ : Dict = state_dict[param_name] # Transformer Blocks # SCREAMING_SNAKE_CASE__ : int = 0 for teacher_idx in [0, 2, 4, 7, 9, 11]: if args.model_type == "gpt2": for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]: for w in ["weight", "bias"]: SCREAMING_SNAKE_CASE__ : Union[str, Any] = state_dict[ f"{prefix}.h.{teacher_idx}.{layer}.{w}" ] SCREAMING_SNAKE_CASE__ : List[Any] = state_dict[f"{prefix}.h.{teacher_idx}.attn.bias"] else: for layer in [ "attention.self.query", "attention.self.key", "attention.self.value", "attention.output.dense", "attention.output.LayerNorm", "intermediate.dense", "output.dense", "output.LayerNorm", ]: for w in ["weight", "bias"]: SCREAMING_SNAKE_CASE__ : List[Any] = state_dict[ f"{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}" ] std_idx += 1 # Language Modeling Head ###s if args.model_type == "roberta": for layer in ["lm_head.decoder.weight", "lm_head.bias"]: SCREAMING_SNAKE_CASE__ : Optional[Any] = state_dict[f"{layer}"] if args.vocab_transform: for w in ["weight", "bias"]: SCREAMING_SNAKE_CASE__ : Optional[Any] = state_dict[f"lm_head.dense.{w}"] SCREAMING_SNAKE_CASE__ : str = state_dict[f"lm_head.layer_norm.{w}"] elif args.model_type == "gpt2": for w in ["weight", "bias"]: SCREAMING_SNAKE_CASE__ : Optional[Any] = state_dict[f"{prefix}.ln_f.{w}"] SCREAMING_SNAKE_CASE__ : Tuple = state_dict["""lm_head.weight"""] print(f"N layers selected for distillation: {std_idx}") print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}") print(f"Save transferred checkpoint to {args.dump_checkpoint}.") torch.save(compressed_sd, args.dump_checkpoint)
0
from __future__ import annotations import unittest from transformers import RoFormerConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForMultipleChoice, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerModel, ) from transformers.models.roformer.modeling_tf_roformer import ( TFRoFormerSelfAttention, TFRoFormerSinusoidalPositionalEmbedding, ) class lowerCamelCase_ : def __init__( self , __lowerCAmelCase , __lowerCAmelCase=1_3 , __lowerCAmelCase=7 , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=9_9 , __lowerCAmelCase=3_2 , __lowerCAmelCase=2 , __lowerCAmelCase=4 , __lowerCAmelCase=3_7 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=5_1_2 , __lowerCAmelCase=1_6 , __lowerCAmelCase=2 , __lowerCAmelCase=0.02 , __lowerCAmelCase=3 , __lowerCAmelCase=4 , __lowerCAmelCase=None , ): """simple docstring""" __magic_name__ :Optional[int] = parent __magic_name__ :List[Any] = 1_3 __magic_name__ :Union[str, Any] = 7 __magic_name__ :Optional[Any] = True __magic_name__ :Tuple = True __magic_name__ :List[str] = True __magic_name__ :List[Any] = True __magic_name__ :int = 9_9 __magic_name__ :Any = 3_2 __magic_name__ :Union[str, Any] = 2 __magic_name__ :List[str] = 4 __magic_name__ :List[Any] = 3_7 __magic_name__ :Tuple = '''gelu''' __magic_name__ :Any = 0.1 __magic_name__ :str = 0.1 __magic_name__ :List[str] = 5_1_2 __magic_name__ :int = 1_6 __magic_name__ :Any = 2 __magic_name__ :List[Any] = 0.02 __magic_name__ :Optional[Any] = 3 __magic_name__ :Tuple = 4 __magic_name__ :Optional[Any] = None def A ( self ): """simple docstring""" __magic_name__ :Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __magic_name__ :str = None if self.use_input_mask: __magic_name__ :Optional[int] = random_attention_mask([self.batch_size, self.seq_length] ) __magic_name__ :str = None if self.use_token_type_ids: __magic_name__ :List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __magic_name__ :Union[str, Any] = None __magic_name__ :Tuple = None __magic_name__ :str = None if self.use_labels: __magic_name__ :List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __magic_name__ :List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __magic_name__ :List[Any] = ids_tensor([self.batch_size] , self.num_choices ) __magic_name__ :str = RoFormerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=__lowerCAmelCase , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , 
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): """simple docstring""" __magic_name__ :int = TFRoFormerModel(config=__lowerCAmelCase ) __magic_name__ :Optional[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} __magic_name__ :List[str] = [input_ids, input_mask] __magic_name__ :Any = model(__lowerCAmelCase ) __magic_name__ :List[str] = model(__lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): """simple docstring""" __magic_name__ :Dict = True __magic_name__ :List[str] = TFRoFormerForCausalLM(config=__lowerCAmelCase ) __magic_name__ :str = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } __magic_name__ :Optional[Any] = model(__lowerCAmelCase )['''logits'''] self.parent.assertListEqual( list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] ) def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): """simple docstring""" __magic_name__ :Optional[Any] = TFRoFormerForMaskedLM(config=__lowerCAmelCase ) __magic_name__ :Any = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } __magic_name__ :Dict = model(__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): """simple docstring""" __magic_name__ :int = self.num_labels __magic_name__ :str = TFRoFormerForSequenceClassification(config=__lowerCAmelCase ) __magic_name__ :Optional[int] = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } __magic_name__ :str = model(__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): """simple docstring""" __magic_name__ :Union[str, Any] = self.num_choices __magic_name__ :Tuple = TFRoFormerForMultipleChoice(config=__lowerCAmelCase ) __magic_name__ :int = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) ) __magic_name__ :Optional[Any] = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) ) __magic_name__ :Union[str, Any] = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) ) __magic_name__ :str = { '''input_ids''': multiple_choice_inputs_ids, '''attention_mask''': multiple_choice_input_mask, '''token_type_ids''': multiple_choice_token_type_ids, } __magic_name__ :Tuple = model(__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): """simple docstring""" __magic_name__ :Optional[int] = self.num_labels __magic_name__ :Any = TFRoFormerForTokenClassification(config=__lowerCAmelCase ) __magic_name__ :str = { '''input_ids''': input_ids, '''attention_mask''': input_mask, 
'''token_type_ids''': token_type_ids, } __magic_name__ :Dict = model(__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): """simple docstring""" __magic_name__ :List[str] = TFRoFormerForQuestionAnswering(config=__lowerCAmelCase ) __magic_name__ :List[str] = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } __magic_name__ :Union[str, Any] = model(__lowerCAmelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def A ( self ): """simple docstring""" __magic_name__ :Union[str, Any] = self.prepare_config_and_inputs() ( ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ) :Union[str, Any] = config_and_inputs __magic_name__ :Optional[Any] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_tf class lowerCamelCase_ ( lowerCamelCase , lowerCamelCase , unittest.TestCase ): a__ = ( ( TFRoFormerModel, TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerForMultipleChoice, ) if is_tf_available() else () ) a__ = ( { '''feature-extraction''': TFRoFormerModel, '''fill-mask''': TFRoFormerForMaskedLM, '''question-answering''': TFRoFormerForQuestionAnswering, '''text-classification''': TFRoFormerForSequenceClassification, '''text-generation''': TFRoFormerForCausalLM, '''token-classification''': TFRoFormerForTokenClassification, '''zero-shot''': TFRoFormerForSequenceClassification, } if is_tf_available() else {} ) a__ = False a__ = False def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): """simple docstring""" if pipeline_test_casse_name == "TextGenerationPipelineTests": return True return False def A ( self ): """simple docstring""" __magic_name__ :List[str] = TFRoFormerModelTester(self ) __magic_name__ :List[str] = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=3_7 ) def A ( self ): """simple docstring""" self.config_tester.run_common_tests() def A ( self ): """simple docstring""" __magic_name__ :Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCAmelCase ) def A ( self ): """simple docstring""" __magic_name__ :Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__lowerCAmelCase ) def A ( self ): """simple docstring""" __magic_name__ :Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head(*__lowerCAmelCase ) def A ( self ): """simple docstring""" __magic_name__ :Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*__lowerCAmelCase ) def A ( self ): """simple docstring""" __magic_name__ :Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__lowerCAmelCase ) def A ( self ): """simple docstring""" __magic_name__ :Dict = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_for_sequence_classification(*__lowerCAmelCase ) def A ( self ): """simple docstring""" __magic_name__ :Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__lowerCAmelCase ) @slow def A ( self ): """simple docstring""" __magic_name__ :Optional[Any] = TFRoFormerModel.from_pretrained('''junnyu/roformer_chinese_base''' ) self.assertIsNotNone(__lowerCAmelCase ) @require_tf class lowerCamelCase_ ( unittest.TestCase ): @slow def A ( self ): """simple docstring""" __magic_name__ :int = TFRoFormerForMaskedLM.from_pretrained('''junnyu/roformer_chinese_base''' ) __magic_name__ :Dict = tf.constant([[0, 1, 2, 3, 4, 5]] ) __magic_name__ :Optional[Any] = model(__lowerCAmelCase )[0] # TODO Replace vocab size __magic_name__ :int = 5_0_0_0_0 __magic_name__ :Tuple = [1, 6, vocab_size] self.assertEqual(output.shape , __lowerCAmelCase ) print(output[:, :3, :3] ) # TODO Replace values below with what was printed above. __magic_name__ :Any = tf.constant( [ [ [-0.12053341, -1.0264901, 0.29221946], [-1.5133783, 0.197433, 0.15190607], [-5.0135403, -3.900256, -0.84038764], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , __lowerCAmelCase , atol=1E-4 ) @require_tf class lowerCamelCase_ ( unittest.TestCase ): a__ = 1e-4 def A ( self ): """simple docstring""" __magic_name__ :Optional[int] = tf.constant([[4, 1_0]] ) __magic_name__ :Optional[int] = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 ) __magic_name__ :Optional[Any] = emba(input_ids.shape ) __magic_name__ :List[str] = tf.constant( [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]] ) tf.debugging.assert_near(__lowerCAmelCase , __lowerCAmelCase , atol=self.tolerance ) def A ( self ): """simple docstring""" __magic_name__ :Tuple = tf.constant( [ [0.0000, 0.0000, 0.0000, 0.0000, 0.0000], [0.8415, 0.8219, 0.8020, 0.7819, 0.7617], [0.9093, 0.9364, 0.9581, 0.9749, 0.9870], ] ) __magic_name__ :Union[str, Any] = TFRoFormerSinusoidalPositionalEmbedding(num_positions=5_1_2 , embedding_dim=5_1_2 ) emba([2, 1_6, 5_1_2] ) __magic_name__ :Optional[int] = emba.weight[:3, :5] tf.debugging.assert_near(__lowerCAmelCase , __lowerCAmelCase , atol=self.tolerance ) @require_tf class lowerCamelCase_ ( unittest.TestCase ): a__ = 1e-4 def A ( self ): """simple docstring""" # 2,12,16,64 __magic_name__ :int = tf.reshape(tf.range(2 * 1_2 * 1_6 * 6_4 , dtype=tf.floataa ) , shape=(2, 1_2, 1_6, 6_4) ) / 1_0_0 __magic_name__ :str = -tf.reshape(tf.range(2 * 1_2 * 1_6 * 6_4 , dtype=tf.floataa ) , shape=(2, 1_2, 1_6, 6_4) ) / 1_0_0 __magic_name__ :int = TFRoFormerSinusoidalPositionalEmbedding(num_positions=3_2 , embedding_dim=6_4 ) __magic_name__ :List[str] = embed_positions([2, 1_6, 7_6_8] )[None, None, :, :] __magic_name__ , __magic_name__ :Union[str, Any] = TFRoFormerSelfAttention.apply_rotary_position_embeddings( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) __magic_name__ :Tuple = tf.constant( [ [0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700], [-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343], [-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985], [-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871], [0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980], [3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253], ] ) __magic_name__ :List[str] = tf.constant( [ [0.0000, -0.0100, -0.0200, -0.0300, -0.0400, 
-0.0500, -0.0600, -0.0700], [0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343], [1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985], [2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871], [-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980], [-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253], ] ) tf.debugging.assert_near(query_layer[0, 0, :6, :8] , __lowerCAmelCase , atol=self.tolerance ) tf.debugging.assert_near(key_layer[0, 0, :6, :8] , __lowerCAmelCase , atol=self.tolerance )
0
1
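A minimal NumPy sketch (not part of the test file above) of the sinusoidal table those rotary-embedding tests assert against; the helper name build_sinusoidal is ours, not a library identifier. The first half of each row holds sines, the second half cosines, which is why position 0 yields [0, 0, 0, 1, 1, 1].

import numpy as np

def build_sinusoidal(num_positions, dim):
    # angle(p, i) = p / 10000**(2i / dim); row p is [sin(angles), cos(angles)]
    positions = np.arange(num_positions)[:, None]
    inv_freq = 1.0 / (10000 ** (np.arange(0, dim, 2) / dim))
    angles = positions * inv_freq[None, :]
    return np.concatenate([np.sin(angles), np.cos(angles)], axis=-1)

print(np.round(build_sinusoidal(6, 6)[:2], 4))
# [[0.     0.     0.     1.     1.     1.    ]
#  [0.8415 0.0464 0.0022 0.5403 0.9989 1.    ]]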
import math from collections.abc import Iterator from itertools import takewhile def __lowercase ( snake_case ): """simple docstring""" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All prime numbers are of the form 6k +/- 1 for i in range(5, int(math.sqrt(snake_case ) + 1 ), 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def __lowercase ( ): """simple docstring""" __magic_name__ :str = 2 while True: if is_prime(snake_case ): yield num num += 1 def __lowercase ( snake_case = 2_0_0_0_0_0_0 ): """simple docstring""" return sum(takewhile(lambda x : x < snake_case, prime_generator() ) ) if __name__ == "__main__": print(f"{solution() = }")
0
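A quick usage sketch for the prime-sum module above, written as comments because all three functions are anonymized to __lowercase in this dump; the names shown are the upstream Project Euler ones.

# is_prime(97)        -> True           (6k +/- 1 trial division)
# solution(10)        -> 17             (2 + 3 + 5 + 7)
# solution(2_000_000) -> 142913828922   (the Project Euler problem 10 answer)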
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available SCREAMING_SNAKE_CASE__ : Optional[int] = {"""tokenization_herbert""": ["""HerbertTokenizer"""]} try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ : Optional[Any] = ["""HerbertTokenizerFast"""] if TYPE_CHECKING: from .tokenization_herbert import HerbertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_herbert_fast import HerbertTokenizerFast else: import sys SCREAMING_SNAKE_CASE__ : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
0
1
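A short sketch of what the lazy wiring above gives the end user, assuming the module is installed at its upstream path (transformers.models.herbert): the slow tokenizer always resolves, while the fast class is only registered when the tokenizers backend is available.

from transformers import HerbertTokenizer  # resolved lazily on first access

try:
    from transformers import HerbertTokenizerFast  # requires the `tokenizers` package
except ImportError:
    HerbertTokenizerFast = None  # backend missing; the lazy module never registered the name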
from pathlib import Path from typing import List from transformers import is_torch_available, is_vision_available from transformers.testing_utils import get_tests_dir, is_tool_test from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText if is_torch_available(): import torch if is_vision_available(): from PIL import Image SCREAMING_SNAKE_CASE__ : int = ["""text""", """image""", """audio"""] def __lowercase ( snake_case ): """simple docstring""" __magic_name__ :Dict = [] for input_type in input_types: if input_type == "text": inputs.append('''Text input''' ) elif input_type == "image": inputs.append( Image.open(Path(get_tests_dir('''fixtures/tests_samples/COCO''' ) ) / '''000000039769.png''' ).resize((5_1_2, 5_1_2) ) ) elif input_type == "audio": inputs.append(torch.ones(3_0_0_0 ) ) elif isinstance(snake_case, snake_case ): inputs.append(create_inputs(snake_case ) ) else: raise ValueError(f'''Invalid type requested: {input_type}''' ) return inputs def __lowercase ( snake_case ): """simple docstring""" __magic_name__ :int = [] for output in outputs: if isinstance(snake_case, (str, AgentText) ): output_types.append('''text''' ) elif isinstance(snake_case, (Image.Image, AgentImage) ): output_types.append('''image''' ) elif isinstance(snake_case, (torch.Tensor, AgentAudio) ): output_types.append('''audio''' ) else: raise ValueError(f'''Invalid output: {output}''' ) return output_types @is_tool_test class lowerCamelCase_ : def A ( self ): """simple docstring""" self.assertTrue(hasattr(self.tool , '''inputs''' ) ) self.assertTrue(hasattr(self.tool , '''outputs''' ) ) __magic_name__ :Union[str, Any] = self.tool.inputs for _input in inputs: if isinstance(_input , __lowerCAmelCase ): for __input in _input: self.assertTrue(__input in authorized_types ) else: self.assertTrue(_input in authorized_types ) __magic_name__ :Optional[Any] = self.tool.outputs for _output in outputs: self.assertTrue(_output in authorized_types ) def A ( self ): """simple docstring""" __magic_name__ :List[Any] = create_inputs(self.tool.inputs ) __magic_name__ :Union[str, Any] = self.tool(*__lowerCAmelCase ) # There is a single output if len(self.tool.outputs ) == 1: __magic_name__ :Union[str, Any] = [outputs] self.assertListEqual(output_types(__lowerCAmelCase ) , self.tool.outputs ) def A ( self ): """simple docstring""" self.assertTrue(hasattr(self.tool , '''description''' ) ) self.assertTrue(hasattr(self.tool , '''default_checkpoint''' ) ) self.assertTrue(self.tool.description.startswith('''This is a tool that''' ) ) def A ( self ): """simple docstring""" __magic_name__ :str = create_inputs(self.tool.inputs ) __magic_name__ :Optional[Any] = self.tool(*__lowerCAmelCase ) if not isinstance(__lowerCAmelCase , __lowerCAmelCase ): __magic_name__ :str = [outputs] self.assertEqual(len(__lowerCAmelCase ) , len(self.tool.outputs ) ) for output, output_type in zip(__lowerCAmelCase , self.tool.outputs ): __magic_name__ :Tuple = AGENT_TYPE_MAPPING[output_type] self.assertTrue(isinstance(__lowerCAmelCase , __lowerCAmelCase ) ) def A ( self ): """simple docstring""" __magic_name__ :List[str] = create_inputs(self.tool.inputs ) __magic_name__ :Tuple = [] for _input, input_type in zip(__lowerCAmelCase , self.tool.inputs ): if isinstance(__lowerCAmelCase , __lowerCAmelCase ): _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] ) else: _inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) ) # Should not raise an error __magic_name__ :Optional[int] = 
self.tool(*__lowerCAmelCase ) if not isinstance(__lowerCAmelCase , __lowerCAmelCase ): __magic_name__ :Any = [outputs] self.assertEqual(len(__lowerCAmelCase ) , len(self.tool.outputs ) )
0
import argparse import gdown import numpy as np import torch from huggingface_hub import hf_hub_download from transformers import ( CLIPTokenizer, CLIPTokenizerFast, VideoMAEImageProcessor, XCLIPConfig, XCLIPModel, XCLIPProcessor, XCLIPTextConfig, XCLIPVisionConfig, ) def __lowercase ( snake_case, snake_case ): """simple docstring""" __magic_name__ :str = XCLIPTextConfig() # derive patch size from model name __magic_name__ :Union[str, Any] = model_name.find('''patch''' ) __magic_name__ :Optional[Any] = int(model_name[start_idx + len('''patch''' ) : start_idx + len('''patch''' ) + 2] ) __magic_name__ :int = XCLIPVisionConfig(patch_size=snake_case, num_frames=snake_case ) if "large" in model_name: __magic_name__ :Dict = 7_6_8 __magic_name__ :int = 3_0_7_2 __magic_name__ :List[Any] = 1_2 __magic_name__ :str = 1_0_2_4 __magic_name__ :Any = 4_0_9_6 __magic_name__ :Optional[Any] = 1_6 __magic_name__ :Union[str, Any] = 2_4 __magic_name__ :Union[str, Any] = 7_6_8 __magic_name__ :Tuple = 3_0_7_2 if model_name == "xclip-large-patch14-16-frames": __magic_name__ :List[str] = 3_3_6 __magic_name__ :Any = XCLIPConfig.from_text_vision_configs(snake_case, snake_case ) if "large" in model_name: __magic_name__ :str = 7_6_8 return config def __lowercase ( snake_case ): """simple docstring""" if name == "token_embedding.weight": __magic_name__ :Any = name.replace('''token_embedding.weight''', '''text_model.embeddings.token_embedding.weight''' ) if name == "positional_embedding": __magic_name__ :Any = name.replace('''positional_embedding''', '''text_model.embeddings.position_embedding.weight''' ) if "ln_1" in name: __magic_name__ :List[str] = name.replace('''ln_1''', '''layer_norm1''' ) if "ln_2" in name: __magic_name__ :str = name.replace('''ln_2''', '''layer_norm2''' ) if "c_fc" in name: __magic_name__ :List[Any] = name.replace('''c_fc''', '''fc1''' ) if "c_proj" in name: __magic_name__ :Any = name.replace('''c_proj''', '''fc2''' ) if name.startswith('''transformer.resblocks''' ): __magic_name__ :Any = name.replace('''transformer.resblocks''', '''text_model.encoder.layers''' ) if "attn.out_proj" in name and "message" not in name: __magic_name__ :Union[str, Any] = name.replace('''attn.out_proj''', '''self_attn.out_proj''' ) if "ln_final" in name: __magic_name__ :Tuple = name.replace('''ln_final''', '''text_model.final_layer_norm''' ) # visual encoder if name == "visual.class_embedding": __magic_name__ :List[Any] = name.replace('''visual.class_embedding''', '''vision_model.embeddings.class_embedding''' ) if name == "visual.positional_embedding": __magic_name__ :Any = name.replace('''visual.positional_embedding''', '''vision_model.embeddings.position_embedding.weight''' ) if name.startswith('''visual.transformer.resblocks''' ): __magic_name__ :Union[str, Any] = name.replace('''visual.transformer.resblocks''', '''vision_model.encoder.layers''' ) if "visual.conv1" in name: __magic_name__ :Tuple = name.replace('''visual.conv1''', '''vision_model.embeddings.patch_embedding''' ) if "visual.ln_pre" in name: __magic_name__ :Tuple = name.replace('''visual.ln_pre''', '''vision_model.pre_layernorm''' ) if "visual.ln_post" in name: __magic_name__ :Optional[Any] = name.replace('''visual.ln_post''', '''vision_model.post_layernorm''' ) if "visual.proj" in name: __magic_name__ :Tuple = name.replace('''visual.proj''', '''visual_projection.weight''' ) if "text_projection" in name: __magic_name__ :int = name.replace('''text_projection''', '''text_projection.weight''' ) # things on top if "prompts_visual_proj" in name: 
__magic_name__ :int = name.replace('''prompts_visual_proj''', '''prompts_visual_projection''' ) if "prompts_visual_ln" in name: __magic_name__ :Dict = name.replace('''prompts_visual_ln''', '''prompts_visual_layernorm''' ) # mit if name == "mit.positional_embedding": __magic_name__ :List[Any] = name.replace('''positional''', '''position''' ) if name.startswith('''mit.resblocks''' ): __magic_name__ :Union[str, Any] = name.replace('''mit.resblocks''', '''mit.encoder.layers''' ) # prompts generator if name.startswith('''prompts_generator.norm''' ): __magic_name__ :str = name.replace('''prompts_generator.norm''', '''prompts_generator.layernorm''' ) return name def __lowercase ( snake_case, snake_case ): """simple docstring""" for key in orig_state_dict.copy().keys(): __magic_name__ :Any = orig_state_dict.pop(snake_case ) if "attn.in_proj" in key: __magic_name__ :str = key.split('''.''' ) if key.startswith('''visual''' ): __magic_name__ :List[Any] = key_split[3] __magic_name__ :List[Any] = config.vision_config.hidden_size if "message_attn" in key: if "weight" in key: __magic_name__ :List[Any] = val[ :dim, : ] __magic_name__ :List[str] = val[ dim : dim * 2, : ] __magic_name__ :List[str] = val[ -dim:, : ] else: __magic_name__ :str = val[ :dim ] __magic_name__ :Optional[int] = val[ dim : dim * 2 ] __magic_name__ :Any = val[ -dim: ] else: if "weight" in key: __magic_name__ :int = val[ :dim, : ] __magic_name__ :Union[str, Any] = val[ dim : dim * 2, : ] __magic_name__ :List[Any] = val[ -dim:, : ] else: __magic_name__ :Union[str, Any] = val[:dim] __magic_name__ :str = val[ dim : dim * 2 ] __magic_name__ :Dict = val[-dim:] elif key.startswith('''mit''' ): __magic_name__ :List[Any] = key_split[2] __magic_name__ :Any = config.vision_config.mit_hidden_size if "weight" in key: __magic_name__ :Union[str, Any] = val[:dim, :] __magic_name__ :Optional[int] = val[dim : dim * 2, :] __magic_name__ :int = val[-dim:, :] else: __magic_name__ :Tuple = val[:dim] __magic_name__ :Optional[int] = val[dim : dim * 2] __magic_name__ :Optional[int] = val[-dim:] else: __magic_name__ :Any = key_split[2] __magic_name__ :List[Any] = config.text_config.hidden_size if "weight" in key: __magic_name__ :Union[str, Any] = val[:dim, :] __magic_name__ :Tuple = val[ dim : dim * 2, : ] __magic_name__ :str = val[-dim:, :] else: __magic_name__ :int = val[:dim] __magic_name__ :Any = val[ dim : dim * 2 ] __magic_name__ :str = val[-dim:] else: __magic_name__ :Tuple = rename_key(snake_case ) if new_key_name in ["visual_projection.weight", "text_projection.weight"]: __magic_name__ :List[Any] = val.T __magic_name__ :Optional[Any] = val return orig_state_dict def __lowercase ( snake_case ): """simple docstring""" if num_frames == 8: __magic_name__ :Any = '''eating_spaghetti_8_frames.npy''' elif num_frames == 1_6: __magic_name__ :List[Any] = '''eating_spaghetti.npy''' elif num_frames == 3_2: __magic_name__ :Tuple = '''eating_spaghetti_32_frames.npy''' __magic_name__ :str = hf_hub_download( repo_id='''hf-internal-testing/spaghetti-video''', filename=snake_case, repo_type='''dataset''', ) __magic_name__ :List[Any] = np.load(snake_case ) return list(snake_case ) def __lowercase ( snake_case, snake_case=None, snake_case=False ): """simple docstring""" __magic_name__ :Union[str, Any] = { # fully supervised kinetics-400 checkpoints '''xclip-base-patch32''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth''', '''xclip-base-patch32-16-frames''': ( 
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth''' ), '''xclip-base-patch16''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth''', '''xclip-base-patch16-16-frames''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth''' ), '''xclip-large-patch14''': '''https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&amp;export=download&amp;confirm=t&amp;uuid=b26caedc-88e2-473e-830a-9d158b653cdb''', '''xclip-large-patch14-16-frames''': '''https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&amp;export=download&amp;confirm=t&amp;uuid=538fa810-e671-4050-b385-9a623f89804f''', # fully supervised kinetics-600 checkpoints '''xclip-base-patch16-kinetics-600''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth''' ), '''xclip-base-patch16-kinetics-600-16-frames''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth''' ), '''xclip-large-patch14-kinetics-600''': '''https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&amp;export=download&amp;confirm=t&amp;uuid=141d4977-4a65-44ae-864f-4b0c19f838be''', # few shot '''xclip-base-patch16-hmdb-2-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth''' ), '''xclip-base-patch16-hmdb-4-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth''' ), '''xclip-base-patch16-hmdb-8-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth''' ), '''xclip-base-patch16-hmdb-16-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth''' ), '''xclip-base-patch16-ucf-2-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth''' ), '''xclip-base-patch16-ucf-4-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth''' ), '''xclip-base-patch16-ucf-8-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth''' ), '''xclip-base-patch16-ucf-16-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth''' ), # zero shot '''xclip-base-patch16-zero-shot''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth''', } __magic_name__ :Optional[int] = model_to_url[model_name] __magic_name__ :List[str] = 8 if "16-frames" in model_name: __magic_name__ :List[Any] = 1_6 elif "shot" in model_name: __magic_name__ :Dict = 3_2 __magic_name__ :str = get_xclip_config(snake_case, snake_case ) __magic_name__ :List[Any] = XCLIPModel(snake_case ) model.eval() if "drive" in checkpoint_url: __magic_name__ :Any = '''pytorch_model.bin''' gdown.cached_download(snake_case, snake_case, quiet=snake_case ) __magic_name__ :Optional[Any] = torch.load(snake_case, map_location='''cpu''' )['''model'''] else: __magic_name__ :Optional[int] = torch.hub.load_state_dict_from_url(snake_case )['''model'''] __magic_name__ :List[str] = convert_state_dict(snake_case, snake_case ) __magic_name__ :List[Any] = XCLIPModel(snake_case ) __magic_name__ , __magic_name__ :Optional[Any] = model.load_state_dict(snake_case, strict=snake_case ) assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"] model.eval() __magic_name__ :str = 3_3_6 if model_name == '''xclip-large-patch14-16-frames''' else 2_2_4 __magic_name__ :Optional[int] = VideoMAEImageProcessor(size=snake_case ) 
__magic_name__ :Optional[int] = CLIPTokenizer.from_pretrained('''openai/clip-vit-base-patch32''' ) __magic_name__ :Tuple = CLIPTokenizerFast.from_pretrained('''openai/clip-vit-base-patch32''' ) __magic_name__ :Optional[int] = XCLIPProcessor(image_processor=snake_case, tokenizer=snake_case ) __magic_name__ :List[Any] = prepare_video(snake_case ) __magic_name__ :str = processor( text=['''playing sports''', '''eating spaghetti''', '''go shopping'''], videos=snake_case, return_tensors='''pt''', padding=snake_case ) print('''Shape of pixel values:''', inputs.pixel_values.shape ) with torch.no_grad(): __magic_name__ :Tuple = model(**snake_case ) # Verify outputs __magic_name__ :Any = outputs.logits_per_video __magic_name__ :str = logits_per_video.softmax(dim=1 ) print('''Probs:''', snake_case ) # kinetics-400 if model_name == "xclip-base-patch32": __magic_name__ :Dict = torch.tensor([[0.0019, 0.9951, 0.0030]] ) elif model_name == "xclip-base-patch32-16-frames": __magic_name__ :str = torch.tensor([[7.0_9_9_9E-0_4, 9.9_8_8_3E-0_1, 4.5_5_8_0E-0_4]] ) elif model_name == "xclip-base-patch16": __magic_name__ :Tuple = torch.tensor([[0.0083, 0.9681, 0.0236]] ) elif model_name == "xclip-base-patch16-16-frames": __magic_name__ :Tuple = torch.tensor([[7.6_9_3_7E-0_4, 9.9_7_2_8E-0_1, 1.9_4_7_3E-0_3]] ) elif model_name == "xclip-large-patch14": __magic_name__ :str = torch.tensor([[0.0062, 0.9864, 0.0075]] ) elif model_name == "xclip-large-patch14-16-frames": __magic_name__ :Optional[int] = torch.tensor([[3.3_8_7_7E-0_4, 9.9_9_3_7E-0_1, 2.8_8_8_8E-0_4]] ) # kinetics-600 elif model_name == "xclip-base-patch16-kinetics-600": __magic_name__ :Optional[int] = torch.tensor([[0.0555, 0.8914, 0.0531]] ) elif model_name == "xclip-base-patch16-kinetics-600-16-frames": __magic_name__ :List[str] = torch.tensor([[3.8_5_5_4E-0_4, 9.9_9_2_9E-0_1, 3.2_7_5_4E-0_4]] ) elif model_name == "xclip-large-patch14-kinetics-600": __magic_name__ :List[str] = torch.tensor([[0.0036, 0.9920, 0.0045]] ) # few shot elif model_name == "xclip-base-patch16-hmdb-2-shot": __magic_name__ :Tuple = torch.tensor([[7.1_8_9_0E-0_6, 9.9_9_9_4E-0_1, 5.6_5_5_9E-0_5]] ) elif model_name == "xclip-base-patch16-hmdb-4-shot": __magic_name__ :List[str] = torch.tensor([[1.0_3_2_0E-0_5, 9.9_9_9_3E-0_1, 6.2_4_3_5E-0_5]] ) elif model_name == "xclip-base-patch16-hmdb-8-shot": __magic_name__ :Optional[int] = torch.tensor([[4.1_3_7_7E-0_6, 9.9_9_9_0E-0_1, 9.8_3_8_6E-0_5]] ) elif model_name == "xclip-base-patch16-hmdb-16-shot": __magic_name__ :Optional[int] = torch.tensor([[4.1_3_4_7E-0_5, 9.9_9_6_2E-0_1, 3.3_4_1_1E-0_4]] ) elif model_name == "xclip-base-patch16-ucf-2-shot": __magic_name__ :Union[str, Any] = torch.tensor([[8.5_8_5_7E-0_5, 9.9_9_2_8E-0_1, 6.3_2_9_1E-0_4]] ) elif model_name == "xclip-base-patch16-ucf-4-shot": __magic_name__ :Union[str, Any] = torch.tensor([[8.5_8_5_7E-0_5, 9.9_9_2_8E-0_1, 6.3_2_9_1E-0_4]] ) elif model_name == "xclip-base-patch16-ucf-8-shot": __magic_name__ :Optional[int] = torch.tensor([[0.0027, 0.9904, 0.0070]] ) elif model_name == "xclip-base-patch16-ucf-16-shot": __magic_name__ :Any = torch.tensor([[9.8_2_1_9E-0_4, 9.9_5_9_3E-0_1, 3.0_8_6_3E-0_3]] ) # zero shot elif model_name == "xclip-base-patch16-zero-shot": __magic_name__ :Optional[int] = torch.tensor([[3.5_0_8_2E-0_4, 9.9_7_8_5E-0_1, 1.7_9_6_6E-0_3]] ) else: raise ValueError(f'''Model name {model_name} not supported''' ) assert torch.allclose(snake_case, snake_case, atol=1E-3 ) print('''Looks ok!''' ) if pytorch_dump_folder_path is not None: print(f'''Saving model {model_name} 
to {pytorch_dump_folder_path}''' ) model.save_pretrained(snake_case ) if push_to_hub: print('''Pushing model, processor and slow tokenizer files to the hub...''' ) model.push_to_hub(snake_case, organization='''nielsr''' ) processor.push_to_hub(snake_case, organization='''nielsr''' ) slow_tokenizer.push_to_hub(snake_case, organization='''nielsr''' ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""xclip-base-patch32""", type=str, help="""Name of the model.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) SCREAMING_SNAKE_CASE__ : List[Any] = parser.parse_args() convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
0
1
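Two worked examples, traced directly through the rename rules in rename_key above (the substring replacements apply before the prefix rewrite, so both fire on the first key):

# "transformer.resblocks.0.ln_1.weight"
#     -> "text_model.encoder.layers.0.layer_norm1.weight"
# "visual.conv1.weight"
#     -> "vision_model.embeddings.patch_embedding.weight"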
import gzip import hashlib import json import multiprocessing import os import re import shutil import time from pathlib import Path import numpy as np from arguments import PreprocessingArguments from datasets import load_dataset from minhash_deduplication import deduplicate_dataset from transformers import AutoTokenizer, HfArgumentParser SCREAMING_SNAKE_CASE__ : Union[str, Any] = re.compile(r"""\s+""") def __lowercase ( snake_case ): """simple docstring""" return {"hash": hashlib.md5(re.sub(snake_case, '''''', example['''content'''] ).encode('''utf-8''' ) ).hexdigest()} def __lowercase ( snake_case ): """simple docstring""" __magic_name__ :List[Any] = [len(snake_case ) for line in example['''content'''].splitlines()] return {"line_mean": np.mean(snake_case ), "line_max": max(snake_case )} def __lowercase ( snake_case ): """simple docstring""" __magic_name__ :Union[str, Any] = np.mean([c.isalnum() for c in example['''content''']] ) return {"alpha_frac": alpha_frac} def __lowercase ( snake_case, snake_case ): """simple docstring""" if example["hash"] in uniques: uniques.remove(example['''hash'''] ) return True else: return False def __lowercase ( snake_case, snake_case=5 ): """simple docstring""" __magic_name__ :int = ['''auto-generated''', '''autogenerated''', '''automatically generated'''] __magic_name__ :Dict = example['''content'''].splitlines() for _, line in zip(range(snake_case ), snake_case ): for keyword in keywords: if keyword in line.lower(): return {"autogenerated": True} else: return {"autogenerated": False} def __lowercase ( snake_case, snake_case=5, snake_case=0.05 ): """simple docstring""" __magic_name__ :str = ['''unit tests''', '''test file''', '''configuration file'''] __magic_name__ :Optional[Any] = example['''content'''].splitlines() __magic_name__ :Any = 0 __magic_name__ :List[Any] = 0 # first test for _, line in zip(range(snake_case ), snake_case ): for keyword in keywords: if keyword in line.lower(): return {"config_or_test": True} # second test __magic_name__ :List[str] = example['''content'''].count('''\n''' ) __magic_name__ :Optional[Any] = int(coeff * nlines ) for line in lines: count_config += line.lower().count('''config''' ) count_test += line.lower().count('''test''' ) if count_config > threshold or count_test > threshold: return {"config_or_test": True} return {"config_or_test": False} def __lowercase ( snake_case ): """simple docstring""" __magic_name__ :Optional[int] = ['''def ''', '''class ''', '''for ''', '''while '''] __magic_name__ :int = example['''content'''].splitlines() for line in lines: for keyword in keywords: if keyword in line.lower(): return {"has_no_keywords": False} return {"has_no_keywords": True} def __lowercase ( snake_case, snake_case=4 ): """simple docstring""" __magic_name__ :Any = example['''content'''].splitlines() __magic_name__ :Optional[int] = 0 for line in lines: counter += line.lower().count('''=''' ) if counter > minimum: return {"has_few_assignments": False} return {"has_few_assignments": True} def __lowercase ( snake_case ): """simple docstring""" __magic_name__ :List[Any] = tokenizer(example['''content'''], truncation=snake_case )['''input_ids'''] __magic_name__ :Optional[Any] = len(example['''content'''] ) / len(snake_case ) return {"ratio": ratio} def __lowercase ( snake_case ): """simple docstring""" __magic_name__ :Union[str, Any] = {} results.update(get_hash(snake_case ) ) results.update(line_stats(snake_case ) ) results.update(alpha_stats(snake_case ) ) results.update(char_token_ratio(snake_case ) )
results.update(is_autogenerated(snake_case ) ) results.update(is_config_or_test(snake_case ) ) results.update(has_no_keywords(snake_case ) ) results.update(has_few_assignments(snake_case ) ) return results def __lowercase ( snake_case, snake_case, snake_case ): """simple docstring""" if not check_uniques(snake_case, snake_case ): return False elif example["autogenerated"]: return False elif example["line_max"] > args.line_max: return False elif example["line_mean"] > args.line_mean: return False elif example["alpha_frac"] < args.alpha_frac: return False elif example["ratio"] < args.min_token_ratio: return False elif example["config_or_test"] and np.random.rand() <= args.filter_proba: return False elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba: return False elif example["has_few_assignments"]: return False else: return True def __lowercase ( snake_case ): """simple docstring""" with open(snake_case, '''rb''' ) as f_in: with gzip.open(str(snake_case ) + '''.gz''', '''wb''', compresslevel=6 ) as f_out: shutil.copyfileobj(snake_case, snake_case ) os.unlink(snake_case ) # Settings SCREAMING_SNAKE_CASE__ : int = HfArgumentParser(PreprocessingArguments) SCREAMING_SNAKE_CASE__ : Union[str, Any] = parser.parse_args() if args.num_workers is None: SCREAMING_SNAKE_CASE__ : Tuple = multiprocessing.cpu_count() SCREAMING_SNAKE_CASE__ : List[Any] = AutoTokenizer.from_pretrained(args.tokenizer_dir) # Load dataset SCREAMING_SNAKE_CASE__ : List[Any] = time.time() SCREAMING_SNAKE_CASE__ : List[Any] = load_dataset(args.dataset_name, split="""train""") print(f"Time to load dataset: {time.time()-t_start:.2f}") # Run preprocessing SCREAMING_SNAKE_CASE__ : Union[str, Any] = time.time() SCREAMING_SNAKE_CASE__ : Optional[int] = ds.map(preprocess, num_proc=args.num_workers) print(f"Time to preprocess dataset: {time.time()-t_start:.2f}") # Deduplicate hashes SCREAMING_SNAKE_CASE__ : int = set(ds.unique("""hash""")) SCREAMING_SNAKE_CASE__ : List[Any] = len(uniques) / len(ds) print(f"Fraction of duplicates: {1-frac:.2%}") # Deduplicate data and apply heuristics SCREAMING_SNAKE_CASE__ : int = time.time() SCREAMING_SNAKE_CASE__ : List[Any] = ds.filter(filter, fn_kwargs={"""uniques""": uniques, """args""": args}) print(f"Time to filter dataset: {time.time()-t_start:.2f}") print(f"Size of filtered dataset: {len(ds_filter)}") # Deduplicate with minhash and jaccard similarity if args.near_deduplication: SCREAMING_SNAKE_CASE__ : List[Any] = time.time() SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = deduplicate_dataset(ds_filter, args.jaccard_threshold) print(f"Time to deduplicate dataset: {time.time()-t_start:.2f}") print(f"Size of deduplicate dataset: {len(ds_filter)}") # Save data in batches of samples_per_file SCREAMING_SNAKE_CASE__ : Any = Path(args.output_dir) output_dir.mkdir(exist_ok=True) # save duplicate_clusters in the output_dir as artifacts # not sure it is the right place the save it if args.near_deduplication: with open(output_dir / """duplicate_clusters.json""", """w""") as f: json.dump(duplicate_clusters, f) SCREAMING_SNAKE_CASE__ : Optional[Any] = output_dir / """data""" data_dir.mkdir(exist_ok=True) SCREAMING_SNAKE_CASE__ : Union[str, Any] = time.time() for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)): SCREAMING_SNAKE_CASE__ : str = str(data_dir / f"file-{file_number+1:012}.json") SCREAMING_SNAKE_CASE__ : Tuple = min(len(ds_filter), index + args.samples_per_file) ds_filter.select(list(range(index, end_index))).to_json(file_path) 
compress_file(file_path) print(f"Time to save dataset: {time.time()-t_start:.2f}")
0
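A self-contained sketch of the exact-deduplication step driven by get_hash and check_uniques above; the helper names content_hash and keep_first are ours, chosen for clarity.

import hashlib
import re

PATTERN = re.compile(r"\s+")

def content_hash(text):
    # md5 of the whitespace-stripped content, mirroring get_hash above
    return hashlib.md5(PATTERN.sub("", text).encode("utf-8")).hexdigest()

seen = set()

def keep_first(example):
    # keep only the first row carrying each content hash
    h = content_hash(example["content"])
    if h in seen:
        return False
    seen.add(h)
    return True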
import numpy as np import torch from torch.utils.data import Dataset from utils import logger class lowerCamelCase_ ( lowerCamelCase ): def __init__( self , __lowerCAmelCase , __lowerCAmelCase ): """simple docstring""" __magic_name__ :Optional[int] = params __magic_name__ :Any = np.array(__lowerCAmelCase ) __magic_name__ :Optional[Any] = np.array([len(__lowerCAmelCase ) for t in data] ) self.check() self.remove_long_sequences() self.remove_empty_sequences() self.remove_unknown_sequences() self.check() self.print_statistics() def __getitem__( self , __lowerCAmelCase ): """simple docstring""" return (self.token_ids[index], self.lengths[index]) def __len__( self ): """simple docstring""" return len(self.lengths ) def A ( self ): """simple docstring""" assert len(self.token_ids ) == len(self.lengths ) assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) ) def A ( self ): """simple docstring""" __magic_name__ :Any = self.params.max_model_input_size __magic_name__ :int = self.lengths > max_len logger.info(F'''Splitting {sum(__lowerCAmelCase )} too long sequences.''' ) def divide_chunks(__lowerCAmelCase , __lowerCAmelCase ): return [l[i : i + n] for i in range(0 , len(__lowerCAmelCase ) , __lowerCAmelCase )] __magic_name__ :Optional[int] = [] __magic_name__ :List[Any] = [] if self.params.mlm: __magic_name__ , __magic_name__ :Optional[Any] = self.params.special_tok_ids['''cls_token'''], self.params.special_tok_ids['''sep_token'''] else: __magic_name__ , __magic_name__ :Tuple = self.params.special_tok_ids['''bos_token'''], self.params.special_tok_ids['''eos_token'''] for seq_, len_ in zip(self.token_ids , self.lengths ): assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_ if len_ <= max_len: new_tok_ids.append(seq_ ) new_lengths.append(len_ ) else: __magic_name__ :int = [] for sub_s in divide_chunks(seq_ , max_len - 2 ): if sub_s[0] != cls_id: __magic_name__ :List[Any] = np.insert(__lowerCAmelCase , 0 , __lowerCAmelCase ) if sub_s[-1] != sep_id: __magic_name__ :Union[str, Any] = np.insert(__lowerCAmelCase , len(__lowerCAmelCase ) , __lowerCAmelCase ) assert len(__lowerCAmelCase ) <= max_len assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s sub_seqs.append(__lowerCAmelCase ) new_tok_ids.extend(__lowerCAmelCase ) new_lengths.extend([len(__lowerCAmelCase ) for l in sub_seqs] ) __magic_name__ :Tuple = np.array(__lowerCAmelCase ) __magic_name__ :Optional[int] = np.array(__lowerCAmelCase ) def A ( self ): """simple docstring""" __magic_name__ :Optional[Any] = len(self ) __magic_name__ :int = self.lengths > 1_1 __magic_name__ :List[str] = self.token_ids[indices] __magic_name__ :Union[str, Any] = self.lengths[indices] __magic_name__ :List[str] = len(self ) logger.info(F'''Remove {init_size - new_size} too short (<=11 tokens) sequences.''' ) def A ( self ): """simple docstring""" if "unk_token" not in self.params.special_tok_ids: return else: __magic_name__ :Tuple = self.params.special_tok_ids['''unk_token'''] __magic_name__ :Dict = len(self ) __magic_name__ :Tuple = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] ) __magic_name__ :int = (unk_occs / self.lengths) < 0.5 __magic_name__ :str = self.token_ids[indices] __magic_name__ :str = self.lengths[indices] __magic_name__ :Any = len(self ) logger.info(F'''Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).''' ) def A ( self ): """simple docstring""" if not self.params.is_master: return logger.info(F'''{len(self )} sequences''' ) # data_len = 
sum(self.lengths) # nb_unique_tokens = len(Counter(list(chain(*self.token_ids)))) # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)') # unk_idx = self.params.special_tok_ids['unk_token'] # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids]) # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)') def A ( self , __lowerCAmelCase ): """simple docstring""" __magic_name__ :Optional[Any] = [t[0] for t in batch] __magic_name__ :List[Any] = [t[1] for t in batch] assert len(__lowerCAmelCase ) == len(__lowerCAmelCase ) # Max for paddings __magic_name__ :Tuple = max(__lowerCAmelCase ) # Pad token ids if self.params.mlm: __magic_name__ :Any = self.params.special_tok_ids['''pad_token'''] else: __magic_name__ :str = self.params.special_tok_ids['''unk_token'''] __magic_name__ :Any = [list(t.astype(__lowerCAmelCase ) ) + [pad_idx] * (max_seq_len_ - len(__lowerCAmelCase )) for t in token_ids] assert len(tk_ ) == len(__lowerCAmelCase ) assert all(len(__lowerCAmelCase ) == max_seq_len_ for t in tk_ ) __magic_name__ :Optional[int] = torch.tensor(tk_ ) # (bs, max_seq_len_) __magic_name__ :Optional[int] = torch.tensor(__lowerCAmelCase ) # (bs) return tk_t, lg_t
0
1
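The collate method above reduces to right-padding each sequence to the batch maximum; a toy, framework-level sketch (pad_idx and the batch values are made up, and the real code picks pad_token or unk_token depending on the MLM flag):

import torch

token_ids = [[5, 6, 7], [5, 6, 7, 8, 9]]
lengths = [len(t) for t in token_ids]
pad_idx = 0  # placeholder pad id
max_len = max(lengths)
padded = [t + [pad_idx] * (max_len - len(t)) for t in token_ids]
tk_t = torch.tensor(padded)   # (bs, max_seq_len_)
lg_t = torch.tensor(lengths)  # (bs,)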
# limitations under the License. from typing import Optional, Tuple, Union import torch from diffusers import DiffusionPipeline, ImagePipelineOutput class lowerCamelCase_ ( lowerCamelCase ): def __init__( self , __lowerCAmelCase , __lowerCAmelCase ): """simple docstring""" super().__init__() self.register_modules(unet=__lowerCAmelCase , scheduler=__lowerCAmelCase ) @torch.no_grad() def __call__( self , __lowerCAmelCase = 1 , __lowerCAmelCase = None , __lowerCAmelCase = 5_0 , __lowerCAmelCase = "pil" , __lowerCAmelCase = True , **__lowerCAmelCase , ): """simple docstring""" __magic_name__ :List[str] = torch.randn( (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=__lowerCAmelCase , ) __magic_name__ :Union[str, Any] = image.to(self.device ) # set step values self.scheduler.set_timesteps(__lowerCAmelCase ) for t in self.progress_bar(self.scheduler.timesteps ): # 1. predict noise model_output __magic_name__ :Union[str, Any] = self.unet(__lowerCAmelCase , __lowerCAmelCase ).sample # 2. predict previous mean of image x_t-1 and add variance depending on eta # eta corresponds to η in paper and should be between [0, 1] # do x_t -> x_t-1 __magic_name__ :List[str] = self.scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ).prev_sample __magic_name__ :Optional[Any] = (image / 2 + 0.5).clamp(0 , 1 ) __magic_name__ :int = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": __magic_name__ :Dict = self.numpy_to_pil(__lowerCAmelCase ) if not return_dict: return (image,) return ImagePipelineOutput(images=__lowerCAmelCase )
0
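A hedged usage sketch for the unconditional pipeline above, kept as comments because the class itself is anonymized in this dump; MyUnconditionalPipeline and the checkpoint name are placeholders, not verified identifiers.

# pipe = MyUnconditionalPipeline.from_pretrained("some/unconditional-checkpoint")
# out = pipe(batch_size=1, num_inference_steps=50, output_type="pil")
# out.images[0].save("sample.png")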
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging SCREAMING_SNAKE_CASE__ : str = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : Tuple = """▁""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = {"""vocab_file""": """spiece.model"""} SCREAMING_SNAKE_CASE__ : List[Any] = { """vocab_file""": { """google/reformer-crime-and-punishment""": ( """https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model""" ) } } SCREAMING_SNAKE_CASE__ : Optional[int] = { """google/reformer-crime-and-punishment""": 52_42_88, } class lowerCamelCase_ ( lowerCamelCase ): a__ = VOCAB_FILES_NAMES a__ = PRETRAINED_VOCAB_FILES_MAP a__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a__ = ['''input_ids''', '''attention_mask'''] def __init__( self , __lowerCAmelCase , __lowerCAmelCase="</s>" , __lowerCAmelCase="<unk>" , __lowerCAmelCase=[] , __lowerCAmelCase = None , **__lowerCAmelCase , ): """simple docstring""" __magic_name__ :int = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( eos_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , additional_special_tokens=__lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCAmelCase , ) __magic_name__ :Optional[Any] = vocab_file __magic_name__ :int = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(__lowerCAmelCase ) @property def A ( self ): """simple docstring""" return self.sp_model.get_piece_size() def A ( self ): """simple docstring""" __magic_name__ :str = {self.convert_ids_to_tokens(__lowerCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ): """simple docstring""" __magic_name__ :Optional[Any] = self.__dict__.copy() __magic_name__ :Optional[Any] = None return state def __setstate__( self , __lowerCAmelCase ): """simple docstring""" __magic_name__ :Any = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): __magic_name__ :Optional[int] = {} __magic_name__ :Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def A ( self , __lowerCAmelCase ): """simple docstring""" return self.sp_model.encode(__lowerCAmelCase , out_type=__lowerCAmelCase ) def A ( self , __lowerCAmelCase ): """simple docstring""" return self.sp_model.piece_to_id(__lowerCAmelCase ) def A ( self , __lowerCAmelCase ): """simple docstring""" if index < self.sp_model.get_piece_size(): __magic_name__ :int = self.sp_model.IdToPiece(__lowerCAmelCase ) return token def A ( self , __lowerCAmelCase ): """simple docstring""" __magic_name__ :Optional[Any] = [] __magic_name__ :Tuple = '''''' for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(__lowerCAmelCase ) + token __magic_name__ :Optional[Any] = [] else: current_sub_tokens.append(__lowerCAmelCase ) out_string += self.sp_model.decode(__lowerCAmelCase ) return out_string.strip() def A ( self , __lowerCAmelCase , __lowerCAmelCase = None ): """simple docstring""" if not os.path.isdir(__lowerCAmelCase ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return __magic_name__ :Optional[int] = os.path.join( __lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file 
) != os.path.abspath(__lowerCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __lowerCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(__lowerCAmelCase , '''wb''' ) as fi: __magic_name__ :Dict = self.sp_model.serialized_model_proto() fi.write(__lowerCAmelCase ) return (out_vocab_file,)
0
1
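A round-trip sketch using the checkpoint registered in the pretrained maps above; ReformerTokenizer is the upstream name of the anonymized class, inferred from those maps rather than stated in this dump.

from transformers import ReformerTokenizer

tok = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
ids = tok.encode("Crime and Punishment")  # SentencePiece subword ids
print(tok.convert_ids_to_tokens(ids))
print(tok.decode(ids))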
def __lowercase ( snake_case ): """simple docstring""" if not isinstance(snake_case, snake_case ): raise ValueError('''multiplicative_persistence() only accepts integral values''' ) if num < 0: raise ValueError('''multiplicative_persistence() does not accept negative values''' ) __magic_name__ :str = 0 __magic_name__ :Dict = str(snake_case ) while len(snake_case ) != 1: __magic_name__ :Optional[Any] = [int(snake_case ) for i in num_string] __magic_name__ :Dict = 1 for i in range(0, len(snake_case ) ): total *= numbers[i] __magic_name__ :int = str(snake_case ) steps += 1 return steps def __lowercase ( snake_case ): """simple docstring""" if not isinstance(snake_case, snake_case ): raise ValueError('''additive_persistence() only accepts integral values''' ) if num < 0: raise ValueError('''additive_persistence() does not accept negative values''' ) __magic_name__ :str = 0 __magic_name__ :Union[str, Any] = str(snake_case ) while len(snake_case ) != 1: __magic_name__ :str = [int(snake_case ) for i in num_string] __magic_name__ :Optional[int] = 0 for i in range(0, len(snake_case ) ): total += numbers[i] __magic_name__ :int = str(snake_case ) steps += 1 return steps if __name__ == "__main__": import doctest doctest.testmod()
0
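Worked examples for the two functions above, shown with their upstream names since both are anonymized to __lowercase in this dump:

# multiplicative_persistence(39)  -> 3   (39 -> 27 -> 14 -> 4)
# additive_persistence(199)       -> 3   (199 -> 19 -> 10 -> 1)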
import os import unittest from transformers import MobileBertTokenizer, MobileBertTokenizerFast from transformers.models.bert.tokenization_bert import ( VOCAB_FILES_NAMES, BasicTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english @require_tokenizers class lowerCamelCase_ ( lowerCamelCase , unittest.TestCase ): a__ = MobileBertTokenizer a__ = MobileBertTokenizerFast a__ = True a__ = True a__ = filter_non_english a__ = '''google/mobilebert-uncased''' def A ( self ): """simple docstring""" super().setUp() __magic_name__ :Tuple = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] __magic_name__ :Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) __magic_name__ :List[str] = [ (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped for tokenizer_def in self.tokenizers_list ] def A ( self , __lowerCAmelCase ): """simple docstring""" __magic_name__ :Union[str, Any] = '''UNwant\u00E9d,running''' __magic_name__ :int = '''unwanted, running''' return input_text, output_text def A ( self ): """simple docstring""" __magic_name__ :Optional[int] = self.tokenizer_class(self.vocab_file ) __magic_name__ :List[Any] = tokenizer.tokenize('''UNwant\u00E9d,running''' ) self.assertListEqual(__lowerCAmelCase , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) , [9, 6, 7, 1_2, 1_0, 1_1] ) def A ( self ): """simple docstring""" if not self.test_rust_tokenizer: return __magic_name__ :int = self.get_tokenizer() __magic_name__ :Tuple = self.get_rust_tokenizer() __magic_name__ :List[str] = '''UNwant\u00E9d,running''' __magic_name__ :Optional[Any] = tokenizer.tokenize(__lowerCAmelCase ) __magic_name__ :List[Any] = rust_tokenizer.tokenize(__lowerCAmelCase ) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase ) __magic_name__ :int = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ) __magic_name__ :str = rust_tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase ) __magic_name__ :List[Any] = self.get_rust_tokenizer() __magic_name__ :Any = tokenizer.encode(__lowerCAmelCase ) __magic_name__ :Any = rust_tokenizer.encode(__lowerCAmelCase ) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase ) # With lower casing __magic_name__ :Any = self.get_tokenizer(do_lower_case=__lowerCAmelCase ) __magic_name__ :List[Any] = self.get_rust_tokenizer(do_lower_case=__lowerCAmelCase ) __magic_name__ :Dict = '''UNwant\u00E9d,running''' __magic_name__ :Tuple = tokenizer.tokenize(__lowerCAmelCase ) __magic_name__ :Union[str, Any] = rust_tokenizer.tokenize(__lowerCAmelCase ) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase ) __magic_name__ :Optional[Any] = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ) __magic_name__ :Dict = rust_tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase ) __magic_name__ :Tuple = 
self.get_rust_tokenizer() __magic_name__ :Dict = tokenizer.encode(__lowerCAmelCase ) __magic_name__ :List[Any] = rust_tokenizer.encode(__lowerCAmelCase ) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase ) def A ( self ): """simple docstring""" __magic_name__ :Optional[int] = BasicTokenizer() self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] ) def A ( self ): """simple docstring""" __magic_name__ :List[Any] = BasicTokenizer(do_lower_case=__lowerCAmelCase ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] ) def A ( self ): """simple docstring""" __magic_name__ :Union[str, Any] = BasicTokenizer(do_lower_case=__lowerCAmelCase , strip_accents=__lowerCAmelCase ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] ) def A ( self ): """simple docstring""" __magic_name__ :Dict = BasicTokenizer(do_lower_case=__lowerCAmelCase , strip_accents=__lowerCAmelCase ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] ) def A ( self ): """simple docstring""" __magic_name__ :Optional[int] = BasicTokenizer(do_lower_case=__lowerCAmelCase ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] ) def A ( self ): """simple docstring""" __magic_name__ :List[str] = BasicTokenizer(do_lower_case=__lowerCAmelCase ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def A ( self ): """simple docstring""" __magic_name__ :int = BasicTokenizer(do_lower_case=__lowerCAmelCase , strip_accents=__lowerCAmelCase ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def A ( self ): """simple docstring""" __magic_name__ :Optional[int] = BasicTokenizer(do_lower_case=__lowerCAmelCase , strip_accents=__lowerCAmelCase ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def A ( self ): """simple docstring""" __magic_name__ :Optional[Any] = BasicTokenizer(do_lower_case=__lowerCAmelCase , never_split=['''[UNK]'''] ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? 
[UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] ) def A ( self ): """simple docstring""" __magic_name__ :int = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing'''] __magic_name__ :Union[str, Any] = {} for i, token in enumerate(__lowerCAmelCase ): __magic_name__ :Tuple = i __magic_name__ :List[Any] = WordpieceTokenizer(vocab=__lowerCAmelCase , unk_token='''[UNK]''' ) self.assertListEqual(tokenizer.tokenize('''''' ) , [] ) self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] ) self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] ) def A ( self ): """simple docstring""" self.assertTrue(_is_whitespace(''' ''' ) ) self.assertTrue(_is_whitespace('''\t''' ) ) self.assertTrue(_is_whitespace('''\r''' ) ) self.assertTrue(_is_whitespace('''\n''' ) ) self.assertTrue(_is_whitespace('''\u00A0''' ) ) self.assertFalse(_is_whitespace('''A''' ) ) self.assertFalse(_is_whitespace('''-''' ) ) def A ( self ): """simple docstring""" self.assertTrue(_is_control('''\u0005''' ) ) self.assertFalse(_is_control('''A''' ) ) self.assertFalse(_is_control(''' ''' ) ) self.assertFalse(_is_control('''\t''' ) ) self.assertFalse(_is_control('''\r''' ) ) def A ( self ): """simple docstring""" self.assertTrue(_is_punctuation('''-''' ) ) self.assertTrue(_is_punctuation('''$''' ) ) self.assertTrue(_is_punctuation('''`''' ) ) self.assertTrue(_is_punctuation('''.''' ) ) self.assertFalse(_is_punctuation('''A''' ) ) self.assertFalse(_is_punctuation(''' ''' ) ) def A ( self ): """simple docstring""" __magic_name__ :Any = self.get_tokenizer() __magic_name__ :Any = self.get_rust_tokenizer() # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340 self.assertListEqual([tokenizer.tokenize(__lowerCAmelCase ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] ) self.assertListEqual( [rust_tokenizer.tokenize(__lowerCAmelCase ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] ) @slow def A ( self ): """simple docstring""" __magic_name__ :Optional[int] = self.tokenizer_class.from_pretrained('''google/mobilebert-uncased''' ) __magic_name__ :Optional[int] = tokenizer.encode('''sequence builders''' , add_special_tokens=__lowerCAmelCase ) __magic_name__ :List[Any] = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__lowerCAmelCase ) __magic_name__ :Union[str, Any] = tokenizer.build_inputs_with_special_tokens(__lowerCAmelCase ) __magic_name__ :List[Any] = tokenizer.build_inputs_with_special_tokens(__lowerCAmelCase , __lowerCAmelCase ) assert encoded_sentence == [1_0_1] + text + [1_0_2] assert encoded_pair == [1_0_1] + text + [1_0_2] + text_a + [1_0_2] def A ( self ): """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): __magic_name__ :Optional[Any] = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase ) __magic_name__ :Optional[int] = F'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.''' __magic_name__ :Optional[Any] = tokenizer_r.encode_plus( __lowerCAmelCase , return_attention_mask=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , ) __magic_name__ :Any = 
tokenizer_r.do_lower_case if hasattr(__lowerCAmelCase , '''do_lower_case''' ) else False __magic_name__ :Optional[int] = ( [ ((0, 0), tokenizer_r.cls_token), ((0, 1), '''A'''), ((1, 2), ''','''), ((3, 5), '''na'''), ((5, 6), '''##ï'''), ((6, 8), '''##ve'''), ((9, 1_5), tokenizer_r.mask_token), ((1_6, 2_1), '''Allen'''), ((2_1, 2_3), '''##NL'''), ((2_3, 2_4), '''##P'''), ((2_5, 3_3), '''sentence'''), ((3_3, 3_4), '''.'''), ((0, 0), tokenizer_r.sep_token), ] if not do_lower_case else [ ((0, 0), tokenizer_r.cls_token), ((0, 1), '''a'''), ((1, 2), ''','''), ((3, 8), '''naive'''), ((9, 1_5), tokenizer_r.mask_token), ((1_6, 2_1), '''allen'''), ((2_1, 2_3), '''##nl'''), ((2_3, 2_4), '''##p'''), ((2_5, 3_3), '''sentence'''), ((3_3, 3_4), '''.'''), ((0, 0), tokenizer_r.sep_token), ] ) self.assertEqual( [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) ) self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] ) def A ( self ): """simple docstring""" __magic_name__ :Dict = ['''的''', '''人''', '''有'''] __magic_name__ :Any = ''''''.join(__lowerCAmelCase ) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): __magic_name__ :Optional[Any] = True __magic_name__ :Optional[int] = self.tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase ) __magic_name__ :Tuple = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase ) __magic_name__ :Dict = tokenizer_p.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ) __magic_name__ :List[str] = tokenizer_r.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ) __magic_name__ :Dict = tokenizer_r.convert_ids_to_tokens(__lowerCAmelCase ) __magic_name__ :Union[str, Any] = tokenizer_p.convert_ids_to_tokens(__lowerCAmelCase ) # it is expected that each Chinese character is not preceded by "##" self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase ) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase ) __magic_name__ :List[str] = False __magic_name__ :Tuple = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase ) __magic_name__ :List[str] = self.tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase ) __magic_name__ :Optional[Any] = tokenizer_r.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ) __magic_name__ :Union[str, Any] = tokenizer_p.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ) __magic_name__ :List[str] = tokenizer_r.convert_ids_to_tokens(__lowerCAmelCase ) __magic_name__ :Optional[int] = tokenizer_p.convert_ids_to_tokens(__lowerCAmelCase ) # it is expected that only the first Chinese character is not preceded by "##". __magic_name__ :Dict = [ F'''##{token}''' if idx != 0 else token for idx, token in enumerate(__lowerCAmelCase ) ] self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase ) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
import math_equivalence  # From: git+https://github.com/hendrycks/math.git

import datasets


_CITATION = """\
@article{hendrycksmath2021,
    title={Measuring Mathematical Problem Solving With the MATH Dataset},
    author={Dan Hendrycks and Collin Burns and Saurav Kadavath and Akul Arora and Steven Basart and Eric Tang and Dawn Song and Jacob Steinhardt},
    journal={arXiv preprint arXiv:2103.03874},
    year={2021}
}
"""

_DESCRIPTION = """\
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
It first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.
"""

_KWARGS_DESCRIPTION = r"""
Calculates accuracy after canonicalizing inputs.
Args:
    predictions: list of predictions to score. Each prediction
        is a string that contains natural language and LaTex.
    references: list of references for each prediction. Each
        reference is a string that contains natural language and LaTex.
Returns:
    accuracy: accuracy after canonicalizing inputs
        (e.g., converting "1/2" to "\\frac{1}{2}")
Examples:
    >>> metric = datasets.load_metric("competition_math")
    >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])
    >>> print(results)
    {'accuracy': 1.0}
"""


@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CompetitionMath(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string"),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/hendrycks/math",
            codebase_urls=["https://github.com/hendrycks/math"],
        )

    def _compute(self, predictions, references):
        """Return accuracy after canonicalizing each prediction/reference pair."""
        n_correct = 0.0
        for pred, ref in zip(predictions, references):
            n_correct += 1.0 if math_equivalence.is_equiv(pred, ref) else 0.0
        accuracy = n_correct / len(predictions)
        return {
            "accuracy": accuracy,
        }
import logging import os import quant_trainer import torch from torch.utils.data import DataLoader from transformers import Trainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput SCREAMING_SNAKE_CASE__ : List[str] = logging.getLogger(__name__) if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class lowerCamelCase_ ( lowerCamelCase ): def __init__( self , *__lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , **__lowerCAmelCase ): """simple docstring""" super().__init__(*__lowerCAmelCase , **__lowerCAmelCase ) __magic_name__ :Any = eval_examples __magic_name__ :str = post_process_function __magic_name__ :int = quant_trainer_args __magic_name__ :List[str] = 1_2_8 # default number of calibration samples def A ( self , __lowerCAmelCase=None ): """simple docstring""" if calib_dataset is None and self.calib_dataset is None: raise ValueError('''Trainer: calibration requires an calib_dataset.''' ) __magic_name__ :Optional[Any] = calib_dataset if calib_dataset is not None else self.calib_dataset __magic_name__ :Optional[int] = self._remove_unused_columns(__lowerCAmelCase , description='''Calibration''' ) return DataLoader( __lowerCAmelCase , batch_size=self.args.eval_batch_size , collate_fn=self.data_collator , drop_last=self.args.dataloader_drop_last , num_workers=self.args.dataloader_num_workers , pin_memory=self.args.dataloader_pin_memory , shuffle=__lowerCAmelCase , ) def A ( self , __lowerCAmelCase=None ): """simple docstring""" __magic_name__ :Dict = self.train_dataset if calib_dataset is None else calib_dataset __magic_name__ :Any = self.get_calib_dataloader(__lowerCAmelCase ) __magic_name__ :List[str] = self.model quant_trainer.configure_model(__lowerCAmelCase , self.quant_trainer_args , calib=__lowerCAmelCase ) model.eval() quant_trainer.enable_calibration(__lowerCAmelCase ) logger.info('''***** Running calibration *****''' ) logger.info(F''' Num examples = {self.calib_num}''' ) logger.info(F''' Batch size = {calib_dataloader.batch_size}''' ) for step, inputs in enumerate(__lowerCAmelCase ): # Prediction step __magic_name__ , __magic_name__ , __magic_name__ :str = self.prediction_step(__lowerCAmelCase , __lowerCAmelCase , prediction_loss_only=__lowerCAmelCase ) if (step + 1) * calib_dataloader.batch_size >= self.calib_num: break quant_trainer.finish_calibration(__lowerCAmelCase , self.quant_trainer_args ) __magic_name__ :Any = model def A ( self , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase = "eval" ): """simple docstring""" __magic_name__ :Tuple = self.eval_dataset if eval_dataset is None else eval_dataset __magic_name__ :Optional[Any] = self.get_eval_dataloader(__lowerCAmelCase ) __magic_name__ :str = self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. 
__magic_name__ :Any = self.compute_metrics __magic_name__ :List[Any] = None __magic_name__ :List[str] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: __magic_name__ :Optional[Any] = eval_loop( __lowerCAmelCase , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__lowerCAmelCase , ) finally: __magic_name__ :Union[str, Any] = compute_metrics if self.post_process_function is not None and self.compute_metrics is not None: __magic_name__ :Union[str, Any] = self.post_process_function(__lowerCAmelCase , __lowerCAmelCase , output.predictions ) __magic_name__ :int = self.compute_metrics(__lowerCAmelCase ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(F'''{metric_key_prefix}_''' ): __magic_name__ :Dict = metrics.pop(__lowerCAmelCase ) self.log(__lowerCAmelCase ) else: __magic_name__ :List[str] = {} if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) xm.master_print(met.metrics_report() ) __magic_name__ :Optional[Any] = self.callback_handler.on_evaluate(self.args , self.state , self.control , __lowerCAmelCase ) return metrics def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase = "test" ): """simple docstring""" __magic_name__ :int = self.get_test_dataloader(__lowerCAmelCase ) # Temporarily disable metric computation, we will do it in the loop here. __magic_name__ :Dict = self.compute_metrics __magic_name__ :str = None __magic_name__ :Optional[int] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: __magic_name__ :int = eval_loop( __lowerCAmelCase , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__lowerCAmelCase , ) finally: __magic_name__ :List[Any] = compute_metrics if self.post_process_function is None or self.compute_metrics is None: return output __magic_name__ :Optional[Any] = self.post_process_function(__lowerCAmelCase , __lowerCAmelCase , output.predictions , '''predict''' ) __magic_name__ :Dict = self.compute_metrics(__lowerCAmelCase ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(F'''{metric_key_prefix}_''' ): __magic_name__ :List[str] = metrics.pop(__lowerCAmelCase ) return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=__lowerCAmelCase ) def A ( self , __lowerCAmelCase="./" ): """simple docstring""" __magic_name__ :List[Any] = self.eval_dataset __magic_name__ :Any = self.get_eval_dataloader(__lowerCAmelCase ) __magic_name__ :int = next(iter(__lowerCAmelCase ) ) # saving device - to make it consistent __magic_name__ :str = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' ) # convert to tuple __magic_name__ :int = tuple(v.to(__lowerCAmelCase ) for k, v in batch.items() ) logger.info('''Converting model to be onnx compatible''' ) from pytorch_quantization.nn import TensorQuantizer __magic_name__ :Any = True __magic_name__ :Optional[int] = self.model.to(__lowerCAmelCase ) model.eval() model.float() __magic_name__ :Any = model.module if hasattr(__lowerCAmelCase , '''module''' ) else model quant_trainer.configure_model(__lowerCAmelCase , self.quant_trainer_args ) __magic_name__ :int = os.path.join(__lowerCAmelCase , '''model.onnx''' ) logger.info(F'''exporting model to 
{output_model_file}''' ) __magic_name__ :Dict = {0: '''batch_size''', 1: '''seq_len'''} torch.onnx.export( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , export_params=__lowerCAmelCase , opset_version=1_3 , do_constant_folding=__lowerCAmelCase , input_names=['''input_ids''', '''attention_mask''', '''token_type_ids'''] , output_names=['''output_start_logits''', '''output_end_logits'''] , dynamic_axes={ '''input_ids''': axes, '''attention_mask''': axes, '''token_type_ids''': axes, '''output_start_logits''': axes, '''output_end_logits''': axes, } , verbose=__lowerCAmelCase , ) logger.info('''onnx export finished''' )
import argparse import re from typing import Dict import torch from datasets import Audio, Dataset, load_dataset, load_metric from transformers import AutoFeatureExtractor, pipeline def __lowercase ( snake_case, snake_case ): """simple docstring""" __magic_name__ :List[str] = args.log_outputs __magic_name__ :Tuple = '''_'''.join(args.dataset.split('''/''' ) + [args.config, args.split] ) # load metric __magic_name__ :Dict = load_metric('''wer''' ) __magic_name__ :List[Any] = load_metric('''cer''' ) # compute metrics __magic_name__ :Optional[Any] = wer.compute(references=result['''target'''], predictions=result['''prediction'''] ) __magic_name__ :Dict = cer.compute(references=result['''target'''], predictions=result['''prediction'''] ) # print & log results __magic_name__ :Tuple = f'''WER: {wer_result}\nCER: {cer_result}''' print(snake_case ) with open(f'''{dataset_id}_eval_results.txt''', '''w''' ) as f: f.write(snake_case ) # log all results in text file. Possibly interesting for analysis if log_outputs is not None: __magic_name__ :Dict = f'''log_{dataset_id}_predictions.txt''' __magic_name__ :Union[str, Any] = f'''log_{dataset_id}_targets.txt''' with open(snake_case, '''w''' ) as p, open(snake_case, '''w''' ) as t: # mapping function to write output def write_to_file(snake_case, snake_case ): p.write(f'''{i}''' + '''\n''' ) p.write(batch['''prediction'''] + '''\n''' ) t.write(f'''{i}''' + '''\n''' ) t.write(batch['''target'''] + '''\n''' ) result.map(snake_case, with_indices=snake_case ) def __lowercase ( snake_case ): """simple docstring""" __magic_name__ :Optional[Any] = '''[,?.!\-\;\:"“%‘”�—’…–]''' # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training __magic_name__ :Optional[Any] = re.sub(snake_case, '''''', text.lower() ) # In addition, we can normalize the target text, e.g. removing new lines characters etc... # note that order is important here! 
__magic_name__ :Tuple = ['''\n\n''', '''\n''', ''' ''', ''' '''] for t in token_sequences_to_ignore: __magic_name__ :Any = ''' '''.join(text.split(snake_case ) ) return text def __lowercase ( snake_case ): """simple docstring""" __magic_name__ :List[str] = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=snake_case ) # for testing: only process the first two examples as a test # dataset = dataset.select(range(10)) # load processor __magic_name__ :str = AutoFeatureExtractor.from_pretrained(args.model_id ) __magic_name__ :Optional[int] = feature_extractor.sampling_rate # resample audio __magic_name__ :Dict = dataset.cast_column('''audio''', Audio(sampling_rate=snake_case ) ) # load eval pipeline if args.device is None: __magic_name__ :List[str] = 0 if torch.cuda.is_available() else -1 __magic_name__ :Any = pipeline('''automatic-speech-recognition''', model=args.model_id, device=args.device ) # map function to decode audio def map_to_pred(snake_case ): __magic_name__ :List[str] = asr( batch['''audio''']['''array'''], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s ) __magic_name__ :Any = prediction['''text'''] __magic_name__ :Optional[int] = normalize_text(batch['''sentence'''] ) return batch # run inference on all examples __magic_name__ :Any = dataset.map(snake_case, remove_columns=dataset.column_names ) # compute and log_results # do not change function below log_results(snake_case, snake_case ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : Any = argparse.ArgumentParser() parser.add_argument( """--model_id""", type=str, required=True, help="""Model identifier. Should be loadable with 🤗 Transformers""" ) parser.add_argument( """--dataset""", type=str, required=True, help="""Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets""", ) parser.add_argument( """--config""", type=str, required=True, help="""Config of the dataset. *E.g.* `'en'` for Common Voice""" ) parser.add_argument("""--split""", type=str, required=True, help="""Split of the dataset. *E.g.* `'test'`""") parser.add_argument( """--chunk_length_s""", type=float, default=None, help="""Chunk length in seconds. Defaults to 5 seconds.""" ) parser.add_argument( """--stride_length_s""", type=float, default=None, help="""Stride of the audio chunks. Defaults to 1 second.""" ) parser.add_argument( """--log_outputs""", action="""store_true""", help="""If defined, write outputs to log file for analysis.""" ) parser.add_argument( """--device""", type=int, default=None, help="""The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.""", ) SCREAMING_SNAKE_CASE__ : int = parser.parse_args() main(args)
def base16_encode(data: bytes) -> str:
    """Encode raw bytes as an uppercase base16 (hex) string."""
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    """Decode an uppercase base16 string back into bytes."""
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid: Data does not have an even number of hex digits."
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid: Data is not uppercase hex or it contains invalid characters."
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
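A quick round-trip check of the two helpers above. This is a minimal sketch, assuming the cleaned-up names base16_encode and base16_decode defined in the file:

# Round trip: bytes -> uppercase hex string -> bytes
payload = b"Hello World!"
encoded = base16_encode(payload)  # '48656C6C6F20576F726C6421'
assert base16_decode(encoded) == payload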
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

BIG_BIRD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json",
    "google/bigbird-roberta-large": "https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json",
    "google/bigbird-base-trivia-itc": "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json",
    # See all BigBird models at https://huggingface.co/models?filter=big_bird
}


class BigBirdConfig(PretrainedConfig):
    model_type = "big_bird"

    def __init__(
        self,
        vocab_size=50358,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=4096,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        sep_token_id=66,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=64,
        num_random_blocks=3,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            sep_token_id=sep_token_id,
            **kwargs,
        )

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache

        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
        self.classifier_dropout = classifier_dropout


class BigBirdOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
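A minimal instantiation sketch for the config class above. It assumes the standard PretrainedConfig keyword-override behavior; "original_full" is the dense-attention alternative to "block_sparse" described in the BigBird documentation:

# Hypothetical quick check: override two fields and read them back.
config = BigBirdConfig(attention_type="original_full", block_size=16)
print(config.attention_type, config.block_size)  # original_full 16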
import pytest
import requests

from datasets.utils.file_utils import http_head

from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline


@pytest.mark.integration
def test_offline_with_timeout():
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        # Without a timeout, the simulated connection would hang indefinitely.
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request("GET", "https://huggingface.co")
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request("GET", "https://huggingface.co", timeout=1.0)


@pytest.mark.integration
def test_offline_with_connection_error():
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request("GET", "https://huggingface.co")


def test_offline_with_datasets_offline_mode_enabled():
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        # http_head raises a plain ConnectionError when HF_DATASETS_OFFLINE=1.
        with pytest.raises(ConnectionError):
            http_head("https://huggingface.co")
from scipy.stats import pearsonr

import datasets


_DESCRIPTION = """
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
"""

_KWARGS_DESCRIPTION = """
Args:
    predictions (`list` of `int`): Predicted class labels, as returned by a model.
    references (`list` of `int`): Ground truth labels.
    return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.

Returns:
    pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.

Examples:

    Example 1-A simple example using only predictions and references.
        >>> pearsonr_metric = datasets.load_metric("pearsonr")
        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
        >>> print(round(results['pearsonr'], 2))
        -0.74

    Example 2-The same as Example 1, but that also returns the `p-value`.
        >>> pearsonr_metric = datasets.load_metric("pearsonr")
        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
        >>> print(sorted(list(results.keys())))
        ['p-value', 'pearsonr']
        >>> print(round(results['pearsonr'], 2))
        -0.74
        >>> print(round(results['p-value'], 2))
        0.15
"""

_CITATION = """
@article{2020SciPy-NMeth,
  author  = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
             Haberland, Matt and Reddy, Tyler and Cournapeau, David and
             Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
             Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
             Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
             Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
             Kern, Robert and Larson, Eric and Carey, C J and
             Polat, Ilhan and Feng, Yu and Moore, Eric W. and
             {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
             Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
             Harris, Charles R. and Archibald, Anne M. and
             Ribeiro, Antonio H. and Pedregosa, Fabian and
             {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
  title   = {{{SciPy} 1.0: Fundamental Algorithms for Scientific Computing in Python}},
  journal = {Nature Methods},
  year    = {2020},
  volume  = {17},
  pages   = {261--272},
  adsurl  = {https://rdcu.be/b08Wh},
  doi     = {10.1038/s41592-019-0686-2},
}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Pearsonr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
import math
from collections.abc import Iterator
from itertools import takewhile


def is_prime(number: int) -> bool:
    """Return True if ``number`` is prime."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes are of the form 6k +/- 1 for some integer k
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator() -> Iterator[int]:
    """Yield the primes 2, 3, 5, 7, ... indefinitely."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(n: int = 2_000_000) -> int:
    """Return the sum of all primes below ``n`` (Project Euler problem 10)."""
    return sum(takewhile(lambda x: x < n, prime_generator()))


if __name__ == "__main__":
    print(f"{solution() = }")
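A small sanity check for the helpers above, as a minimal sketch using the names from the cleaned-up file:

from itertools import islice

# The generator yields primes in order; solution(10) sums the primes below 10.
assert list(islice(prime_generator(), 5)) == [2, 3, 5, 7, 11]
assert solution(10) == 17  # 2 + 3 + 5 + 7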
import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_mbart import MBartTokenizer else: SCREAMING_SNAKE_CASE__ : Optional[int] = None SCREAMING_SNAKE_CASE__ : Any = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : str = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""} SCREAMING_SNAKE_CASE__ : Tuple = { """vocab_file""": { """facebook/mbart-large-en-ro""": ( """https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model""" ), """facebook/mbart-large-cc25""": ( """https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model""" ), }, """tokenizer_file""": { """facebook/mbart-large-en-ro""": """https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json""", """facebook/mbart-large-cc25""": """https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json""", }, } SCREAMING_SNAKE_CASE__ : Dict = { """facebook/mbart-large-en-ro""": 10_24, """facebook/mbart-large-cc25""": 10_24, } # fmt: off SCREAMING_SNAKE_CASE__ : Union[str, Any] = ["""ar_AR""", """cs_CZ""", """de_DE""", """en_XX""", """es_XX""", """et_EE""", """fi_FI""", """fr_XX""", """gu_IN""", """hi_IN""", """it_IT""", """ja_XX""", """kk_KZ""", """ko_KR""", """lt_LT""", """lv_LV""", """my_MM""", """ne_NP""", """nl_XX""", """ro_RO""", """ru_RU""", """si_LK""", """tr_TR""", """vi_VN""", """zh_CN"""] class lowerCamelCase_ ( lowerCamelCase ): a__ = VOCAB_FILES_NAMES a__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a__ = PRETRAINED_VOCAB_FILES_MAP a__ = ['''input_ids''', '''attention_mask'''] a__ = MBartTokenizer a__ = [] a__ = [] def __init__( self , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase="<s>" , __lowerCAmelCase="</s>" , __lowerCAmelCase="</s>" , __lowerCAmelCase="<s>" , __lowerCAmelCase="<unk>" , __lowerCAmelCase="<pad>" , __lowerCAmelCase="<mask>" , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , **__lowerCAmelCase , ): """simple docstring""" # Mask token behave like a normal word, i.e. include the space before it __magic_name__ :Optional[int] = AddedToken(__lowerCAmelCase , lstrip=__lowerCAmelCase , rstrip=__lowerCAmelCase ) if isinstance(__lowerCAmelCase , __lowerCAmelCase ) else mask_token super().__init__( vocab_file=__lowerCAmelCase , tokenizer_file=__lowerCAmelCase , bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , sep_token=__lowerCAmelCase , cls_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , mask_token=__lowerCAmelCase , src_lang=__lowerCAmelCase , tgt_lang=__lowerCAmelCase , additional_special_tokens=__lowerCAmelCase , **__lowerCAmelCase , ) __magic_name__ :List[Any] = vocab_file __magic_name__ :Any = False if not self.vocab_file else True __magic_name__ :Union[str, Any] = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. 
_additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens] ) self.add_special_tokens({'''additional_special_tokens''': _additional_special_tokens} ) __magic_name__ :str = { lang_code: self.convert_tokens_to_ids(__lowerCAmelCase ) for lang_code in FAIRSEQ_LANGUAGE_CODES } __magic_name__ :Optional[int] = src_lang if src_lang is not None else '''en_XX''' __magic_name__ :int = self.convert_tokens_to_ids(self._src_lang ) __magic_name__ :str = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) @property def A ( self ): """simple docstring""" return self._src_lang @src_lang.setter def A ( self , __lowerCAmelCase ): """simple docstring""" __magic_name__ :Union[str, Any] = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def A ( self , __lowerCAmelCase , __lowerCAmelCase = None ): """simple docstring""" if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def A ( self , __lowerCAmelCase , __lowerCAmelCase = None ): """simple docstring""" __magic_name__ :Any = [self.sep_token_id] __magic_name__ :List[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ): """simple docstring""" if src_lang is None or tgt_lang is None: raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' ) __magic_name__ :Union[str, Any] = src_lang __magic_name__ :Union[str, Any] = self(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase ) __magic_name__ :Any = self.convert_tokens_to_ids(__lowerCAmelCase ) __magic_name__ :List[Any] = tgt_lang_id return inputs def A ( self , __lowerCAmelCase , __lowerCAmelCase = "en_XX" , __lowerCAmelCase = None , __lowerCAmelCase = "ro_RO" , **__lowerCAmelCase , ): """simple docstring""" __magic_name__ :Tuple = src_lang __magic_name__ :Dict = tgt_lang return super().prepare_seqaseq_batch(__lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ) def A ( self ): """simple docstring""" return self.set_src_lang_special_tokens(self.src_lang ) def A ( self ): """simple docstring""" return self.set_tgt_lang_special_tokens(self.tgt_lang ) def A ( self , __lowerCAmelCase ): """simple docstring""" __magic_name__ :str = self.convert_tokens_to_ids(__lowerCAmelCase ) __magic_name__ :str = [] __magic_name__ :str = [self.eos_token_id, self.cur_lang_code] __magic_name__ :Optional[int] = self.convert_ids_to_tokens(self.prefix_tokens ) __magic_name__ :List[Any] = self.convert_ids_to_tokens(self.suffix_tokens ) __magic_name__ :List[Any] = processors.TemplateProcessing( single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def A ( self , __lowerCAmelCase ): """simple docstring""" __magic_name__ :List[Any] = self.convert_tokens_to_ids(__lowerCAmelCase ) __magic_name__ :Optional[int] = [] __magic_name__ :List[str] = [self.eos_token_id, self.cur_lang_code] __magic_name__ :Optional[int] = self.convert_ids_to_tokens(self.prefix_tokens ) __magic_name__ :List[Any] = 
self.convert_ids_to_tokens(self.suffix_tokens ) __magic_name__ :Optional[int] = processors.TemplateProcessing( single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def A ( self , __lowerCAmelCase , __lowerCAmelCase = None ): """simple docstring""" if not self.can_save_slow_tokenizer: raise ValueError( '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ''' '''tokenizer.''' ) if not os.path.isdir(__lowerCAmelCase ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory.''' ) return __magic_name__ :Dict = os.path.join( __lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCAmelCase ): copyfile(self.vocab_file , __lowerCAmelCase ) return (out_vocab_file,)
import unittest import numpy as np from transformers.testing_utils import require_flax, require_tf, require_torch from transformers.utils import ( expand_dims, flatten_dict, is_flax_available, is_tf_available, is_torch_available, reshape, squeeze, transpose, ) if is_flax_available(): import jax.numpy as jnp if is_tf_available(): import tensorflow as tf if is_torch_available(): import torch class lowerCamelCase_ ( unittest.TestCase ): def A ( self ): """simple docstring""" __magic_name__ :List[Any] = { '''task_specific_params''': { '''summarization''': {'''length_penalty''': 1.0, '''max_length''': 1_2_8, '''min_length''': 1_2, '''num_beams''': 4}, '''summarization_cnn''': {'''length_penalty''': 2.0, '''max_length''': 1_4_2, '''min_length''': 5_6, '''num_beams''': 4}, '''summarization_xsum''': {'''length_penalty''': 1.0, '''max_length''': 6_2, '''min_length''': 1_1, '''num_beams''': 6}, } } __magic_name__ :List[str] = { '''task_specific_params.summarization.length_penalty''': 1.0, '''task_specific_params.summarization.max_length''': 1_2_8, '''task_specific_params.summarization.min_length''': 1_2, '''task_specific_params.summarization.num_beams''': 4, '''task_specific_params.summarization_cnn.length_penalty''': 2.0, '''task_specific_params.summarization_cnn.max_length''': 1_4_2, '''task_specific_params.summarization_cnn.min_length''': 5_6, '''task_specific_params.summarization_cnn.num_beams''': 4, '''task_specific_params.summarization_xsum.length_penalty''': 1.0, '''task_specific_params.summarization_xsum.max_length''': 6_2, '''task_specific_params.summarization_xsum.min_length''': 1_1, '''task_specific_params.summarization_xsum.num_beams''': 6, } self.assertEqual(flatten_dict(__lowerCAmelCase ) , __lowerCAmelCase ) def A ( self ): """simple docstring""" __magic_name__ :Optional[Any] = np.random.randn(3 , 4 ) self.assertTrue(np.allclose(transpose(__lowerCAmelCase ) , x.transpose() ) ) __magic_name__ :List[Any] = np.random.randn(3 , 4 , 5 ) self.assertTrue(np.allclose(transpose(__lowerCAmelCase , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) ) @require_torch def A ( self ): """simple docstring""" __magic_name__ :Optional[Any] = np.random.randn(3 , 4 ) __magic_name__ :Tuple = torch.tensor(__lowerCAmelCase ) self.assertTrue(np.allclose(transpose(__lowerCAmelCase ) , transpose(__lowerCAmelCase ).numpy() ) ) __magic_name__ :int = np.random.randn(3 , 4 , 5 ) __magic_name__ :Union[str, Any] = torch.tensor(__lowerCAmelCase ) self.assertTrue(np.allclose(transpose(__lowerCAmelCase , axes=(1, 2, 0) ) , transpose(__lowerCAmelCase , axes=(1, 2, 0) ).numpy() ) ) @require_tf def A ( self ): """simple docstring""" __magic_name__ :int = np.random.randn(3 , 4 ) __magic_name__ :Optional[Any] = tf.constant(__lowerCAmelCase ) self.assertTrue(np.allclose(transpose(__lowerCAmelCase ) , transpose(__lowerCAmelCase ).numpy() ) ) __magic_name__ :List[str] = np.random.randn(3 , 4 , 5 ) __magic_name__ :Optional[Any] = tf.constant(__lowerCAmelCase ) self.assertTrue(np.allclose(transpose(__lowerCAmelCase , axes=(1, 2, 0) ) , transpose(__lowerCAmelCase , axes=(1, 2, 0) ).numpy() ) ) @require_flax def A ( self ): """simple docstring""" __magic_name__ :int = np.random.randn(3 , 4 ) __magic_name__ :Dict = jnp.array(__lowerCAmelCase ) self.assertTrue(np.allclose(transpose(__lowerCAmelCase ) , np.asarray(transpose(__lowerCAmelCase ) ) ) ) __magic_name__ :Dict = np.random.randn(3 , 4 , 5 ) __magic_name__ :Dict = jnp.array(__lowerCAmelCase ) self.assertTrue(np.allclose(transpose(__lowerCAmelCase , axes=(1, 2, 0) ) , 
np.asarray(transpose(__lowerCAmelCase , axes=(1, 2, 0) ) ) ) ) def A ( self ): """simple docstring""" __magic_name__ :Any = np.random.randn(3 , 4 ) self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (4, 3) ) , np.reshape(__lowerCAmelCase , (4, 3) ) ) ) __magic_name__ :Union[str, Any] = np.random.randn(3 , 4 , 5 ) self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (1_2, 5) ) , np.reshape(__lowerCAmelCase , (1_2, 5) ) ) ) @require_torch def A ( self ): """simple docstring""" __magic_name__ :Dict = np.random.randn(3 , 4 ) __magic_name__ :Tuple = torch.tensor(__lowerCAmelCase ) self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (4, 3) ) , reshape(__lowerCAmelCase , (4, 3) ).numpy() ) ) __magic_name__ :Union[str, Any] = np.random.randn(3 , 4 , 5 ) __magic_name__ :List[str] = torch.tensor(__lowerCAmelCase ) self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (1_2, 5) ) , reshape(__lowerCAmelCase , (1_2, 5) ).numpy() ) ) @require_tf def A ( self ): """simple docstring""" __magic_name__ :Dict = np.random.randn(3 , 4 ) __magic_name__ :Union[str, Any] = tf.constant(__lowerCAmelCase ) self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (4, 3) ) , reshape(__lowerCAmelCase , (4, 3) ).numpy() ) ) __magic_name__ :List[Any] = np.random.randn(3 , 4 , 5 ) __magic_name__ :Optional[int] = tf.constant(__lowerCAmelCase ) self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (1_2, 5) ) , reshape(__lowerCAmelCase , (1_2, 5) ).numpy() ) ) @require_flax def A ( self ): """simple docstring""" __magic_name__ :List[str] = np.random.randn(3 , 4 ) __magic_name__ :Any = jnp.array(__lowerCAmelCase ) self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (4, 3) ) , np.asarray(reshape(__lowerCAmelCase , (4, 3) ) ) ) ) __magic_name__ :List[Any] = np.random.randn(3 , 4 , 5 ) __magic_name__ :List[str] = jnp.array(__lowerCAmelCase ) self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (1_2, 5) ) , np.asarray(reshape(__lowerCAmelCase , (1_2, 5) ) ) ) ) def A ( self ): """simple docstring""" __magic_name__ :List[Any] = np.random.randn(1 , 3 , 4 ) self.assertTrue(np.allclose(squeeze(__lowerCAmelCase ) , np.squeeze(__lowerCAmelCase ) ) ) __magic_name__ :Optional[Any] = np.random.randn(1 , 4 , 1 , 5 ) self.assertTrue(np.allclose(squeeze(__lowerCAmelCase , axis=2 ) , np.squeeze(__lowerCAmelCase , axis=2 ) ) ) @require_torch def A ( self ): """simple docstring""" __magic_name__ :Dict = np.random.randn(1 , 3 , 4 ) __magic_name__ :List[Any] = torch.tensor(__lowerCAmelCase ) self.assertTrue(np.allclose(squeeze(__lowerCAmelCase ) , squeeze(__lowerCAmelCase ).numpy() ) ) __magic_name__ :List[str] = np.random.randn(1 , 4 , 1 , 5 ) __magic_name__ :str = torch.tensor(__lowerCAmelCase ) self.assertTrue(np.allclose(squeeze(__lowerCAmelCase , axis=2 ) , squeeze(__lowerCAmelCase , axis=2 ).numpy() ) ) @require_tf def A ( self ): """simple docstring""" __magic_name__ :int = np.random.randn(1 , 3 , 4 ) __magic_name__ :Tuple = tf.constant(__lowerCAmelCase ) self.assertTrue(np.allclose(squeeze(__lowerCAmelCase ) , squeeze(__lowerCAmelCase ).numpy() ) ) __magic_name__ :Tuple = np.random.randn(1 , 4 , 1 , 5 ) __magic_name__ :Optional[int] = tf.constant(__lowerCAmelCase ) self.assertTrue(np.allclose(squeeze(__lowerCAmelCase , axis=2 ) , squeeze(__lowerCAmelCase , axis=2 ).numpy() ) ) @require_flax def A ( self ): """simple docstring""" __magic_name__ :Tuple = np.random.randn(1 , 3 , 4 ) __magic_name__ :Optional[Any] = jnp.array(__lowerCAmelCase ) self.assertTrue(np.allclose(squeeze(__lowerCAmelCase ) , 
np.asarray(squeeze(__lowerCAmelCase ) ) ) ) __magic_name__ :List[Any] = np.random.randn(1 , 4 , 1 , 5 ) __magic_name__ :Optional[Any] = jnp.array(__lowerCAmelCase ) self.assertTrue(np.allclose(squeeze(__lowerCAmelCase , axis=2 ) , np.asarray(squeeze(__lowerCAmelCase , axis=2 ) ) ) ) def A ( self ): """simple docstring""" __magic_name__ :Any = np.random.randn(3 , 4 ) self.assertTrue(np.allclose(expand_dims(__lowerCAmelCase , axis=1 ) , np.expand_dims(__lowerCAmelCase , axis=1 ) ) ) @require_torch def A ( self ): """simple docstring""" __magic_name__ :List[Any] = np.random.randn(3 , 4 ) __magic_name__ :Any = torch.tensor(__lowerCAmelCase ) self.assertTrue(np.allclose(expand_dims(__lowerCAmelCase , axis=1 ) , expand_dims(__lowerCAmelCase , axis=1 ).numpy() ) ) @require_tf def A ( self ): """simple docstring""" __magic_name__ :Union[str, Any] = np.random.randn(3 , 4 ) __magic_name__ :Union[str, Any] = tf.constant(__lowerCAmelCase ) self.assertTrue(np.allclose(expand_dims(__lowerCAmelCase , axis=1 ) , expand_dims(__lowerCAmelCase , axis=1 ).numpy() ) ) @require_flax def A ( self ): """simple docstring""" __magic_name__ :List[str] = np.random.randn(3 , 4 ) __magic_name__ :Tuple = jnp.array(__lowerCAmelCase ) self.assertTrue(np.allclose(expand_dims(__lowerCAmelCase , axis=1 ) , np.asarray(expand_dims(__lowerCAmelCase , axis=1 ) ) ) )
import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..bit import BitConfig SCREAMING_SNAKE_CASE__ : str = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : str = { """Intel/dpt-large""": """https://huggingface.co/Intel/dpt-large/resolve/main/config.json""", # See all DPT models at https://huggingface.co/models?filter=dpt } class lowerCamelCase_ ( lowerCamelCase ): a__ = '''dpt''' def __init__( self , __lowerCAmelCase=7_6_8 , __lowerCAmelCase=1_2 , __lowerCAmelCase=1_2 , __lowerCAmelCase=3_0_7_2 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.0 , __lowerCAmelCase=0.0 , __lowerCAmelCase=0.02 , __lowerCAmelCase=1E-12 , __lowerCAmelCase=3_8_4 , __lowerCAmelCase=1_6 , __lowerCAmelCase=3 , __lowerCAmelCase=False , __lowerCAmelCase=True , __lowerCAmelCase=[2, 5, 8, 1_1] , __lowerCAmelCase="project" , __lowerCAmelCase=[4, 2, 1, 0.5] , __lowerCAmelCase=[9_6, 1_9_2, 3_8_4, 7_6_8] , __lowerCAmelCase=2_5_6 , __lowerCAmelCase=-1 , __lowerCAmelCase=False , __lowerCAmelCase=True , __lowerCAmelCase=0.4 , __lowerCAmelCase=2_5_5 , __lowerCAmelCase=0.1 , __lowerCAmelCase=[1, 1_0_2_4, 2_4, 2_4] , __lowerCAmelCase=[0, 1] , __lowerCAmelCase=None , **__lowerCAmelCase , ): """simple docstring""" super().__init__(**__lowerCAmelCase ) __magic_name__ :str = hidden_size __magic_name__ :Union[str, Any] = is_hybrid if self.is_hybrid: if backbone_config is None: logger.info('''Initializing the config with a `BiT` backbone.''' ) __magic_name__ :Tuple = { '''global_padding''': '''same''', '''layer_type''': '''bottleneck''', '''depths''': [3, 4, 9], '''out_features''': ['''stage1''', '''stage2''', '''stage3'''], '''embedding_dynamic_padding''': True, } __magic_name__ :List[Any] = BitConfig(**__lowerCAmelCase ) elif isinstance(__lowerCAmelCase , __lowerCAmelCase ): logger.info('''Initializing the config with a `BiT` backbone.''' ) __magic_name__ :Tuple = BitConfig(**__lowerCAmelCase ) elif isinstance(__lowerCAmelCase , __lowerCAmelCase ): __magic_name__ :Dict = backbone_config else: raise ValueError( F'''backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.''' ) __magic_name__ :Any = backbone_featmap_shape __magic_name__ :Any = neck_ignore_stages if readout_type != "project": raise ValueError('''Readout type must be \'project\' when using `DPT-hybrid` mode.''' ) else: __magic_name__ :Dict = None __magic_name__ :Dict = None __magic_name__ :List[Any] = [] __magic_name__ :int = num_hidden_layers __magic_name__ :Union[str, Any] = num_attention_heads __magic_name__ :Tuple = intermediate_size __magic_name__ :List[Any] = hidden_act __magic_name__ :int = hidden_dropout_prob __magic_name__ :Dict = attention_probs_dropout_prob __magic_name__ :int = initializer_range __magic_name__ :Tuple = layer_norm_eps __magic_name__ :List[Any] = image_size __magic_name__ :str = patch_size __magic_name__ :str = num_channels __magic_name__ :Union[str, Any] = qkv_bias __magic_name__ :Any = backbone_out_indices if readout_type not in ["ignore", "add", "project"]: raise ValueError('''Readout_type must be one of [\'ignore\', \'add\', \'project\']''' ) __magic_name__ :Tuple = readout_type __magic_name__ :List[Any] = reassemble_factors __magic_name__ :Any = neck_hidden_sizes __magic_name__ :int = fusion_hidden_size __magic_name__ :str = head_in_index __magic_name__ :Any = use_batch_norm_in_fusion_residual # auxiliary head attributes (semantic segmentation) __magic_name__ :Dict = use_auxiliary_head __magic_name__ :Dict = auxiliary_loss_weight __magic_name__ 
:Optional[int] = semantic_loss_ignore_index __magic_name__ :Any = semantic_classifier_dropout def A ( self ): """simple docstring""" __magic_name__ :List[str] = copy.deepcopy(self.__dict__ ) if output["backbone_config"] is not None: __magic_name__ :List[str] = self.backbone_config.to_dict() __magic_name__ :Optional[Any] = self.__class__.model_type return output
from pathlib import PurePosixPath
from typing import Optional

import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo

from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url


class HfFileSystem(AbstractFileSystem):
    root_marker = ""
    protocol = "hf-legacy"  # "hf://" is reserved for hffs

    def __init__(
        self,
        repo_info: Optional[DatasetInfo] = None,
        token: Optional[str] = None,
        **kwargs,
    ):
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None

    def _get_dirs(self):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                self.dir_cache.update(
                    {
                        str(d): {"name": str(d), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    }
                )

    def _open(self, path, mode="rb", **kwargs):
        if not isinstance(self.repo_info, DatasetInfo):
            raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}")
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
        return fsspec.open(
            url,
            mode=mode,
            headers=get_authentication_headers_for_url(url, use_auth_token=self.token),
            client_kwargs={"trust_env": True},
        ).open()

    def info(self, path, **kwargs):
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)

    def ls(self, path, detail=False, **kwargs):
        self._get_dirs()
        path = PurePosixPath(path.strip("/"))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("/"))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out)
import logging import os import sys from dataclasses import dataclass, field from typing import Optional from seqaseq_trainer import SeqaSeqTrainer from seqaseq_training_args import SeqaSeqTrainingArguments import transformers from transformers import ( AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer, HfArgumentParser, MBartTokenizer, MBartTokenizerFast, set_seed, ) from transformers.trainer_utils import EvaluationStrategy, is_main_process from transformers.training_args import ParallelMode from utils import ( SeqaSeqDataCollator, SeqaSeqDataset, assert_all_frozen, build_compute_metrics_fn, check_output_dir, freeze_embeds, freeze_params, lmap, save_json, use_task_specific_params, write_txt_file, ) SCREAMING_SNAKE_CASE__ : int = logging.getLogger(__name__) @dataclass class lowerCamelCase_ : a__ = field( metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} ) a__ = field( default=lowerCamelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) a__ = field( default=lowerCamelCase , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} ) a__ = field( default=lowerCamelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , ) a__ = field(default=lowerCamelCase , metadata={'''help''': '''Whether tp freeze the encoder.'''} ) a__ = field(default=lowerCamelCase , metadata={'''help''': '''Whether to freeze the embeddings.'''} ) @dataclass class lowerCamelCase_ : a__ = field( metadata={'''help''': '''The input data dir. Should contain the .tsv files (or other data files) for the task.'''} ) a__ = field( default='''summarization''' , metadata={'''help''': '''Task name, summarization (or summarization_{dataset} for pegasus) or translation'''} , ) a__ = field( default=10_24 , metadata={ '''help''': ( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) } , ) a__ = field( default=1_28 , metadata={ '''help''': ( '''The maximum total sequence length for target text after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) } , ) a__ = field( default=1_42 , metadata={ '''help''': ( '''The maximum total sequence length for validation target text after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded. ''' '''This argument is also used to override the ``max_length`` param of ``model.generate``, which is used ''' '''during ``evaluate`` and ``predict``.''' ) } , ) a__ = field( default=1_42 , metadata={ '''help''': ( '''The maximum total sequence length for test target text after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) } , ) a__ = field(default=-1 , metadata={'''help''': '''# training examples. -1 means use all.'''} ) a__ = field(default=-1 , metadata={'''help''': '''# validation examples. -1 means use all.'''} ) a__ = field(default=-1 , metadata={'''help''': '''# test examples. 
-1 means use all.'''} ) a__ = field(default=lowerCamelCase , metadata={'''help''': '''Source language id for translation.'''} ) a__ = field(default=lowerCamelCase , metadata={'''help''': '''Target language id for translation.'''} ) a__ = field(default=lowerCamelCase , metadata={'''help''': '''# num_beams to use for evaluation.'''} ) a__ = field( default=lowerCamelCase , metadata={'''help''': '''If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined.'''} , ) def __lowercase ( snake_case, snake_case, snake_case ): """simple docstring""" logger.info(f'''***** {split} metrics *****''' ) for key in sorted(metrics.keys() ): logger.info(f''' {key} = {metrics[key]}''' ) save_json(snake_case, os.path.join(snake_case, f'''{split}_results.json''' ) ) def __lowercase ( ): """simple docstring""" __magic_name__ :str = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. __magic_name__ , __magic_name__ , __magic_name__ :Union[str, Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: __magic_name__ , __magic_name__ , __magic_name__ :Tuple = parser.parse_args_into_dataclasses() check_output_dir(snake_case ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN, ) logger.warning( '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''', training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ), training_args.fpaa, ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() logger.info('''Training/evaluation parameters %s''', snake_case ) # Set seed set_seed(training_args.seed ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
__magic_name__ :Optional[Any] = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, ) __magic_name__ :int = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''') for p in extra_model_params: if getattr(snake_case, snake_case, snake_case ): assert hasattr(snake_case, snake_case ), f'''({config.__class__.__name__}) doesn\'t have a `{p}` attribute''' setattr(snake_case, snake_case, getattr(snake_case, snake_case ) ) __magic_name__ :Optional[Any] = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, ) __magic_name__ :List[Any] = AutoModelForSeqaSeqLM.from_pretrained( model_args.model_name_or_path, from_tf='''.ckpt''' in model_args.model_name_or_path, config=snake_case, cache_dir=model_args.cache_dir, ) # use task specific params use_task_specific_params(snake_case, data_args.task ) # set num_beams for evaluation if data_args.eval_beams is None: __magic_name__ :Tuple = model.config.num_beams # set decoder_start_token_id for MBart if model.config.decoder_start_token_id is None and isinstance(snake_case, (MBartTokenizer, MBartTokenizerFast) ): assert ( data_args.tgt_lang is not None and data_args.src_lang is not None ), "mBart requires --tgt_lang and --src_lang" if isinstance(snake_case, snake_case ): __magic_name__ :str = tokenizer.lang_code_to_id[data_args.tgt_lang] else: __magic_name__ :Tuple = tokenizer.convert_tokens_to_ids(data_args.tgt_lang ) if model_args.freeze_embeds: freeze_embeds(snake_case ) if model_args.freeze_encoder: freeze_params(model.get_encoder() ) assert_all_frozen(model.get_encoder() ) __magic_name__ :Any = SeqaSeqDataset # Get datasets __magic_name__ :List[str] = ( dataset_class( snake_case, type_path='''train''', data_dir=data_args.data_dir, n_obs=data_args.n_train, max_target_length=data_args.max_target_length, max_source_length=data_args.max_source_length, prefix=model.config.prefix or '''''', ) if training_args.do_train else None ) __magic_name__ :Optional[Any] = ( dataset_class( snake_case, type_path='''val''', data_dir=data_args.data_dir, n_obs=data_args.n_val, max_target_length=data_args.val_max_target_length, max_source_length=data_args.max_source_length, prefix=model.config.prefix or '''''', ) if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO else None ) __magic_name__ :Union[str, Any] = ( dataset_class( snake_case, type_path='''test''', data_dir=data_args.data_dir, n_obs=data_args.n_test, max_target_length=data_args.test_max_target_length, max_source_length=data_args.max_source_length, prefix=model.config.prefix or '''''', ) if training_args.do_predict else None ) # Initialize our Trainer __magic_name__ :Any = ( build_compute_metrics_fn(data_args.task, snake_case ) if training_args.predict_with_generate else None ) __magic_name__ :Optional[int] = SeqaSeqTrainer( model=snake_case, args=snake_case, data_args=snake_case, train_dataset=snake_case, eval_dataset=snake_case, data_collator=SeqaSeqDataCollator( snake_case, snake_case, model.config.decoder_start_token_id, training_args.tpu_num_cores ), compute_metrics=snake_case, tokenizer=snake_case, ) __magic_name__ :Union[str, Any] = {} # Training if training_args.do_train: logger.info('''*** Train ***''' ) __magic_name__ :Optional[Any] = trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) 
__magic_name__ :int = train_result.metrics __magic_name__ :Optional[Any] = data_args.n_train trainer.save_model() # this also saves the tokenizer if trainer.is_world_process_zero(): handle_metrics('''train''', snake_case, training_args.output_dir ) all_metrics.update(snake_case ) # Need to save the state, since Trainer.save_model saves only the tokenizer with the model trainer.state.save_to_json(os.path.join(training_args.output_dir, '''trainer_state.json''' ) ) # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) tokenizer.save_pretrained(training_args.output_dir ) # Evaluation if training_args.do_eval: logger.info('''*** Evaluate ***''' ) __magic_name__ :int = trainer.evaluate(metric_key_prefix='''val''' ) __magic_name__ :int = data_args.n_val __magic_name__ :int = round(metrics['''val_loss'''], 4 ) if trainer.is_world_process_zero(): handle_metrics('''val''', snake_case, training_args.output_dir ) all_metrics.update(snake_case ) if training_args.do_predict: logger.info('''*** Predict ***''' ) __magic_name__ :Optional[int] = trainer.predict(test_dataset=snake_case, metric_key_prefix='''test''' ) __magic_name__ :Tuple = test_output.metrics __magic_name__ :str = data_args.n_test if trainer.is_world_process_zero(): __magic_name__ :Union[str, Any] = round(metrics['''test_loss'''], 4 ) handle_metrics('''test''', snake_case, training_args.output_dir ) all_metrics.update(snake_case ) if training_args.predict_with_generate: __magic_name__ :List[Any] = tokenizer.batch_decode( test_output.predictions, skip_special_tokens=snake_case, clean_up_tokenization_spaces=snake_case ) __magic_name__ :List[str] = lmap(str.strip, snake_case ) write_txt_file(snake_case, os.path.join(training_args.output_dir, '''test_generations.txt''' ) ) if trainer.is_world_process_zero(): save_json(snake_case, os.path.join(training_args.output_dir, '''all_results.json''' ) ) return all_metrics def __lowercase ( snake_case ): """simple docstring""" main() if __name__ == "__main__": main()
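Since main() routes everything through HfArgumentParser, the whole fine-tuning run can be driven by a single JSON file via the `sys.argv[1].endswith(".json")` branch above. A minimal sketch; the file name and every value in it are hypothetical:

import json

args = {
    "model_name_or_path": "sshleifer/student_marian_en_ro_6_1",
    "data_dir": "wmt_en_ro",
    "output_dir": "outputs",
    "do_train": True,
    "do_eval": True,
    "predict_with_generate": True,
}
with open("finetune_args.json", "w") as f:
    json.dump(args, f, indent=2)
# then: python finetune_trainer.py finetune_args.json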
0
import pyarrow.parquet as pq import pytest from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config from datasets.features.image import Image from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def __lowercase ( snake_case, snake_case ): """simple docstring""" assert isinstance(snake_case, snake_case ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('''keep_in_memory''', [False, True] ) def __lowercase ( snake_case, snake_case, snake_case ): """simple docstring""" __magic_name__ :Tuple = tmp_path / '''cache''' __magic_name__ :int = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): __magic_name__ :Optional[Any] = ParquetDatasetReader(snake_case, cache_dir=snake_case, keep_in_memory=snake_case ).read() _check_parquet_dataset(snake_case, snake_case ) @pytest.mark.parametrize( '''features''', [ None, {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}, {'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''}, {'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''}, {'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''}, ], ) def __lowercase ( snake_case, snake_case, snake_case ): """simple docstring""" __magic_name__ :List[str] = tmp_path / '''cache''' __magic_name__ :int = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} __magic_name__ :Tuple = features.copy() if features else default_expected_features __magic_name__ :Union[str, Any] = ( Features({feature: Value(snake_case ) for feature, dtype in features.items()} ) if features is not None else None ) __magic_name__ :int = ParquetDatasetReader(snake_case, features=snake_case, cache_dir=snake_case ).read() _check_parquet_dataset(snake_case, snake_case ) @pytest.mark.parametrize('''split''', [None, NamedSplit('''train''' ), '''train''', '''test'''] ) def __lowercase ( snake_case, snake_case, snake_case ): """simple docstring""" __magic_name__ :str = tmp_path / '''cache''' __magic_name__ :List[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} __magic_name__ :int = ParquetDatasetReader(snake_case, cache_dir=snake_case, split=snake_case ).read() _check_parquet_dataset(snake_case, snake_case ) assert dataset.split == split if split else "train" @pytest.mark.parametrize('''path_type''', [str, list] ) def __lowercase ( snake_case, snake_case, snake_case ): """simple docstring""" if issubclass(snake_case, snake_case ): __magic_name__ :Union[str, Any] = parquet_path elif issubclass(snake_case, snake_case ): __magic_name__ :Union[str, Any] = [parquet_path] __magic_name__ :Optional[int] = tmp_path / '''cache''' __magic_name__ :Optional[int] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} __magic_name__ :str = ParquetDatasetReader(snake_case, cache_dir=snake_case ).read() _check_parquet_dataset(snake_case, snake_case ) def __lowercase ( snake_case, snake_case, snake_case=("train",) ): """simple docstring""" assert isinstance(snake_case, 
snake_case ) for split in splits: __magic_name__ :Optional[Any] = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('''keep_in_memory''', [False, True] ) def __lowercase ( snake_case, snake_case, snake_case ): """simple docstring""" __magic_name__ :Any = tmp_path / '''cache''' __magic_name__ :Optional[int] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): __magic_name__ :Tuple = ParquetDatasetReader( {'''train''': parquet_path}, cache_dir=snake_case, keep_in_memory=snake_case ).read() _check_parquet_datasetdict(snake_case, snake_case ) @pytest.mark.parametrize( '''features''', [ None, {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}, {'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''}, {'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''}, {'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''}, ], ) def __lowercase ( snake_case, snake_case, snake_case ): """simple docstring""" __magic_name__ :Optional[Any] = tmp_path / '''cache''' __magic_name__ :Dict = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} __magic_name__ :int = features.copy() if features else default_expected_features __magic_name__ :List[Any] = ( Features({feature: Value(snake_case ) for feature, dtype in features.items()} ) if features is not None else None ) __magic_name__ :Optional[int] = ParquetDatasetReader({'''train''': parquet_path}, features=snake_case, cache_dir=snake_case ).read() _check_parquet_datasetdict(snake_case, snake_case ) @pytest.mark.parametrize('''split''', [None, NamedSplit('''train''' ), '''train''', '''test'''] ) def __lowercase ( snake_case, snake_case, snake_case ): """simple docstring""" if split: __magic_name__ :Dict = {split: parquet_path} else: __magic_name__ :Optional[int] = '''train''' __magic_name__ :Dict = {'''train''': parquet_path, '''test''': parquet_path} __magic_name__ :List[Any] = tmp_path / '''cache''' __magic_name__ :Optional[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} __magic_name__ :Optional[Any] = ParquetDatasetReader(snake_case, cache_dir=snake_case ).read() _check_parquet_datasetdict(snake_case, snake_case, splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() ) def __lowercase ( snake_case, snake_case ): """simple docstring""" __magic_name__ :str = ParquetDatasetWriter(snake_case, tmp_path / '''foo.parquet''' ) assert writer.write() > 0 __magic_name__ :List[Any] = pq.ParquetFile(tmp_path / '''foo.parquet''' ) __magic_name__ :List[Any] = pf.read() assert dataset.data.table == output_table def __lowercase ( snake_case, snake_case ): """simple docstring""" __magic_name__ :List[str] = str(shared_datadir / '''test_image_rgb.jpg''' ) __magic_name__ :Tuple = {'''image''': [image_path]} __magic_name__ :List[Any] = Features({'''image''': Image()} ) __magic_name__ :Tuple = Dataset.from_dict(snake_case, features=snake_case ) __magic_name__ :Union[str, Any] = ParquetDatasetWriter(snake_case, tmp_path / '''foo.parquet''' ) assert writer.write() > 0 __magic_name__ :List[str] = 
Dataset.from_parquet(str(tmp_path / '''foo.parquet''' ) ) assert dataset.features == reloaded_dataset.features __magic_name__ :List[str] = ParquetDatasetReader(str(tmp_path / '''foo.parquet''' ), streaming=snake_case ).read() assert dataset.features == reloaded_iterable_dataset.features @pytest.mark.parametrize( '''feature, expected''', [ (Features({'''foo''': Value('''int32''' )} ), None), (Features({'''image''': Image(), '''foo''': Value('''int32''' )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS), (Features({'''nested''': Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS), ], ) def __lowercase ( snake_case, snake_case ): """simple docstring""" assert get_writer_batch_size(snake_case ) == expected
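The write-then-reload round trip these tests assert can be reproduced directly in user code; a short sketch, with file paths chosen for illustration:

from datasets import Dataset
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter

ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2]})
assert ParquetDatasetWriter(ds, "tmp.parquet").write() > 0  # write() returns a positive count on success
reloaded = ParquetDatasetReader("tmp.parquet", cache_dir="cache").read()
assert reloaded.column_names == ["col_1", "col_2"]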
0
1
import unittest from transformers import SqueezeBertConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice, SqueezeBertForQuestionAnswering, SqueezeBertForSequenceClassification, SqueezeBertForTokenClassification, SqueezeBertModel, ) class lowerCamelCase_ ( lowerCamelCase ): def __init__( self , __lowerCAmelCase , __lowerCAmelCase=1_3 , __lowerCAmelCase=7 , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=False , __lowerCAmelCase=True , __lowerCAmelCase=9_9 , __lowerCAmelCase=3_2 , __lowerCAmelCase=5 , __lowerCAmelCase=4 , __lowerCAmelCase=6_4 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=5_1_2 , __lowerCAmelCase=1_6 , __lowerCAmelCase=2 , __lowerCAmelCase=0.02 , __lowerCAmelCase=3 , __lowerCAmelCase=4 , __lowerCAmelCase=None , __lowerCAmelCase=2 , __lowerCAmelCase=2 , __lowerCAmelCase=2 , __lowerCAmelCase=2 , __lowerCAmelCase=4 , __lowerCAmelCase=1 , ): """simple docstring""" __magic_name__ :Union[str, Any] = parent __magic_name__ :int = batch_size __magic_name__ :int = seq_length __magic_name__ :List[Any] = is_training __magic_name__ :Any = use_input_mask __magic_name__ :List[Any] = use_token_type_ids __magic_name__ :Any = use_labels __magic_name__ :str = vocab_size __magic_name__ :Tuple = hidden_size __magic_name__ :List[str] = num_hidden_layers __magic_name__ :Dict = num_attention_heads __magic_name__ :str = intermediate_size __magic_name__ :Optional[int] = hidden_act __magic_name__ :List[Any] = hidden_dropout_prob __magic_name__ :List[str] = attention_probs_dropout_prob __magic_name__ :List[str] = max_position_embeddings __magic_name__ :Union[str, Any] = type_vocab_size __magic_name__ :int = type_sequence_label_size __magic_name__ :Optional[Any] = initializer_range __magic_name__ :int = num_labels __magic_name__ :Tuple = num_choices __magic_name__ :Optional[int] = scope __magic_name__ :Tuple = q_groups __magic_name__ :List[str] = k_groups __magic_name__ :str = v_groups __magic_name__ :Optional[Any] = post_attention_groups __magic_name__ :List[str] = intermediate_groups __magic_name__ :int = output_groups def A ( self ): """simple docstring""" __magic_name__ :Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __magic_name__ :List[Any] = None if self.use_input_mask: __magic_name__ :Dict = random_attention_mask([self.batch_size, self.seq_length] ) __magic_name__ :int = None __magic_name__ :Optional[Any] = None __magic_name__ :Union[str, Any] = None if self.use_labels: __magic_name__ :Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __magic_name__ :List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __magic_name__ :Dict = ids_tensor([self.batch_size] , self.num_choices ) __magic_name__ :Optional[int] = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def A ( self ): """simple docstring""" return SqueezeBertConfig( embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , 
num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , ) def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): """simple docstring""" __magic_name__ :Optional[Any] = SqueezeBertModel(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() __magic_name__ :str = model(__lowerCAmelCase , __lowerCAmelCase ) __magic_name__ :List[Any] = model(__lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): """simple docstring""" __magic_name__ :str = SqueezeBertForMaskedLM(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() __magic_name__ :Optional[int] = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): """simple docstring""" __magic_name__ :Optional[Any] = SqueezeBertForQuestionAnswering(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() __magic_name__ :Union[str, Any] = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): """simple docstring""" __magic_name__ :Optional[Any] = self.num_labels __magic_name__ :int = SqueezeBertForSequenceClassification(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() __magic_name__ :Optional[Any] = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): """simple docstring""" __magic_name__ :List[Any] = self.num_labels __magic_name__ :Union[str, Any] = SqueezeBertForTokenClassification(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() __magic_name__ :Tuple = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): """simple docstring""" __magic_name__ :str = self.num_choices __magic_name__ :Dict = SqueezeBertForMultipleChoice(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() __magic_name__ :Any = 
input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __magic_name__ :Tuple = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __magic_name__ :Optional[int] = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def A ( self ): """simple docstring""" __magic_name__ :List[Any] = self.prepare_config_and_inputs() ((__magic_name__) , (__magic_name__) , (__magic_name__) , (__magic_name__) , (__magic_name__) , (__magic_name__)) :Tuple = config_and_inputs __magic_name__ :Dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class lowerCamelCase_ ( lowerCamelCase , lowerCamelCase , unittest.TestCase ): a__ = ( ( SqueezeBertModel, SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice, SqueezeBertForQuestionAnswering, SqueezeBertForSequenceClassification, SqueezeBertForTokenClassification, ) if is_torch_available() else None ) a__ = ( { '''feature-extraction''': SqueezeBertModel, '''fill-mask''': SqueezeBertForMaskedLM, '''question-answering''': SqueezeBertForQuestionAnswering, '''text-classification''': SqueezeBertForSequenceClassification, '''token-classification''': SqueezeBertForTokenClassification, '''zero-shot''': SqueezeBertForSequenceClassification, } if is_torch_available() else {} ) a__ = False a__ = True a__ = False def A ( self ): """simple docstring""" __magic_name__ :Any = SqueezeBertModelTester(self ) __magic_name__ :Optional[int] = ConfigTester(self , config_class=__lowerCAmelCase , dim=3_7 ) def A ( self ): """simple docstring""" self.config_tester.run_common_tests() def A ( self ): """simple docstring""" __magic_name__ :Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_model(*__lowerCAmelCase ) def A ( self ): """simple docstring""" __magic_name__ :List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_masked_lm(*__lowerCAmelCase ) def A ( self ): """simple docstring""" __magic_name__ :Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_question_answering(*__lowerCAmelCase ) def A ( self ): """simple docstring""" __magic_name__ :Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_sequence_classification(*__lowerCAmelCase ) def A ( self ): """simple docstring""" __magic_name__ :Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_token_classification(*__lowerCAmelCase ) def A ( self ): """simple docstring""" __magic_name__ :List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_multiple_choice(*__lowerCAmelCase ) @slow def A ( self ): """simple docstring""" for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __magic_name__ :int = SqueezeBertModel.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) @require_sentencepiece @require_tokenizers @require_torch class lowerCamelCase_ ( unittest.TestCase ): @slow def A ( self ): """simple docstring""" __magic_name__ :int = SqueezeBertForSequenceClassification.from_pretrained('''squeezebert/squeezebert-mnli''' ) __magic_name__ :Optional[int] = torch.tensor([[1, 2_9_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 1_3, 1_5_8_8, 2]] ) __magic_name__ :Tuple = 
model(__lowerCAmelCase )[0] __magic_name__ :Any = torch.Size((1, 3) ) self.assertEqual(output.shape , __lowerCAmelCase ) __magic_name__ :List[Any] = torch.tensor([[0.6401, -0.0349, -0.6041]] ) self.assertTrue(torch.allclose(__lowerCAmelCase , __lowerCAmelCase , atol=1E-4 ) )
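Outside the test harness, the integration check above amounts to plain inference; the checkpoint id is taken from the test itself:

import torch
from transformers import SqueezeBertForSequenceClassification, SqueezeBertTokenizer

tokenizer = SqueezeBertTokenizer.from_pretrained("squeezebert/squeezebert-mnli")
model = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli")
inputs = tokenizer("A premise sentence.", "A hypothesis sentence.", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, 3): the three MNLI classes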
0
def multiplicative_persistence(num: int) -> int:
    """Return how many times the digits of num must be multiplied together
    before a single digit remains."""
    if not isinstance(num, int):
        raise ValueError("multiplicative_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("multiplicative_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]

        num_string = str(total)
        steps += 1
    return steps


def additive_persistence(num: int) -> int:
    """Return how many times the digits of num must be summed together
    before a single digit remains."""
    if not isinstance(num, int):
        raise ValueError("additive_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("additive_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]

        num_string = str(total)
        steps += 1
    return steps


if __name__ == "__main__":
    import doctest

    doctest.testmod()
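A quick usage sketch for the two persistence helpers above (values chosen for illustration):

# 39 -> 3*9=27 -> 2*7=14 -> 1*4=4: three multiplicative steps
assert multiplicative_persistence(39) == 3
# 39 -> 3+9=12 -> 1+2=3: two additive steps
assert additive_persistence(39) == 2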
0
1
import fire

from utils import calculate_rouge, save_json


def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    """Kwargs are passed through to calculate_rouge."""
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely


if __name__ == "__main__":
    fire.Fire(calculate_rouge_path)
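Because the module hands calculate_rouge_path to fire.Fire, positional CLI arguments map straight onto the function parameters; calling it from Python is equivalent (file names here are hypothetical):

# same as: python rouge_cli.py preds.txt targets.txt --save_path metrics.json
scores = calculate_rouge_path("preds.txt", "targets.txt", save_path="metrics.json")
print(scores)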
0
import math import os import re import sys import unittest from pathlib import Path from typing import Tuple from unittest.mock import patch from parameterized import parameterized from transformers.testing_utils import ( CaptureStderr, ExtendSysPath, TestCasePlus, execute_subprocess_async, get_gpu_count, get_torch_dist_unique_port, require_apex, require_bitsandbytes, require_fairscale, require_torch, require_torch_gpu, require_torch_multi_gpu, require_torch_non_multi_gpu, slow, ) from transformers.trainer_callback import TrainerState from transformers.trainer_utils import set_seed SCREAMING_SNAKE_CASE__ : List[Any] = os.path.abspath(os.path.dirname(__file__)) with ExtendSysPath(f"{bindir}/../../examples/pytorch/translation"): from run_translation import main # noqa set_seed(42) SCREAMING_SNAKE_CASE__ : Optional[Any] = """sshleifer/student_marian_en_ro_6_1""" SCREAMING_SNAKE_CASE__ : List[Any] = """sshleifer/tiny-mbart""" @require_torch class lowerCamelCase_ ( lowerCamelCase ): def A ( self , __lowerCAmelCase=False , __lowerCAmelCase=None , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , ): """simple docstring""" __magic_name__ :List[Any] = self.run_trainer( eval_steps=1 , max_len=1_2 , model_name=__lowerCAmelCase , num_train_epochs=1 , distributed=__lowerCAmelCase , extra_args_str=__lowerCAmelCase , predict_with_generate=__lowerCAmelCase , do_train=__lowerCAmelCase , do_eval=__lowerCAmelCase , do_predict=__lowerCAmelCase , ) __magic_name__ :Any = TrainerState.load_from_json(os.path.join(__lowerCAmelCase , '''trainer_state.json''' ) ).log_history if not do_eval: return __magic_name__ :Any = [log for log in logs if '''eval_loss''' in log.keys()] __magic_name__ :str = eval_metrics[0] if predict_with_generate: assert "eval_bleu" in first_step_stats __magic_name__ :Tuple = eval_metrics[-1] assert isinstance(last_step_stats['''eval_bleu'''] , __lowerCAmelCase ) assert not math.isnan(float(last_step_stats['''eval_loss'''] ) ), "eval_loss must not be `nan`" @require_torch_non_multi_gpu def A ( self ): """simple docstring""" self.run_seqaseq_quick() @require_torch_multi_gpu def A ( self ): """simple docstring""" self.run_seqaseq_quick(distributed=__lowerCAmelCase ) @require_torch_multi_gpu def A ( self ): """simple docstring""" self.run_seqaseq_quick(distributed=__lowerCAmelCase ) @unittest.skip('''Requires an update of the env running those tests''' ) @require_torch_multi_gpu @require_fairscale def A ( self ): """simple docstring""" self.run_seqaseq_quick(distributed=__lowerCAmelCase , extra_args_str='''--sharded_ddp simple''' ) @unittest.skip('''Requires an update of the env running those tests''' ) @require_torch_multi_gpu @require_fairscale def A ( self ): """simple docstring""" self.run_seqaseq_quick(distributed=__lowerCAmelCase , extra_args_str='''--sharded_ddp simple --fp16''' ) @unittest.skip('''Requires an update of the env running those tests''' ) @require_torch_multi_gpu @require_fairscale def A ( self ): """simple docstring""" self.run_seqaseq_quick(distributed=__lowerCAmelCase , extra_args_str='''--sharded_ddp zero_dp_2''' , predict_with_generate=__lowerCAmelCase ) @unittest.skip('''Requires an update of the env running those tests''' ) @require_torch_multi_gpu @require_fairscale def A ( self ): """simple docstring""" self.run_seqaseq_quick( distributed=__lowerCAmelCase , extra_args_str='''--sharded_ddp zero_dp_2 --fp16''' , predict_with_generate=__lowerCAmelCase ) @require_apex @require_torch_gpu def A ( self ): """simple docstring""" # 
XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same # program and it breaks other tests that run from the same pytest worker, therefore until this is # sorted out it must be run only in an external program, that is distributed=True in this # test and only under one or more gpus - if we want cpu will need to make a special test # # specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via # 2nd main() call it botches the future eval. # self.run_seqaseq_quick(distributed=__lowerCAmelCase , extra_args_str='''--fp16 --fp16_backend=apex''' ) # test 2nd time - was getting eval_loss': nan' # to reproduce the problem set distributed=False self.run_seqaseq_quick(distributed=__lowerCAmelCase , extra_args_str='''--fp16 --fp16_backend=apex''' ) @parameterized.expand(['''base''', '''low''', '''high''', '''mixed'''] ) @require_torch_multi_gpu def A ( self , __lowerCAmelCase ): """simple docstring""" # as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout __magic_name__ :Any = { # test with the default log_level - should be info and thus log info once '''base''': {'''extra_args_str''': '''''', '''n_matches''': 1}, # test with low log_level and log_level_replica - should be noisy on all processes # now the info string should appear twice on 2 processes '''low''': {'''extra_args_str''': '''--log_level debug --log_level_replica debug''', '''n_matches''': 2}, # test with high log_level and low log_level_replica # now the info string should appear once only on the replica '''high''': {'''extra_args_str''': '''--log_level error --log_level_replica debug''', '''n_matches''': 1}, # test with high log_level and log_level_replica - should be quiet on all processes '''mixed''': {'''extra_args_str''': '''--log_level error --log_level_replica error''', '''n_matches''': 0}, } __magic_name__ :Optional[Any] = experiments[experiment_id] __magic_name__ :List[Any] = {'''distributed''': True, '''predict_with_generate''': False, '''do_eval''': False, '''do_predict''': False} __magic_name__ :Optional[int] = '''Running training''' with CaptureStderr() as cl: self.run_seqaseq_quick(**__lowerCAmelCase , extra_args_str=data['''extra_args_str'''] ) __magic_name__ :int = len(re.findall(__lowerCAmelCase , cl.err ) ) self.assertEqual(__lowerCAmelCase , data['''n_matches'''] ) @slow def A ( self ): """simple docstring""" __magic_name__ :List[str] = self.run_trainer( eval_steps=2 , max_len=1_2_8 , model_name=__lowerCAmelCase , learning_rate=3E-4 , num_train_epochs=1_0 , distributed=__lowerCAmelCase , ) # Check metrics __magic_name__ :Optional[int] = TrainerState.load_from_json(os.path.join(__lowerCAmelCase , '''trainer_state.json''' ) ).log_history __magic_name__ :List[str] = [log for log in logs if '''eval_loss''' in log.keys()] __magic_name__ :Any = eval_metrics[0] __magic_name__ :int = eval_metrics[-1] assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing" assert isinstance(last_step_stats['''eval_bleu'''] , __lowerCAmelCase ) # test if do_predict saves generations and metrics __magic_name__ :List[Any] = os.listdir(__lowerCAmelCase ) __magic_name__ :List[str] = {os.path.basename(__lowerCAmelCase ) for p in contents} assert "generated_predictions.txt" in contents assert "predict_results.json" in contents @slow @require_bitsandbytes def A ( self ): """simple docstring""" from transformers.training_args import OptimizerNames def train_and_return_metrics(__lowerCAmelCase ) -> Tuple[int, float]: __magic_name__ 
:str = '''--skip_memory_metrics 0''' __magic_name__ :Dict = self.run_trainer( max_len=1_2_8 , model_name=__lowerCAmelCase , learning_rate=3E-4 , num_train_epochs=1 , optim=__lowerCAmelCase , distributed=__lowerCAmelCase , extra_args_str=__lowerCAmelCase , do_eval=__lowerCAmelCase , do_predict=__lowerCAmelCase , n_gpus_to_use=1 , ) # Check metrics __magic_name__ :Optional[Any] = TrainerState.load_from_json(Path(__lowerCAmelCase , '''trainer_state.json''' ) ).log_history __magic_name__ :int = int(logs[0]['''train_mem_gpu_peaked_delta'''] / 2**2_0 ) __magic_name__ :Optional[Any] = int(logs[0]['''train_mem_gpu_alloc_delta'''] / 2**2_0 ) __magic_name__ :Any = logs[0]['''train_loss'''] return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss __magic_name__ , __magic_name__ , __magic_name__ :int = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value ) __magic_name__ , __magic_name__ , __magic_name__ :Tuple = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value ) __magic_name__ :Tuple = gpu_alloc_mem_orig - gpu_alloc_mem_bnb __magic_name__ :Tuple = gpu_peak_mem_orig + gpu_alloc_mem_orig __magic_name__ :List[Any] = gpu_peak_mem_bnb + gpu_alloc_mem_bnb __magic_name__ :Optional[int] = gpu_total_mem_orig - gpu_total_mem_bnb # sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which # doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized # in 2 bytes and the diff in optim memory usage is derived as so: # # - normal 25*8=~200MB (8 bytes per param) # - bnb 25*2= ~50MB (2 bytes per param) # # Thus we should expect ~150MB total memory saved. # # Peak memory should be the same - the total should be different by about that same margin # # After leaving a small margin to accommodate for differences between gpus let's check # that we have at least 120MB in savings __magic_name__ :Optional[Any] = 1_2_0 # uncomment the following if this test starts failing - requires py38 for a new print feature # gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb # print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB") # print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB") # print(f"{gpu_alloc_mem_diff=}MB") # print(f"{gpu_peak_mem_diff=}MB") # print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB") # print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB") self.assertGreater( __lowerCAmelCase , __lowerCAmelCase , '''should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got''' F''' a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and''' F''' gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB''' , ) self.assertGreater( __lowerCAmelCase , __lowerCAmelCase , '''should use ~150MB less total gpu memory with BNB, compared to without it for this model but got''' F''' a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and''' F''' gpu_total_mem_bnb={gpu_total_mem_bnb}MB''' , ) self.assertEqual( __lowerCAmelCase , __lowerCAmelCase , F'''loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}''' ) def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = 3E-3 , __lowerCAmelCase = "adafactor" , __lowerCAmelCase = False , __lowerCAmelCase = None , __lowerCAmelCase = 0 , __lowerCAmelCase = True , __lowerCAmelCase = True , __lowerCAmelCase = True , __lowerCAmelCase = True , __lowerCAmelCase = None , ): """simple docstring""" 
__magic_name__ :int = self.test_file_dir / '''../fixtures/tests_samples/wmt_en_ro''' __magic_name__ :Dict = self.get_auto_remove_tmp_dir() __magic_name__ :Tuple = F''' --model_name_or_path {model_name} --train_file {data_dir}/train.json --validation_file {data_dir}/val.json --test_file {data_dir}/test.json --output_dir {output_dir} --overwrite_output_dir --max_train_samples 8 --max_source_length {max_len} --max_target_length {max_len} --do_train --num_train_epochs {str(__lowerCAmelCase )} --per_device_train_batch_size 4 --learning_rate {learning_rate} --warmup_steps 8 --logging_steps 0 --logging_strategy no --save_steps {str(__lowerCAmelCase )} --group_by_length --label_smoothing_factor 0.1 --target_lang ro_RO --source_lang en_XX '''.split() __magic_name__ :str = F''' --do_eval --per_device_eval_batch_size 4 --max_eval_samples 8 --val_max_target_length {max_len} --evaluation_strategy steps --eval_steps {str(__lowerCAmelCase )} '''.split() __magic_name__ :Dict = ''' --do_predict '''.split() __magic_name__ :Optional[int] = [] if do_train: args += args_train if do_eval: args += args_eval if do_predict: args += args_predict if predict_with_generate: args += "--predict_with_generate".split() if do_train: if optim == "adafactor": args += "--adafactor".split() else: args += F'''--optim {optim}'''.split() if extra_args_str is not None: args += extra_args_str.split() if distributed: if n_gpus_to_use is None: __magic_name__ :List[Any] = get_gpu_count() __magic_name__ :Tuple = get_torch_dist_unique_port() __magic_name__ :Union[str, Any] = F''' -m torch.distributed.run --nproc_per_node={n_gpus_to_use} --master_port={master_port} {self.examples_dir_str}/pytorch/translation/run_translation.py '''.split() __magic_name__ :Any = [sys.executable] + distributed_args + args # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die execute_subprocess_async(__lowerCAmelCase , env=self.get_env() ) else: __magic_name__ :List[Any] = ['''run_translation.py'''] + args with patch.object(__lowerCAmelCase , '''argv''' , __lowerCAmelCase ): main() return output_dir
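The ~150MB figure asserted in the bitsandbytes test follows from the optimizer-state arithmetic quoted in its comment; spelled out (parameter counts are the ones quoted there):

quantized_params = 25_000_000        # 54M total minus 29M fp32 nn.Embedding params
adamw_bytes_per_param = 8            # two fp32 Adam moments
bnb_bytes_per_param = 2              # 8-bit moments
saved_mb = quantized_params * (adamw_bytes_per_param - bnb_bytes_per_param) / 2**20
print(f"{saved_mb:.0f} MB")          # ~143 MB, compared against the 120 MB margin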
0
1
import warnings

from ...utils import is_sklearn_available, requires_backends


if is_sklearn_available():
    from scipy.stats import pearsonr, spearmanr
    from sklearn.metrics import f1_score, matthews_corrcoef


DEPRECATION_WARNING = (
    "This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate "
    "library. You can have a look at this example script for pointers: "
    "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py"
)


def simple_accuracy(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(simple_accuracy, "sklearn")
    return (preds == labels).mean()


def acc_and_f1(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(acc_and_f1, "sklearn")
    acc = simple_accuracy(preds, labels)
    f1 = f1_score(y_true=labels, y_pred=preds)
    return {
        "acc": acc,
        "f1": f1,
        "acc_and_f1": (acc + f1) / 2,
    }


def pearson_and_spearman(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(pearson_and_spearman, "sklearn")
    pearson_corr = pearsonr(preds, labels)[0]
    spearman_corr = spearmanr(preds, labels)[0]
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
        "corr": (pearson_corr + spearman_corr) / 2,
    }


def glue_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(glue_compute_metrics, "sklearn")
    assert len(preds) == len(labels), f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}"
    if task_name == "cola":
        return {"mcc": matthews_corrcoef(labels, preds)}
    elif task_name == "sst-2":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "mrpc":
        return acc_and_f1(preds, labels)
    elif task_name == "sts-b":
        return pearson_and_spearman(preds, labels)
    elif task_name == "qqp":
        return acc_and_f1(preds, labels)
    elif task_name == "mnli":
        return {"mnli/acc": simple_accuracy(preds, labels)}
    elif task_name == "mnli-mm":
        return {"mnli-mm/acc": simple_accuracy(preds, labels)}
    elif task_name in ("qnli", "rte", "wnli", "hans"):
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)


def xnli_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(xnli_compute_metrics, "sklearn")
    if len(preds) != len(labels):
        raise ValueError(f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}")
    if task_name == "xnli":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)
1
import sys

N = (
    "73167176531330624919225119674426574742355349194934"
    "96983520312774506326239578318016984801869478851843"
    "85861560789112949495459501737958331952853208805511"
    "12540698747158523863050715693290963295227443043557"
    "66896648950445244523161731856403098711121722383113"
    "62229893423380308135336276614282806444486645238749"
    "30358907296290491560440772390713810515859307960866"
    "70172427121883998797908792274921901699720888093776"
    "65727333001053367881220235421809751254540594752243"
    "52584907711670556013604839586446706324415722155397"
    "53697817977846174064955149290862569321978468622482"
    "83972241375657056057490261407972968652414535100474"
    "82166370484403199890008895243450658541227588666881"
    "16427171479924442928230863465674813919123162824586"
    "17866458359124566529476545682848912883142607690042"
    "24219022671055626321111109370544217506941658960408"
    "07198403850962455444362981230987879927244284909188"
    "84580156166097919133875499200524063689912560717606"
    "05886116467109405077541002256983155200055935729725"
    "71636269561882670428252483600823257530420752963450"
)


def solution(n: str = N) -> int:
    """Find the thirteen adjacent digits in the 1000-digit number n that have
    the greatest product."""
    largest_product = -sys.maxsize - 1
    for i in range(len(n) - 12):
        product = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product


if __name__ == "__main__":
    print(f"{solution() = }")
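The sliding window can also be written with math.prod (Python 3.8+); an equivalent one-liner over the same N:

import math

def solution_prod(n: str = N) -> int:
    # Slide a 13-digit window across n and take the best product
    return max(math.prod(int(d) for d in n[i : i + 13]) for i in range(len(n) - 12))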
0
0
def solution(length: int = 50) -> int:
    """Count the ways a row of the given length can be filled with black square
    units and red oblong blocks of minimum length three."""
    ways_number = [1] * (length + 1)

    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]

            ways_number[row_length] += 1

    return ways_number[length]


if __name__ == "__main__":
    print(f"{solution() = }")
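Project Euler 114's worked example states that a row of length seven admits exactly seventeen arrangements, which makes a handy sanity check:

assert solution(7) == 17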
2
# Baconian-style substitution table. Note that the codes for "j" and "v" here
# deviate from the classical Baconian cipher; the table is kept as given since
# decode_dict is built as its exact inverse, so round-trips stay consistent.
encode_dict = {
    "a": "AAAAA", "b": "AAAAB", "c": "AAABA", "d": "AAABB", "e": "AABAA",
    "f": "AABAB", "g": "AABBA", "h": "AABBB", "i": "ABAAA", "j": "BBBAA",
    "k": "ABAAB", "l": "ABABA", "m": "ABABB", "n": "ABBAA", "o": "ABBAB",
    "p": "ABBBA", "q": "ABBBB", "r": "BAAAA", "s": "BAAAB", "t": "BAABA",
    "u": "BAABB", "v": "BBBAB", "w": "BABAA", "x": "BABAB", "y": "BABBA",
    "z": "BABBB", " ": " ",
}
decode_dict = {value: key for key, value in encode_dict.items()}


def encode(word: str) -> str:
    encoded = ""
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception("encode() accepts only letters of the alphabet and spaces")
    return encoded


def decode(coded: str) -> str:
    if set(coded) - {"A", "B", " "} != set():
        raise Exception("decode() accepts only 'A', 'B' and spaces")
    decoded = ""
    for word in coded.split():
        while len(word) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()


if __name__ == "__main__":
    from doctest import testmod

    testmod()
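A quick round trip through the substitution table above:

cipher_text = encode("hello")
assert cipher_text == "AABBBAABAAABABAABABAABBAB"
assert decode(cipher_text) == "hello"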
0
0
'''simple docstring''' import uuid from typing import Any, Dict, List, Optional, Union from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf if is_torch_available(): import torch lowerCAmelCase : Optional[Any] = logging.get_logger(__name__) class SCREAMING_SNAKE_CASE__ : def __init__( self , A_ = None , A_ = None , A_=None , A_=None )-> Optional[Any]: '''simple docstring''' if not conversation_id: UpperCamelCase = uuid.uuida() if past_user_inputs is None: UpperCamelCase = [] if generated_responses is None: UpperCamelCase = [] UpperCamelCase = conversation_id UpperCamelCase = past_user_inputs UpperCamelCase = generated_responses UpperCamelCase = text def __eq__( self , A_ )-> List[Any]: '''simple docstring''' if not isinstance(A_ , A_ ): return False if self.uuid == other.uuid: return True return ( self.new_user_input == other.new_user_input and self.past_user_inputs == other.past_user_inputs and self.generated_responses == other.generated_responses ) def UpperCAmelCase_ ( self , A_ , A_ = False )-> int: '''simple docstring''' if self.new_user_input: if overwrite: logger.warning( F'''User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten ''' F'''with: "{text}".''' ) UpperCamelCase = text else: logger.warning( F'''User input added while unprocessed input was existing: "{self.new_user_input}" new input ''' F'''ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input''' ) else: UpperCamelCase = text def UpperCAmelCase_ ( self )-> Any: '''simple docstring''' if self.new_user_input: self.past_user_inputs.append(self.new_user_input ) UpperCamelCase = None def UpperCAmelCase_ ( self , A_ )-> int: '''simple docstring''' self.generated_responses.append(A_ ) def UpperCAmelCase_ ( self )-> List[str]: '''simple docstring''' for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ): yield True, user_input yield False, generated_response if self.new_user_input: yield True, self.new_user_input def __repr__( self )-> Any: '''simple docstring''' UpperCamelCase = F'''Conversation id: {self.uuid} \n''' for is_user, text in self.iter_texts(): UpperCamelCase = 'user' if is_user else 'bot' output += F'''{name} >> {text} \n''' return output @add_end_docstrings( snake_case_ , R""" min_length_for_response (`int`, *optional*, defaults to 32): The minimum length (in number of tokens) for a response. minimum_tokens (`int`, *optional*, defaults to 10): The minimum length of tokens to leave for a response. 
""" , ) class SCREAMING_SNAKE_CASE__ ( snake_case_): def __init__( self , *A_ , **A_ )-> Any: '''simple docstring''' super().__init__(*A_ , **A_ ) if self.tokenizer.pad_token_id is None: UpperCamelCase = self.tokenizer.eos_token def UpperCAmelCase_ ( self , A_=None , A_=None , A_=None , **A_ )-> Union[str, Any]: '''simple docstring''' UpperCamelCase = {} UpperCamelCase = {} UpperCamelCase = {} if min_length_for_response is not None: UpperCamelCase = min_length_for_response if minimum_tokens is not None: UpperCamelCase = minimum_tokens if "max_length" in generate_kwargs: UpperCamelCase = generate_kwargs['max_length'] # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length) if clean_up_tokenization_spaces is not None: UpperCamelCase = clean_up_tokenization_spaces if generate_kwargs: forward_params.update(A_ ) return preprocess_params, forward_params, postprocess_params def __call__( self , A_ , A_=0 , **A_ )-> Any: '''simple docstring''' UpperCamelCase = super().__call__(A_ , num_workers=A_ , **A_ ) if isinstance(A_ , A_ ) and len(A_ ) == 1: return outputs[0] return outputs def UpperCAmelCase_ ( self , A_ , A_=32 )-> Dict[str, Any]: '''simple docstring''' if not isinstance(A_ , A_ ): raise ValueError('ConversationalPipeline, expects Conversation as inputs' ) if conversation.new_user_input is None: raise ValueError( F'''Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. ''' 'Add user inputs with the conversation\'s `add_user_input` method' ) if hasattr(self.tokenizer , '_build_conversation_input_ids' ): UpperCamelCase = self.tokenizer._build_conversation_input_ids(A_ ) else: # If the tokenizer cannot handle conversations, we default to only the old version UpperCamelCase = self._legacy_parse_and_tokenize(A_ ) if self.framework == "pt": UpperCamelCase = torch.LongTensor([input_ids] ) elif self.framework == "tf": UpperCamelCase = tf.constant([input_ids] ) return {"input_ids": input_ids, "conversation": conversation} def UpperCAmelCase_ ( self , A_ , A_=10 , **A_ )-> Optional[Any]: '''simple docstring''' UpperCamelCase = generate_kwargs.get('max_length' , self.model.config.max_length ) UpperCamelCase = model_inputs['input_ids'].shape[1] if max_length - minimum_tokens < n: logger.warning(F'''Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})''' ) UpperCamelCase = max_length - minimum_tokens UpperCamelCase = model_inputs['input_ids'][:, -trim:] if "attention_mask" in model_inputs: UpperCamelCase = model_inputs['attention_mask'][:, -trim:] UpperCamelCase = model_inputs.pop('conversation' ) UpperCamelCase = max_length UpperCamelCase = self.model.generate(**A_ , **A_ ) if self.model.config.is_encoder_decoder: UpperCamelCase = 1 else: UpperCamelCase = n return {"output_ids": output_ids[:, start_position:], "conversation": conversation} def UpperCAmelCase_ ( self , A_ , A_=True )-> Tuple: '''simple docstring''' UpperCamelCase = model_outputs['output_ids'] UpperCamelCase = self.tokenizer.decode( output_ids[0] , skip_special_tokens=A_ , clean_up_tokenization_spaces=A_ , ) UpperCamelCase = model_outputs['conversation'] conversation.mark_processed() conversation.append_response(A_ ) return conversation def UpperCAmelCase_ ( self , A_ )-> Dict: '''simple docstring''' UpperCamelCase = self.tokenizer.eos_token_id UpperCamelCase = [] for is_user, text in conversation.iter_texts(): if eos_token_id is not None: input_ids.extend(self.tokenizer.encode(A_ , add_special_tokens=A_ ) + [eos_token_id] ) else: 
input_ids.extend(self.tokenizer.encode(A_ , add_special_tokens=A_ ) ) if len(A_ ) > self.tokenizer.model_max_length: UpperCamelCase = input_ids[-self.tokenizer.model_max_length :] return input_ids
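Typical use of the pipeline class defined above; the checkpoint id is an assumption, any conversational model on the Hub works:

from transformers import Conversation, pipeline

chatbot = pipeline("conversational", model="microsoft/DialoGPT-small")
conversation = Conversation("Going to the movies tonight - any suggestions?")
conversation = chatbot(conversation)
print(conversation.generated_responses[-1])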
3
import argparse

import torch
from torch import nn

from transformers import M2M100Config, M2M100ForConditionalGeneration


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_m2m100_checkpoint_from_disk(checkpoint_path):
    m2m_100 = torch.load(checkpoint_path, map_location="cpu")
    args = m2m_100["args"] or m2m_100["cfg"]["model"]
    state_dict = m2m_100["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    config = M2M100Config(
        vocab_size=vocab_size,
        max_position_embeddings=1024,
        encoder_layers=args.encoder_layers,
        decoder_layers=args.decoder_layers,
        encoder_attention_heads=args.encoder_attention_heads,
        decoder_attention_heads=args.decoder_attention_heads,
        encoder_ffn_dim=args.encoder_ffn_embed_dim,
        decoder_ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.encoder_embed_dim,
        encoder_layerdrop=args.encoder_layerdrop,
        decoder_layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="relu",
    )

    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = M2M100ForConditionalGeneration(config)
    model.model.load_state_dict(state_dict, strict=False)
    model.lm_head = make_linear_from_emb(model.model.shared)
    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    model = convert_fairseq_m2m100_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
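Once converted, the dump folder loads back like any checkpoint; paths here are hypothetical:

from transformers import M2M100ForConditionalGeneration

model = M2M100ForConditionalGeneration.from_pretrained("converted_m2m100")  # the pytorch_dump_folder_path above
print(sum(p.numel() for p in model.parameters()))  # quick sanity check on model size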
0
0
"""simple docstring""" class a : def __init__( self , _snake_case ): """simple docstring""" lowerCAmelCase = size lowerCAmelCase = [0] * size lowerCAmelCase = [0] * size @staticmethod def UpperCamelCase__ ( _snake_case ): """simple docstring""" return index | (index + 1) @staticmethod def UpperCamelCase__ ( _snake_case ): """simple docstring""" return (index & (index + 1)) - 1 def UpperCamelCase__ ( self , _snake_case , _snake_case ): """simple docstring""" lowerCAmelCase = value while index < self.size: lowerCAmelCase = self.get_prev(_snake_case ) + 1 if current_left_border == index: lowerCAmelCase = value else: lowerCAmelCase = max(_snake_case , _snake_case , _snake_case ) lowerCAmelCase = self.get_next(_snake_case ) def UpperCamelCase__ ( self , _snake_case , _snake_case ): """simple docstring""" right -= 1 # Because of right is exclusive lowerCAmelCase = 0 while left <= right: lowerCAmelCase = self.get_prev(_snake_case ) if left <= current_left: lowerCAmelCase = max(_snake_case , self.tree[right] ) lowerCAmelCase = current_left else: lowerCAmelCase = max(_snake_case , self.arr[right] ) right -= 1 return result if __name__ == "__main__": import doctest doctest.testmod()
4
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_canine": ["CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP", "CanineConfig"],
    "tokenization_canine": ["CanineTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_canine"] = [
        "CANINE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CanineForMultipleChoice",
        "CanineForQuestionAnswering",
        "CanineForSequenceClassification",
        "CanineForTokenClassification",
        "CanineLayer",
        "CanineModel",
        "CaninePreTrainedModel",
        "load_tf_weights_in_canine",
    ]


if TYPE_CHECKING:
    from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
    from .tokenization_canine import CanineTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_canine import (
            CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
            CanineForMultipleChoice,
            CanineForQuestionAnswering,
            CanineForSequenceClassification,
            CanineForTokenClassification,
            CanineLayer,
            CanineModel,
            CaninePreTrainedModel,
            load_tf_weights_in_canine,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
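With this in place the submodule resolves imports lazily: nothing under modeling_canine is loaded until an attribute is first touched. A small sketch of the user-facing effect (no-argument instantiation defaults assumed):

from transformers import CanineConfig, CanineTokenizer  # resolved through _LazyModule on first access

config = CanineConfig()        # default CANINE hyperparameters
tokenizer = CanineTokenizer()  # CANINE tokenizes raw unicode code points, so no vocab file is required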
0
0
'''simple docstring''' import json import logging import os import sys from time import time from unittest.mock import patch from transformers.testing_utils import TestCasePlus, require_torch_tpu logging.basicConfig(level=logging.DEBUG) _lowercase = logging.getLogger() def A (__lowerCamelCase :str ): _lowerCAmelCase = {} _lowerCAmelCase = os.path.join(__lowerCamelCase , """all_results.json""" ) if os.path.exists(__lowerCamelCase ): with open(__lowerCamelCase , """r""" ) as f: _lowerCAmelCase = json.load(__lowerCamelCase ) else: raise ValueError(f'can\'t find {path}' ) return results _lowercase = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) @require_torch_tpu class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE ): '''simple docstring''' def _lowercase ( self ): """simple docstring""" import xla_spawn _lowerCAmelCase = self.get_auto_remove_tmp_dir() _lowerCAmelCase = F'\n ./examples/pytorch/text-classification/run_glue.py\n --num_cores=8\n ./examples/pytorch/text-classification/run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --do_train\n --do_eval\n --debug tpu_metrics_debug\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --max_steps=10\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n '.split() with patch.object(_lowercase , """argv""" , _lowercase ): _lowerCAmelCase = time() xla_spawn.main() _lowerCAmelCase = time() _lowerCAmelCase = get_results(_lowercase ) self.assertGreaterEqual(result["""eval_accuracy"""] , 0.75 ) # Assert that the script takes less than 500 seconds to make sure it doesn't hang. self.assertLess(end - start , 500 ) def _lowercase ( self ): """simple docstring""" import xla_spawn _lowerCAmelCase = """ ./tests/test_trainer_tpu.py --num_cores=8 ./tests/test_trainer_tpu.py """.split() with patch.object(_lowercase , """argv""" , _lowercase ): xla_spawn.main()
5
import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class lowerCamelCase_ ( lowerCamelCase ): a__ = ['''image_processor''', '''tokenizer'''] a__ = '''ChineseCLIPImageProcessor''' a__ = ('''BertTokenizer''', '''BertTokenizerFast''') def __init__( self , __lowerCAmelCase=None , __lowerCAmelCase=None , **__lowerCAmelCase ): """simple docstring""" __magic_name__ :Tuple = None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''' , __lowerCAmelCase , ) __magic_name__ :Optional[Any] = kwargs.pop('''feature_extractor''' ) __magic_name__ :Tuple = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) super().__init__(__lowerCAmelCase , __lowerCAmelCase ) __magic_name__ :List[Any] = self.image_processor def __call__( self , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , **__lowerCAmelCase ): """simple docstring""" if text is None and images is None: raise ValueError('''You have to specify either text or images. Both cannot be none.''' ) if text is not None: __magic_name__ :int = self.tokenizer(__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase ) if images is not None: __magic_name__ :Dict = self.image_processor(__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase ) if text is not None and images is not None: __magic_name__ :Union[str, Any] = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**__lowerCAmelCase ) , tensor_type=__lowerCAmelCase ) def A ( self , *__lowerCAmelCase , **__lowerCAmelCase ): """simple docstring""" return self.tokenizer.batch_decode(*__lowerCAmelCase , **__lowerCAmelCase ) def A ( self , *__lowerCAmelCase , **__lowerCAmelCase ): """simple docstring""" return self.tokenizer.decode(*__lowerCAmelCase , **__lowerCAmelCase ) @property def A ( self ): """simple docstring""" __magic_name__ :List[Any] = self.tokenizer.model_input_names __magic_name__ :Any = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) @property def A ( self ): """simple docstring""" warnings.warn( '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , __lowerCAmelCase , ) return self.image_processor_class
0
0
import collections import gzip import os import urllib import numpy from tensorflow.python.framework import dtypes, random_seed from tensorflow.python.platform import gfile from tensorflow.python.util.deprecation import deprecated _lowerCamelCase = collections.namedtuple('_Datasets', ['train', 'validation', 'test']) # CVDF mirror of http://yann.lecun.com/exdb/mnist/ _lowerCamelCase = 'https://storage.googleapis.com/cvdf-datasets/mnist/' def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Union[str, Any] ): SCREAMING_SNAKE_CASE__ = numpy.dtype(numpy.uintaa ).newbyteorder(""">""" ) return numpy.frombuffer(bytestream.read(4 ) , dtype=UpperCamelCase__ )[0] @deprecated(UpperCamelCase__ , """Please use tf.data to implement this functionality.""" ) def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: List[Any] ): print("""Extracting""" , f.name ) with gzip.GzipFile(fileobj=UpperCamelCase__ ) as bytestream: SCREAMING_SNAKE_CASE__ = _readaa(UpperCamelCase__ ) if magic != 2_051: raise ValueError( """Invalid magic number %d in MNIST image file: %s""" % (magic, f.name) ) SCREAMING_SNAKE_CASE__ = _readaa(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = _readaa(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = _readaa(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = bytestream.read(rows * cols * num_images ) SCREAMING_SNAKE_CASE__ = numpy.frombuffer(UpperCamelCase__ , dtype=numpy.uinta ) SCREAMING_SNAKE_CASE__ = data.reshape(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , 1 ) return data @deprecated(UpperCamelCase__ , """Please use tf.one_hot on tensors.""" ) def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: List[Any] , UpperCamelCase__: str ): SCREAMING_SNAKE_CASE__ = labels_dense.shape[0] SCREAMING_SNAKE_CASE__ = numpy.arange(UpperCamelCase__ ) * num_classes SCREAMING_SNAKE_CASE__ = numpy.zeros((num_labels, num_classes) ) SCREAMING_SNAKE_CASE__ = 1 return labels_one_hot @deprecated(UpperCamelCase__ , """Please use tf.data to implement this functionality.""" ) def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Optional[Any] , UpperCamelCase__: Union[str, Any]=False , UpperCamelCase__: List[Any]=10 ): print("""Extracting""" , f.name ) with gzip.GzipFile(fileobj=UpperCamelCase__ ) as bytestream: SCREAMING_SNAKE_CASE__ = _readaa(UpperCamelCase__ ) if magic != 2_049: raise ValueError( """Invalid magic number %d in MNIST label file: %s""" % (magic, f.name) ) SCREAMING_SNAKE_CASE__ = _readaa(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = bytestream.read(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = numpy.frombuffer(UpperCamelCase__ , dtype=numpy.uinta ) if one_hot: return _dense_to_one_hot(UpperCamelCase__ , UpperCamelCase__ ) return labels class UpperCamelCase_ : @deprecated( __A , """Please use alternatives such as official/mnist/_DataSet.py""" """ from tensorflow/models.""" , ) def __init__( self :Optional[Any] , __A :List[Any] , __A :Tuple , __A :Union[str, Any]=False , __A :int=False , __A :Optional[Any]=dtypes.floataa , __A :Any=True , __A :Optional[Any]=None , ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = random_seed.get_seed(__A ) # If op level seed is not set, use whatever graph level seed is returned numpy.random.seed(seeda if seed is None else seeda ) SCREAMING_SNAKE_CASE__ = dtypes.as_dtype(__A ).base_dtype if dtype not in (dtypes.uinta, dtypes.floataa): raise TypeError("""Invalid image dtype %r, expected uint8 or float32""" % dtype ) if fake_data: SCREAMING_SNAKE_CASE__ = 1_0000 SCREAMING_SNAKE_CASE__ = one_hot else: assert ( images.shape[0] == labels.shape[0] ), 
f'''images.shape: {images.shape} labels.shape: {labels.shape}''' SCREAMING_SNAKE_CASE__ = images.shape[0] # Convert shape from [num examples, rows, columns, depth] # to [num examples, rows*columns] (assuming depth == 1) if reshape: assert images.shape[3] == 1 SCREAMING_SNAKE_CASE__ = images.reshape( images.shape[0] , images.shape[1] * images.shape[2] ) if dtype == dtypes.floataa: # Convert from [0, 255] -> [0.0, 1.0]. SCREAMING_SNAKE_CASE__ = images.astype(numpy.floataa ) SCREAMING_SNAKE_CASE__ = numpy.multiply(__A , 1.0 / 2_5_5.0 ) SCREAMING_SNAKE_CASE__ = images SCREAMING_SNAKE_CASE__ = labels SCREAMING_SNAKE_CASE__ = 0 SCREAMING_SNAKE_CASE__ = 0 @property def _snake_case ( self :str ) -> List[str]: """simple docstring""" return self._images @property def _snake_case ( self :str ) -> List[str]: """simple docstring""" return self._labels @property def _snake_case ( self :List[str] ) -> str: """simple docstring""" return self._num_examples @property def _snake_case ( self :List[Any] ) -> Optional[Any]: """simple docstring""" return self._epochs_completed def _snake_case ( self :Union[str, Any] , __A :Union[str, Any] , __A :str=False , __A :str=True ) -> List[str]: """simple docstring""" if fake_data: SCREAMING_SNAKE_CASE__ = [1] * 784 SCREAMING_SNAKE_CASE__ = [1] + [0] * 9 if self.one_hot else 0 return ( [fake_image for _ in range(__A )], [fake_label for _ in range(__A )], ) SCREAMING_SNAKE_CASE__ = self._index_in_epoch # Shuffle for the first epoch if self._epochs_completed == 0 and start == 0 and shuffle: SCREAMING_SNAKE_CASE__ = numpy.arange(self._num_examples ) numpy.random.shuffle(__A ) SCREAMING_SNAKE_CASE__ = self.images[perma] SCREAMING_SNAKE_CASE__ = self.labels[perma] # Go to the next epoch if start + batch_size > self._num_examples: # Finished epoch self._epochs_completed += 1 # Get the rest examples in this epoch SCREAMING_SNAKE_CASE__ = self._num_examples - start SCREAMING_SNAKE_CASE__ = self._images[start : self._num_examples] SCREAMING_SNAKE_CASE__ = self._labels[start : self._num_examples] # Shuffle the data if shuffle: SCREAMING_SNAKE_CASE__ = numpy.arange(self._num_examples ) numpy.random.shuffle(__A ) SCREAMING_SNAKE_CASE__ = self.images[perm] SCREAMING_SNAKE_CASE__ = self.labels[perm] # Start next epoch SCREAMING_SNAKE_CASE__ = 0 SCREAMING_SNAKE_CASE__ = batch_size - rest_num_examples SCREAMING_SNAKE_CASE__ = self._index_in_epoch SCREAMING_SNAKE_CASE__ = self._images[start:end] SCREAMING_SNAKE_CASE__ = self._labels[start:end] return ( numpy.concatenate((images_rest_part, images_new_part) , axis=0 ), numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ), ) else: self._index_in_epoch += batch_size SCREAMING_SNAKE_CASE__ = self._index_in_epoch return self._images[start:end], self._labels[start:end] @deprecated(UpperCamelCase__ , """Please write your own downloading logic.""" ) def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: List[Any] , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: List[Any] ): if not gfile.Exists(UpperCamelCase__ ): gfile.MakeDirs(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = os.path.join(UpperCamelCase__ , UpperCamelCase__ ) if not gfile.Exists(UpperCamelCase__ ): urllib.request.urlretrieve(UpperCamelCase__ , UpperCamelCase__ ) # noqa: S310 with gfile.GFile(UpperCamelCase__ ) as f: SCREAMING_SNAKE_CASE__ = f.size() print("""Successfully downloaded""" , UpperCamelCase__ , UpperCamelCase__ , """bytes.""" ) return filepath @deprecated( UpperCamelCase__ , """Please use alternatives such as:""" """ tensorflow_datasets.load('mnist')""" ) def 
SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Optional[Any] , UpperCamelCase__: Optional[Any]=False , UpperCamelCase__: Dict=False , UpperCamelCase__: Union[str, Any]=dtypes.floataa , UpperCamelCase__: Any=True , UpperCamelCase__: List[Any]=5_000 , UpperCamelCase__: List[Any]=None , UpperCamelCase__: List[Any]=DEFAULT_SOURCE_URL , ): if fake_data: def fake(): return _DataSet( [] , [] , fake_data=UpperCamelCase__ , one_hot=UpperCamelCase__ , dtype=UpperCamelCase__ , seed=UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = fake() SCREAMING_SNAKE_CASE__ = fake() SCREAMING_SNAKE_CASE__ = fake() return _Datasets(train=UpperCamelCase__ , validation=UpperCamelCase__ , test=UpperCamelCase__ ) if not source_url: # empty string check SCREAMING_SNAKE_CASE__ = DEFAULT_SOURCE_URL SCREAMING_SNAKE_CASE__ = """train-images-idx3-ubyte.gz""" SCREAMING_SNAKE_CASE__ = """train-labels-idx1-ubyte.gz""" SCREAMING_SNAKE_CASE__ = """t10k-images-idx3-ubyte.gz""" SCREAMING_SNAKE_CASE__ = """t10k-labels-idx1-ubyte.gz""" SCREAMING_SNAKE_CASE__ = _maybe_download( UpperCamelCase__ , UpperCamelCase__ , source_url + train_images_file ) with gfile.Open(UpperCamelCase__ , """rb""" ) as f: SCREAMING_SNAKE_CASE__ = _extract_images(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = _maybe_download( UpperCamelCase__ , UpperCamelCase__ , source_url + train_labels_file ) with gfile.Open(UpperCamelCase__ , """rb""" ) as f: SCREAMING_SNAKE_CASE__ = _extract_labels(UpperCamelCase__ , one_hot=UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = _maybe_download( UpperCamelCase__ , UpperCamelCase__ , source_url + test_images_file ) with gfile.Open(UpperCamelCase__ , """rb""" ) as f: SCREAMING_SNAKE_CASE__ = _extract_images(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = _maybe_download( UpperCamelCase__ , UpperCamelCase__ , source_url + test_labels_file ) with gfile.Open(UpperCamelCase__ , """rb""" ) as f: SCREAMING_SNAKE_CASE__ = _extract_labels(UpperCamelCase__ , one_hot=UpperCamelCase__ ) if not 0 <= validation_size <= len(UpperCamelCase__ ): SCREAMING_SNAKE_CASE__ = ( """Validation size should be between 0 and """ f'''{len(UpperCamelCase__ )}. Received: {validation_size}.''' ) raise ValueError(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = train_images[:validation_size] SCREAMING_SNAKE_CASE__ = train_labels[:validation_size] SCREAMING_SNAKE_CASE__ = train_images[validation_size:] SCREAMING_SNAKE_CASE__ = train_labels[validation_size:] SCREAMING_SNAKE_CASE__ = {"""dtype""": dtype, """reshape""": reshape, """seed""": seed} SCREAMING_SNAKE_CASE__ = _DataSet(UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = _DataSet(UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = _DataSet(UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ) return _Datasets(train=UpperCamelCase__ , validation=UpperCamelCase__ , test=UpperCamelCase__ )
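The next_batch bookkeeping above (epoch counters, leftover slices, reshuffling) can be condensed considerably. A simplified sketch of one shuffled pass, for orientation only and not a drop-in replacement for the stateful _DataSet:

import numpy as np

def iterate_minibatches(images, labels, batch_size, seed=0):
    # One pass over the data in shuffled mini-batches.
    rng = np.random.default_rng(seed)
    perm = rng.permutation(len(images))
    for start in range(0, len(images), batch_size):
        idx = perm[start : start + batch_size]
        yield images[idx], labels[idx]

images = np.arange(10).reshape(10, 1)
labels = np.arange(10)
batches = list(iterate_minibatches(images, labels, batch_size=4))
assert sum(len(b[0]) for b in batches) == 10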
6
from sklearn.metrics import matthews_corrcoef import datasets SCREAMING_SNAKE_CASE__ : Optional[Any] = """ Compute the Matthews correlation coefficient (MCC) The Matthews correlation coefficient is used in machine learning as a measure of the quality of binary and multiclass classifications. It takes into account true and false positives and negatives and is generally regarded as a balanced measure which can be used even if the classes are of very different sizes. The MCC is in essence a correlation coefficient value between -1 and +1. A coefficient of +1 represents a perfect prediction, 0 an average random prediction and -1 an inverse prediction. The statistic is also known as the phi coefficient. [source: Wikipedia] """ SCREAMING_SNAKE_CASE__ : Union[str, Any] = """ Args: predictions (list of int): Predicted labels, as returned by a model. references (list of int): Ground truth labels. sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`. Returns: matthews_correlation (dict containing float): Matthews correlation. Examples: Example 1, a basic example with only predictions and references as inputs: >>> matthews_metric = datasets.load_metric(\"matthews_correlation\") >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2], ... predictions=[1, 2, 2, 0, 3, 3]) >>> print(round(results['matthews_correlation'], 2)) 0.54 Example 2, the same example as above, but also including sample weights: >>> matthews_metric = datasets.load_metric(\"matthews_correlation\") >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2], ... predictions=[1, 2, 2, 0, 3, 3], ... sample_weight=[0.5, 3, 1, 1, 1, 2]) >>> print(round(results['matthews_correlation'], 2)) 0.1 Example 3, the same example as above, but with sample weights that cause a negative correlation: >>> matthews_metric = datasets.load_metric(\"matthews_correlation\") >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2], ... predictions=[1, 2, 2, 0, 3, 3], ... sample_weight=[0.5, 1, 0, 0, 0, 1]) >>> print(round(results['matthews_correlation'], 2)) -0.25 """ SCREAMING_SNAKE_CASE__ : int = """\ @article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011} } """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCamelCase_ ( datasets.Metric ): def A ( self ): """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''int32''' ), '''references''': datasets.Value('''int32''' ), } ) , reference_urls=[ '''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html''' ] , ) def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None ): """simple docstring""" return { "matthews_correlation": float(matthews_corrcoef(__lowerCAmelCase , __lowerCAmelCase , sample_weight=__lowerCAmelCase ) ), }
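The wrapper above defers entirely to scikit-learn, so its first docstring example reduces to a direct call:

from sklearn.metrics import matthews_corrcoef

refs = [1, 3, 2, 0, 3, 2]
preds = [1, 2, 2, 0, 3, 3]
print(round(matthews_corrcoef(refs, preds), 2))  # 0.54, as in the docstring example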
0
0
"""simple docstring""" import baseaa def _snake_case ( _snake_case : str ) -> bytes: '''simple docstring''' return baseaa.aaaencode(string.encode('utf-8' ) ) def _snake_case ( _snake_case : bytes ) -> str: '''simple docstring''' return baseaa.aaadecode(_snake_case ).decode('utf-8' ) if __name__ == "__main__": import doctest doctest.testmod()
7
from __future__ import annotations def __lowercase ( snake_case, snake_case ): """simple docstring""" print(f'''Vertex\tShortest Distance from vertex {src}''' ) for i, d in enumerate(snake_case ): print(f'''{i}\t\t{d}''' ) def __lowercase ( snake_case, snake_case, snake_case ): """simple docstring""" for j in range(snake_case ): __magic_name__ , __magic_name__ , __magic_name__ :Tuple = (graph[j][k] for k in ['''src''', '''dst''', '''weight''']) if distance[u] != float('''inf''' ) and distance[u] + w < distance[v]: return True return False def __lowercase ( snake_case, snake_case, snake_case, snake_case ): """simple docstring""" __magic_name__ :List[Any] = [float('''inf''' )] * vertex_count __magic_name__ :Tuple = 0.0 for _ in range(vertex_count - 1 ): for j in range(snake_case ): __magic_name__ , __magic_name__ , __magic_name__ :Dict = (graph[j][k] for k in ['''src''', '''dst''', '''weight''']) if distance[u] != float('''inf''' ) and distance[u] + w < distance[v]: __magic_name__ :Tuple = distance[u] + w __magic_name__ :Tuple = check_negative_cycle(snake_case, snake_case, snake_case ) if negative_cycle_exists: raise Exception('''Negative cycle found''' ) return distance if __name__ == "__main__": import doctest doctest.testmod() SCREAMING_SNAKE_CASE__ : Tuple = int(input("""Enter number of vertices: """).strip()) SCREAMING_SNAKE_CASE__ : Any = int(input("""Enter number of edges: """).strip()) SCREAMING_SNAKE_CASE__ : list[dict[str, int]] = [{} for _ in range(E)] for i in range(E): print("""Edge """, i + 1) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = ( int(x) for x in input("""Enter source, destination, weight: """).strip().split(""" """) ) SCREAMING_SNAKE_CASE__ : Dict = {"""src""": src, """dst""": dest, """weight""": weight} SCREAMING_SNAKE_CASE__ : List[Any] = int(input("""\nEnter shortest path source:""").strip()) SCREAMING_SNAKE_CASE__ : List[str] = bellman_ford(graph, V, E, source) print_distance(shortest_distance, 0)
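With the mangled __lowercase helpers restored to the names the __main__ block expects (print_distance, check_negative_cycle, bellman_ford), the core relaxation loop behaves like this self-contained sketch, which iterates edges directly instead of indexing graph[j]:

def bellman_ford(graph, vertex_count, src):
    # Relax every edge vertex_count - 1 times; raise if a negative cycle remains.
    distance = [float("inf")] * vertex_count
    distance[src] = 0.0
    for _ in range(vertex_count - 1):
        for edge in graph:
            u, v, w = edge["src"], edge["dst"], edge["weight"]
            if distance[u] != float("inf") and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w
    for edge in graph:
        u, v, w = edge["src"], edge["dst"], edge["weight"]
        if distance[u] != float("inf") and distance[u] + w < distance[v]:
            raise Exception("Negative cycle found")
    return distance

graph = [
    {"src": 0, "dst": 1, "weight": 4},
    {"src": 0, "dst": 2, "weight": 1},
    {"src": 2, "dst": 1, "weight": 2},
    {"src": 1, "dst": 3, "weight": 1},
]
assert bellman_ford(graph, 4, 0) == [0.0, 3.0, 1.0, 4.0]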
0
0
'''simple docstring''' def _lowerCAmelCase ( __snake_case : list ) -> list: __A : Dict = False while is_sorted is False: # Until all the indices are traversed keep looping __A : int = True for i in range(0 , len(__snake_case ) - 1 , 2 ): # iterating over all even indices if input_list[i] > input_list[i + 1]: __A ,__A : List[Any] = input_list[i + 1], input_list[i] # swapping if elements not in order __A : int = False for i in range(1 , len(__snake_case ) - 1 , 2 ): # iterating over all odd indices if input_list[i] > input_list[i + 1]: __A ,__A : Optional[int] = input_list[i + 1], input_list[i] # swapping if elements not in order __A : str = False return input_list if __name__ == "__main__": print('''Enter list to be sorted''') lowercase__ : Dict = [int(x) for x in input().split()] # inputing elements of the list in one line lowercase__ : Union[str, Any] = odd_even_sort(input_list) print('''The sorted list is''') print(sorted_list)
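A compact restatement of the odd-even (brick) sort above, folding the even and odd passes into one loop; this is a sketch for clarity, not a behavioral change:

def odd_even_sort(values: list) -> list:
    # Brick sort: alternate compare-swap passes over even- and odd-indexed pairs.
    is_sorted = False
    while not is_sorted:
        is_sorted = True
        for start in (0, 1):
            for i in range(start, len(values) - 1, 2):
                if values[i] > values[i + 1]:
                    values[i], values[i + 1] = values[i + 1], values[i]
                    is_sorted = False
    return values

assert odd_even_sort([5, 3, 1, 4, 2]) == [1, 2, 3, 4, 5]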
8
from __future__ import annotations import unittest from transformers import RoFormerConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForMultipleChoice, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerModel, ) from transformers.models.roformer.modeling_tf_roformer import ( TFRoFormerSelfAttention, TFRoFormerSinusoidalPositionalEmbedding, ) class lowerCamelCase_ : def __init__( self , __lowerCAmelCase , __lowerCAmelCase=1_3 , __lowerCAmelCase=7 , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=9_9 , __lowerCAmelCase=3_2 , __lowerCAmelCase=2 , __lowerCAmelCase=4 , __lowerCAmelCase=3_7 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=5_1_2 , __lowerCAmelCase=1_6 , __lowerCAmelCase=2 , __lowerCAmelCase=0.02 , __lowerCAmelCase=3 , __lowerCAmelCase=4 , __lowerCAmelCase=None , ): """simple docstring""" __magic_name__ :Optional[int] = parent __magic_name__ :List[Any] = 1_3 __magic_name__ :Union[str, Any] = 7 __magic_name__ :Optional[Any] = True __magic_name__ :Tuple = True __magic_name__ :List[str] = True __magic_name__ :List[Any] = True __magic_name__ :int = 9_9 __magic_name__ :Any = 3_2 __magic_name__ :Union[str, Any] = 2 __magic_name__ :List[str] = 4 __magic_name__ :List[Any] = 3_7 __magic_name__ :Tuple = '''gelu''' __magic_name__ :Any = 0.1 __magic_name__ :str = 0.1 __magic_name__ :List[str] = 5_1_2 __magic_name__ :int = 1_6 __magic_name__ :Any = 2 __magic_name__ :List[Any] = 0.02 __magic_name__ :Optional[Any] = 3 __magic_name__ :Tuple = 4 __magic_name__ :Optional[Any] = None def A ( self ): """simple docstring""" __magic_name__ :Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __magic_name__ :str = None if self.use_input_mask: __magic_name__ :Optional[int] = random_attention_mask([self.batch_size, self.seq_length] ) __magic_name__ :str = None if self.use_token_type_ids: __magic_name__ :List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __magic_name__ :Union[str, Any] = None __magic_name__ :Tuple = None __magic_name__ :str = None if self.use_labels: __magic_name__ :List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __magic_name__ :List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __magic_name__ :List[Any] = ids_tensor([self.batch_size] , self.num_choices ) __magic_name__ :str = RoFormerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=__lowerCAmelCase , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , 
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): """simple docstring""" __magic_name__ :int = TFRoFormerModel(config=__lowerCAmelCase ) __magic_name__ :Optional[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} __magic_name__ :List[str] = [input_ids, input_mask] __magic_name__ :Any = model(__lowerCAmelCase ) __magic_name__ :List[str] = model(__lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): """simple docstring""" __magic_name__ :Dict = True __magic_name__ :List[str] = TFRoFormerForCausalLM(config=__lowerCAmelCase ) __magic_name__ :str = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } __magic_name__ :Optional[Any] = model(__lowerCAmelCase )['''logits'''] self.parent.assertListEqual( list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] ) def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): """simple docstring""" __magic_name__ :Optional[Any] = TFRoFormerForMaskedLM(config=__lowerCAmelCase ) __magic_name__ :Any = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } __magic_name__ :Dict = model(__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): """simple docstring""" __magic_name__ :int = self.num_labels __magic_name__ :str = TFRoFormerForSequenceClassification(config=__lowerCAmelCase ) __magic_name__ :Optional[int] = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } __magic_name__ :str = model(__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): """simple docstring""" __magic_name__ :Union[str, Any] = self.num_choices __magic_name__ :Tuple = TFRoFormerForMultipleChoice(config=__lowerCAmelCase ) __magic_name__ :int = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) ) __magic_name__ :Optional[Any] = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) ) __magic_name__ :Union[str, Any] = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) ) __magic_name__ :str = { '''input_ids''': multiple_choice_inputs_ids, '''attention_mask''': multiple_choice_input_mask, '''token_type_ids''': multiple_choice_token_type_ids, } __magic_name__ :Tuple = model(__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): """simple docstring""" __magic_name__ :Optional[int] = self.num_labels __magic_name__ :Any = TFRoFormerForTokenClassification(config=__lowerCAmelCase ) __magic_name__ :str = { '''input_ids''': input_ids, '''attention_mask''': input_mask, 
'''token_type_ids''': token_type_ids, } __magic_name__ :Dict = model(__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): """simple docstring""" __magic_name__ :List[str] = TFRoFormerForQuestionAnswering(config=__lowerCAmelCase ) __magic_name__ :List[str] = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } __magic_name__ :Union[str, Any] = model(__lowerCAmelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def A ( self ): """simple docstring""" __magic_name__ :Union[str, Any] = self.prepare_config_and_inputs() ( ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ) :Union[str, Any] = config_and_inputs __magic_name__ :Optional[Any] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_tf class lowerCamelCase_ ( lowerCamelCase , lowerCamelCase , unittest.TestCase ): a__ = ( ( TFRoFormerModel, TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerForMultipleChoice, ) if is_tf_available() else () ) a__ = ( { '''feature-extraction''': TFRoFormerModel, '''fill-mask''': TFRoFormerForMaskedLM, '''question-answering''': TFRoFormerForQuestionAnswering, '''text-classification''': TFRoFormerForSequenceClassification, '''text-generation''': TFRoFormerForCausalLM, '''token-classification''': TFRoFormerForTokenClassification, '''zero-shot''': TFRoFormerForSequenceClassification, } if is_tf_available() else {} ) a__ = False a__ = False def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): """simple docstring""" if pipeline_test_casse_name == "TextGenerationPipelineTests": return True return False def A ( self ): """simple docstring""" __magic_name__ :List[str] = TFRoFormerModelTester(self ) __magic_name__ :List[str] = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=3_7 ) def A ( self ): """simple docstring""" self.config_tester.run_common_tests() def A ( self ): """simple docstring""" __magic_name__ :Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCAmelCase ) def A ( self ): """simple docstring""" __magic_name__ :Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__lowerCAmelCase ) def A ( self ): """simple docstring""" __magic_name__ :Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head(*__lowerCAmelCase ) def A ( self ): """simple docstring""" __magic_name__ :Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*__lowerCAmelCase ) def A ( self ): """simple docstring""" __magic_name__ :Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__lowerCAmelCase ) def A ( self ): """simple docstring""" __magic_name__ :Dict = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_for_sequence_classification(*__lowerCAmelCase ) def A ( self ): """simple docstring""" __magic_name__ :Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__lowerCAmelCase ) @slow def A ( self ): """simple docstring""" __magic_name__ :Optional[Any] = TFRoFormerModel.from_pretrained('''junnyu/roformer_chinese_base''' ) self.assertIsNotNone(__lowerCAmelCase ) @require_tf class lowerCamelCase_ ( unittest.TestCase ): @slow def A ( self ): """simple docstring""" __magic_name__ :int = TFRoFormerForMaskedLM.from_pretrained('''junnyu/roformer_chinese_base''' ) __magic_name__ :Dict = tf.constant([[0, 1, 2, 3, 4, 5]] ) __magic_name__ :Optional[Any] = model(__lowerCAmelCase )[0] # TODO Replace vocab size __magic_name__ :int = 5_0_0_0_0 __magic_name__ :Tuple = [1, 6, vocab_size] self.assertEqual(output.shape , __lowerCAmelCase ) print(output[:, :3, :3] ) # TODO Replace values below with what was printed above. __magic_name__ :Any = tf.constant( [ [ [-0.12053341, -1.0264901, 0.29221946], [-1.5133783, 0.197433, 0.15190607], [-5.0135403, -3.900256, -0.84038764], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , __lowerCAmelCase , atol=1E-4 ) @require_tf class lowerCamelCase_ ( unittest.TestCase ): a__ = 1e-4 def A ( self ): """simple docstring""" __magic_name__ :Optional[int] = tf.constant([[4, 1_0]] ) __magic_name__ :Optional[int] = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 ) __magic_name__ :Optional[Any] = emba(input_ids.shape ) __magic_name__ :List[str] = tf.constant( [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]] ) tf.debugging.assert_near(__lowerCAmelCase , __lowerCAmelCase , atol=self.tolerance ) def A ( self ): """simple docstring""" __magic_name__ :Tuple = tf.constant( [ [0.0000, 0.0000, 0.0000, 0.0000, 0.0000], [0.8415, 0.8219, 0.8020, 0.7819, 0.7617], [0.9093, 0.9364, 0.9581, 0.9749, 0.9870], ] ) __magic_name__ :Union[str, Any] = TFRoFormerSinusoidalPositionalEmbedding(num_positions=5_1_2 , embedding_dim=5_1_2 ) emba([2, 1_6, 5_1_2] ) __magic_name__ :Optional[int] = emba.weight[:3, :5] tf.debugging.assert_near(__lowerCAmelCase , __lowerCAmelCase , atol=self.tolerance ) @require_tf class lowerCamelCase_ ( unittest.TestCase ): a__ = 1e-4 def A ( self ): """simple docstring""" # 2,12,16,64 __magic_name__ :int = tf.reshape(tf.range(2 * 1_2 * 1_6 * 6_4 , dtype=tf.floataa ) , shape=(2, 1_2, 1_6, 6_4) ) / 1_0_0 __magic_name__ :str = -tf.reshape(tf.range(2 * 1_2 * 1_6 * 6_4 , dtype=tf.floataa ) , shape=(2, 1_2, 1_6, 6_4) ) / 1_0_0 __magic_name__ :int = TFRoFormerSinusoidalPositionalEmbedding(num_positions=3_2 , embedding_dim=6_4 ) __magic_name__ :List[str] = embed_positions([2, 1_6, 7_6_8] )[None, None, :, :] __magic_name__ , __magic_name__ :Union[str, Any] = TFRoFormerSelfAttention.apply_rotary_position_embeddings( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) __magic_name__ :Tuple = tf.constant( [ [0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700], [-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343], [-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985], [-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871], [0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980], [3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253], ] ) __magic_name__ :List[str] = tf.constant( [ [0.0000, -0.0100, -0.0200, -0.0300, -0.0400, 
-0.0500, -0.0600, -0.0700], [0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343], [1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985], [2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871], [-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980], [-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253], ] ) tf.debugging.assert_near(query_layer[0, 0, :6, :8] , __lowerCAmelCase , atol=self.tolerance ) tf.debugging.assert_near(key_layer[0, 0, :6, :8] , __lowerCAmelCase , atol=self.tolerance )
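The expected tensors in the sinusoidal-embedding tests above follow from the standard sin/cos table. A NumPy sketch reproduces them, under the layout assumption the tested first row [0, 0, 0, 1, 1, 1] implies (sin channels first, then cos):

import numpy as np

def sinusoidal_positions(num_positions: int, dim: int) -> np.ndarray:
    # sin on the first dim // 2 channels, cos on the rest.
    inv_freq = 1.0 / (10000 ** (np.arange(0, dim, 2) / dim))
    angles = np.outer(np.arange(num_positions), inv_freq)
    return np.concatenate([np.sin(angles), np.cos(angles)], axis=-1)

table = sinusoidal_positions(6, 6)
expected = np.array(
    [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000],
     [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]]
)
assert np.allclose(table[:2], expected, atol=1e-4)  # matches the test constants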
0
0
from dataclasses import dataclass, field from typing import Tuple from ..utils import cached_property, is_tf_available, logging, requires_backends from .benchmark_args_utils import BenchmarkArguments if is_tf_available(): import tensorflow as tf SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) @dataclass class __lowerCAmelCase ( UpperCAmelCase_ ): """simple docstring""" A__ : List[Any] = [ "no_inference", "no_cuda", "no_tpu", "no_speed", "no_memory", "no_env_print", "no_multi_process", ] def __init__( self : Union[str, Any] , **_snake_case : List[str] ): """simple docstring""" for deprecated_arg in self.deprecated_args: if deprecated_arg in kwargs: A__ = deprecated_arg[3:] A__ = not kwargs.pop(_snake_case ) logger.warning( F'''{deprecated_arg} is depreciated. Please use --no-{positive_arg} or''' F''' {positive_arg}={kwargs[positive_arg]}''' ) A__ = kwargs.pop('tpu_name' , self.tpu_name ) A__ = kwargs.pop('device_idx' , self.device_idx ) A__ = kwargs.pop('eager_mode' , self.eager_mode ) A__ = kwargs.pop('use_xla' , self.use_xla ) super().__init__(**_snake_case ) A__ : str = field( default=UpperCAmelCase_ , metadata={"help": "Name of TPU"} , ) A__ : int = field( default=0 , metadata={"help": "CPU / GPU device index. Defaults to 0."} , ) A__ : bool = field(default=UpperCAmelCase_ , metadata={"help": "Benchmark models in eager model."} ) A__ : bool = field( default=UpperCAmelCase_ , metadata={ "help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`." } , ) @cached_property def _a ( self : Dict ): """simple docstring""" requires_backends(self , ['tf'] ) A__ = None if self.tpu: try: if self.tpu_name: A__ = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name ) else: A__ = tf.distribute.cluster_resolver.TPUClusterResolver() except ValueError: A__ = None return tpu @cached_property def _a ( self : str ): """simple docstring""" requires_backends(self , ['tf'] ) if self.is_tpu: tf.config.experimental_connect_to_cluster(self._setup_tpu ) tf.tpu.experimental.initialize_tpu_system(self._setup_tpu ) A__ = tf.distribute.TPUStrategy(self._setup_tpu ) else: # currently no multi gpu is allowed if self.is_gpu: # TODO: Currently only single GPU is supported tf.config.set_visible_devices(self.gpu_list[self.device_idx] , 'GPU' ) A__ = tf.distribute.OneDeviceStrategy(device=F'''/gpu:{self.device_idx}''' ) else: tf.config.set_visible_devices([] , 'GPU' ) # disable GPU A__ = tf.distribute.OneDeviceStrategy(device=F'''/cpu:{self.device_idx}''' ) return strategy @property def _a ( self : Optional[Any] ): """simple docstring""" requires_backends(self , ['tf'] ) return self._setup_tpu is not None @property def _a ( self : Tuple ): """simple docstring""" requires_backends(self , ['tf'] ) return self._setup_strategy @property def _a ( self : Any ): """simple docstring""" requires_backends(self , ['tf'] ) return tf.config.list_physical_devices('GPU' ) @property def _a ( self : Dict ): """simple docstring""" requires_backends(self , ['tf'] ) if self.cuda: return len(self.gpu_list ) return 0 @property def _a ( self : List[Any] ): """simple docstring""" return self.n_gpu > 0
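The deprecation shim in __init__ above maps legacy no_* flags onto their positive counterparts; stripped of logging, the translation amounts to this (the no_cuda kwarg is a hypothetical example):

kwargs = {"no_cuda": True}
positive = {arg[3:]: not kwargs.pop(arg) for arg in list(kwargs) if arg.startswith("no_")}
assert positive == {"cuda": False}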
9
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available SCREAMING_SNAKE_CASE__ : Optional[int] = {"""tokenization_herbert""": ["""HerbertTokenizer"""]} try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ : Optional[Any] = ["""HerbertTokenizerFast"""] if TYPE_CHECKING: from .tokenization_herbert import HerbertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_herbert_fast import HerbertTokenizerFast else: import sys SCREAMING_SNAKE_CASE__ : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
0
0
from __future__ import annotations import math _lowerCAmelCase = "2020.9.26" _lowerCAmelCase = "xcodz-dot, cclaus, dhruvmanila" def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ): if not all(isinstance(__snake_case , (float, int) ) for val in locals().values() ): _UpperCamelCase = f"""Input values must either be float or int: {list(locals().values() )}""" raise TypeError(__snake_case ) _UpperCamelCase = ((x * distance) / (z + distance)) * scale _UpperCamelCase = ((y * distance) / (z + distance)) * scale return projected_x, projected_y def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ): if not isinstance(__snake_case , __snake_case ): raise TypeError('''Axis must be a str''' ) _UpperCamelCase = locals() del input_variables["axis"] if not all(isinstance(__snake_case , (float, int) ) for val in input_variables.values() ): _UpperCamelCase = ( '''Input values except axis must either be float or int: ''' f"""{list(input_variables.values() )}""" ) raise TypeError(__snake_case ) _UpperCamelCase = (angle % 360) / 450 * 180 / math.pi if axis == "z": _UpperCamelCase = x * math.cos(__snake_case ) - y * math.sin(__snake_case ) _UpperCamelCase = y * math.cos(__snake_case ) + x * math.sin(__snake_case ) _UpperCamelCase = z elif axis == "x": _UpperCamelCase = y * math.cos(__snake_case ) - z * math.sin(__snake_case ) _UpperCamelCase = z * math.cos(__snake_case ) + y * math.sin(__snake_case ) _UpperCamelCase = x elif axis == "y": _UpperCamelCase = x * math.cos(__snake_case ) - z * math.sin(__snake_case ) _UpperCamelCase = z * math.cos(__snake_case ) + x * math.sin(__snake_case ) _UpperCamelCase = y else: raise ValueError('''not a valid axis, choose one of \'x\', \'y\', \'z\'''' ) return new_x, new_y, new_z if __name__ == "__main__": import doctest doctest.testmod() print(f'{convert_to_ad(1.0, 2.0, 3.0, 10.0, 10.0) = }') print(f'{rotate(1.0, 2.0, 3.0, "y", 90.0) = }')
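Restoring the mangled doctest name convert_to_ad to the perspective projection it implements, the first printed example works out as follows; the sketch reuses the blob's own formula:

import math

def convert_to_2d(x, y, z, scale, distance):
    # Perspective projection: larger z pushes a point toward the origin.
    projected_x = ((x * distance) / (z + distance)) * scale
    projected_y = ((y * distance) / (z + distance)) * scale
    return projected_x, projected_y

px, py = convert_to_2d(1.0, 2.0, 3.0, 10.0, 10.0)
assert math.isclose(px, 100 / 13) and math.isclose(py, 200 / 13)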
10
import argparse import gdown import numpy as np import torch from huggingface_hub import hf_hub_download from transformers import ( CLIPTokenizer, CLIPTokenizerFast, VideoMAEImageProcessor, XCLIPConfig, XCLIPModel, XCLIPProcessor, XCLIPTextConfig, XCLIPVisionConfig, ) def __lowercase ( snake_case, snake_case ): """simple docstring""" __magic_name__ :str = XCLIPTextConfig() # derive patch size from model name __magic_name__ :Union[str, Any] = model_name.find('''patch''' ) __magic_name__ :Optional[Any] = int(model_name[start_idx + len('''patch''' ) : start_idx + len('''patch''' ) + 2] ) __magic_name__ :int = XCLIPVisionConfig(patch_size=snake_case, num_frames=snake_case ) if "large" in model_name: __magic_name__ :Dict = 7_6_8 __magic_name__ :int = 3_0_7_2 __magic_name__ :List[Any] = 1_2 __magic_name__ :str = 1_0_2_4 __magic_name__ :Any = 4_0_9_6 __magic_name__ :Optional[Any] = 1_6 __magic_name__ :Union[str, Any] = 2_4 __magic_name__ :Union[str, Any] = 7_6_8 __magic_name__ :Tuple = 3_0_7_2 if model_name == "xclip-large-patch14-16-frames": __magic_name__ :List[str] = 3_3_6 __magic_name__ :Any = XCLIPConfig.from_text_vision_configs(snake_case, snake_case ) if "large" in model_name: __magic_name__ :str = 7_6_8 return config def __lowercase ( snake_case ): """simple docstring""" if name == "token_embedding.weight": __magic_name__ :Any = name.replace('''token_embedding.weight''', '''text_model.embeddings.token_embedding.weight''' ) if name == "positional_embedding": __magic_name__ :Any = name.replace('''positional_embedding''', '''text_model.embeddings.position_embedding.weight''' ) if "ln_1" in name: __magic_name__ :List[str] = name.replace('''ln_1''', '''layer_norm1''' ) if "ln_2" in name: __magic_name__ :str = name.replace('''ln_2''', '''layer_norm2''' ) if "c_fc" in name: __magic_name__ :List[Any] = name.replace('''c_fc''', '''fc1''' ) if "c_proj" in name: __magic_name__ :Any = name.replace('''c_proj''', '''fc2''' ) if name.startswith('''transformer.resblocks''' ): __magic_name__ :Any = name.replace('''transformer.resblocks''', '''text_model.encoder.layers''' ) if "attn.out_proj" in name and "message" not in name: __magic_name__ :Union[str, Any] = name.replace('''attn.out_proj''', '''self_attn.out_proj''' ) if "ln_final" in name: __magic_name__ :Tuple = name.replace('''ln_final''', '''text_model.final_layer_norm''' ) # visual encoder if name == "visual.class_embedding": __magic_name__ :List[Any] = name.replace('''visual.class_embedding''', '''vision_model.embeddings.class_embedding''' ) if name == "visual.positional_embedding": __magic_name__ :Any = name.replace('''visual.positional_embedding''', '''vision_model.embeddings.position_embedding.weight''' ) if name.startswith('''visual.transformer.resblocks''' ): __magic_name__ :Union[str, Any] = name.replace('''visual.transformer.resblocks''', '''vision_model.encoder.layers''' ) if "visual.conv1" in name: __magic_name__ :Tuple = name.replace('''visual.conv1''', '''vision_model.embeddings.patch_embedding''' ) if "visual.ln_pre" in name: __magic_name__ :Tuple = name.replace('''visual.ln_pre''', '''vision_model.pre_layernorm''' ) if "visual.ln_post" in name: __magic_name__ :Optional[Any] = name.replace('''visual.ln_post''', '''vision_model.post_layernorm''' ) if "visual.proj" in name: __magic_name__ :Tuple = name.replace('''visual.proj''', '''visual_projection.weight''' ) if "text_projection" in name: __magic_name__ :int = name.replace('''text_projection''', '''text_projection.weight''' ) # things on top if "prompts_visual_proj" in name: 
__magic_name__ :int = name.replace('''prompts_visual_proj''', '''prompts_visual_projection''' ) if "prompts_visual_ln" in name: __magic_name__ :Dict = name.replace('''prompts_visual_ln''', '''prompts_visual_layernorm''' ) # mit if name == "mit.positional_embedding": __magic_name__ :List[Any] = name.replace('''positional''', '''position''' ) if name.startswith('''mit.resblocks''' ): __magic_name__ :Union[str, Any] = name.replace('''mit.resblocks''', '''mit.encoder.layers''' ) # prompts generator if name.startswith('''prompts_generator.norm''' ): __magic_name__ :str = name.replace('''prompts_generator.norm''', '''prompts_generator.layernorm''' ) return name def __lowercase ( snake_case, snake_case ): """simple docstring""" for key in orig_state_dict.copy().keys(): __magic_name__ :Any = orig_state_dict.pop(snake_case ) if "attn.in_proj" in key: __magic_name__ :str = key.split('''.''' ) if key.startswith('''visual''' ): __magic_name__ :List[Any] = key_split[3] __magic_name__ :List[Any] = config.vision_config.hidden_size if "message_attn" in key: if "weight" in key: __magic_name__ :List[Any] = val[ :dim, : ] __magic_name__ :List[str] = val[ dim : dim * 2, : ] __magic_name__ :List[str] = val[ -dim:, : ] else: __magic_name__ :str = val[ :dim ] __magic_name__ :Optional[int] = val[ dim : dim * 2 ] __magic_name__ :Any = val[ -dim: ] else: if "weight" in key: __magic_name__ :int = val[ :dim, : ] __magic_name__ :Union[str, Any] = val[ dim : dim * 2, : ] __magic_name__ :List[Any] = val[ -dim:, : ] else: __magic_name__ :Union[str, Any] = val[:dim] __magic_name__ :str = val[ dim : dim * 2 ] __magic_name__ :Dict = val[-dim:] elif key.startswith('''mit''' ): __magic_name__ :List[Any] = key_split[2] __magic_name__ :Any = config.vision_config.mit_hidden_size if "weight" in key: __magic_name__ :Union[str, Any] = val[:dim, :] __magic_name__ :Optional[int] = val[dim : dim * 2, :] __magic_name__ :int = val[-dim:, :] else: __magic_name__ :Tuple = val[:dim] __magic_name__ :Optional[int] = val[dim : dim * 2] __magic_name__ :Optional[int] = val[-dim:] else: __magic_name__ :Any = key_split[2] __magic_name__ :List[Any] = config.text_config.hidden_size if "weight" in key: __magic_name__ :Union[str, Any] = val[:dim, :] __magic_name__ :Tuple = val[ dim : dim * 2, : ] __magic_name__ :str = val[-dim:, :] else: __magic_name__ :int = val[:dim] __magic_name__ :Any = val[ dim : dim * 2 ] __magic_name__ :str = val[-dim:] else: __magic_name__ :Tuple = rename_key(snake_case ) if new_key_name in ["visual_projection.weight", "text_projection.weight"]: __magic_name__ :List[Any] = val.T __magic_name__ :Optional[Any] = val return orig_state_dict def __lowercase ( snake_case ): """simple docstring""" if num_frames == 8: __magic_name__ :Any = '''eating_spaghetti_8_frames.npy''' elif num_frames == 1_6: __magic_name__ :List[Any] = '''eating_spaghetti.npy''' elif num_frames == 3_2: __magic_name__ :Tuple = '''eating_spaghetti_32_frames.npy''' __magic_name__ :str = hf_hub_download( repo_id='''hf-internal-testing/spaghetti-video''', filename=snake_case, repo_type='''dataset''', ) __magic_name__ :List[Any] = np.load(snake_case ) return list(snake_case ) def __lowercase ( snake_case, snake_case=None, snake_case=False ): """simple docstring""" __magic_name__ :Union[str, Any] = { # fully supervised kinetics-400 checkpoints '''xclip-base-patch32''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth''', '''xclip-base-patch32-16-frames''': ( 
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth''' ), '''xclip-base-patch16''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth''', '''xclip-base-patch16-16-frames''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth''' ), '''xclip-large-patch14''': '''https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&amp;export=download&amp;confirm=t&amp;uuid=b26caedc-88e2-473e-830a-9d158b653cdb''', '''xclip-large-patch14-16-frames''': '''https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&amp;export=download&amp;confirm=t&amp;uuid=538fa810-e671-4050-b385-9a623f89804f''', # fully supervised kinetics-600 checkpoints '''xclip-base-patch16-kinetics-600''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth''' ), '''xclip-base-patch16-kinetics-600-16-frames''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth''' ), '''xclip-large-patch14-kinetics-600''': '''https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&amp;export=download&amp;confirm=t&amp;uuid=141d4977-4a65-44ae-864f-4b0c19f838be''', # few shot '''xclip-base-patch16-hmdb-2-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth''' ), '''xclip-base-patch16-hmdb-4-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth''' ), '''xclip-base-patch16-hmdb-8-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth''' ), '''xclip-base-patch16-hmdb-16-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth''' ), '''xclip-base-patch16-ucf-2-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth''' ), '''xclip-base-patch16-ucf-4-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth''' ), '''xclip-base-patch16-ucf-8-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth''' ), '''xclip-base-patch16-ucf-16-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth''' ), # zero shot '''xclip-base-patch16-zero-shot''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth''', } __magic_name__ :Optional[int] = model_to_url[model_name] __magic_name__ :List[str] = 8 if "16-frames" in model_name: __magic_name__ :List[Any] = 1_6 elif "shot" in model_name: __magic_name__ :Dict = 3_2 __magic_name__ :str = get_xclip_config(snake_case, snake_case ) __magic_name__ :List[Any] = XCLIPModel(snake_case ) model.eval() if "drive" in checkpoint_url: __magic_name__ :Any = '''pytorch_model.bin''' gdown.cached_download(snake_case, snake_case, quiet=snake_case ) __magic_name__ :Optional[Any] = torch.load(snake_case, map_location='''cpu''' )['''model'''] else: __magic_name__ :Optional[int] = torch.hub.load_state_dict_from_url(snake_case )['''model'''] __magic_name__ :List[str] = convert_state_dict(snake_case, snake_case ) __magic_name__ :List[Any] = XCLIPModel(snake_case ) __magic_name__ , __magic_name__ :Optional[Any] = model.load_state_dict(snake_case, strict=snake_case ) assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"] model.eval() __magic_name__ :str = 3_3_6 if model_name == '''xclip-large-patch14-16-frames''' else 2_2_4 __magic_name__ :Optional[int] = VideoMAEImageProcessor(size=snake_case ) 
__magic_name__ :Optional[int] = CLIPTokenizer.from_pretrained('''openai/clip-vit-base-patch32''' ) __magic_name__ :Tuple = CLIPTokenizerFast.from_pretrained('''openai/clip-vit-base-patch32''' ) __magic_name__ :Optional[int] = XCLIPProcessor(image_processor=snake_case, tokenizer=snake_case ) __magic_name__ :List[Any] = prepare_video(snake_case ) __magic_name__ :str = processor( text=['''playing sports''', '''eating spaghetti''', '''go shopping'''], videos=snake_case, return_tensors='''pt''', padding=snake_case ) print('''Shape of pixel values:''', inputs.pixel_values.shape ) with torch.no_grad(): __magic_name__ :Tuple = model(**snake_case ) # Verify outputs __magic_name__ :Any = outputs.logits_per_video __magic_name__ :str = logits_per_video.softmax(dim=1 ) print('''Probs:''', snake_case ) # kinetics-400 if model_name == "xclip-base-patch32": __magic_name__ :Dict = torch.tensor([[0.0019, 0.9951, 0.0030]] ) elif model_name == "xclip-base-patch32-16-frames": __magic_name__ :str = torch.tensor([[7.0_9_9_9E-0_4, 9.9_8_8_3E-0_1, 4.5_5_8_0E-0_4]] ) elif model_name == "xclip-base-patch16": __magic_name__ :Tuple = torch.tensor([[0.0083, 0.9681, 0.0236]] ) elif model_name == "xclip-base-patch16-16-frames": __magic_name__ :Tuple = torch.tensor([[7.6_9_3_7E-0_4, 9.9_7_2_8E-0_1, 1.9_4_7_3E-0_3]] ) elif model_name == "xclip-large-patch14": __magic_name__ :str = torch.tensor([[0.0062, 0.9864, 0.0075]] ) elif model_name == "xclip-large-patch14-16-frames": __magic_name__ :Optional[int] = torch.tensor([[3.3_8_7_7E-0_4, 9.9_9_3_7E-0_1, 2.8_8_8_8E-0_4]] ) # kinetics-600 elif model_name == "xclip-base-patch16-kinetics-600": __magic_name__ :Optional[int] = torch.tensor([[0.0555, 0.8914, 0.0531]] ) elif model_name == "xclip-base-patch16-kinetics-600-16-frames": __magic_name__ :List[str] = torch.tensor([[3.8_5_5_4E-0_4, 9.9_9_2_9E-0_1, 3.2_7_5_4E-0_4]] ) elif model_name == "xclip-large-patch14-kinetics-600": __magic_name__ :List[str] = torch.tensor([[0.0036, 0.9920, 0.0045]] ) # few shot elif model_name == "xclip-base-patch16-hmdb-2-shot": __magic_name__ :Tuple = torch.tensor([[7.1_8_9_0E-0_6, 9.9_9_9_4E-0_1, 5.6_5_5_9E-0_5]] ) elif model_name == "xclip-base-patch16-hmdb-4-shot": __magic_name__ :List[str] = torch.tensor([[1.0_3_2_0E-0_5, 9.9_9_9_3E-0_1, 6.2_4_3_5E-0_5]] ) elif model_name == "xclip-base-patch16-hmdb-8-shot": __magic_name__ :Optional[int] = torch.tensor([[4.1_3_7_7E-0_6, 9.9_9_9_0E-0_1, 9.8_3_8_6E-0_5]] ) elif model_name == "xclip-base-patch16-hmdb-16-shot": __magic_name__ :Optional[int] = torch.tensor([[4.1_3_4_7E-0_5, 9.9_9_6_2E-0_1, 3.3_4_1_1E-0_4]] ) elif model_name == "xclip-base-patch16-ucf-2-shot": __magic_name__ :Union[str, Any] = torch.tensor([[8.5_8_5_7E-0_5, 9.9_9_2_8E-0_1, 6.3_2_9_1E-0_4]] ) elif model_name == "xclip-base-patch16-ucf-4-shot": __magic_name__ :Union[str, Any] = torch.tensor([[8.5_8_5_7E-0_5, 9.9_9_2_8E-0_1, 6.3_2_9_1E-0_4]] ) elif model_name == "xclip-base-patch16-ucf-8-shot": __magic_name__ :Optional[int] = torch.tensor([[0.0027, 0.9904, 0.0070]] ) elif model_name == "xclip-base-patch16-ucf-16-shot": __magic_name__ :Any = torch.tensor([[9.8_2_1_9E-0_4, 9.9_5_9_3E-0_1, 3.0_8_6_3E-0_3]] ) # zero shot elif model_name == "xclip-base-patch16-zero-shot": __magic_name__ :Optional[int] = torch.tensor([[3.5_0_8_2E-0_4, 9.9_7_8_5E-0_1, 1.7_9_6_6E-0_3]] ) else: raise ValueError(f'''Model name {model_name} not supported''' ) assert torch.allclose(snake_case, snake_case, atol=1E-3 ) print('''Looks ok!''' ) if pytorch_dump_folder_path is not None: print(f'''Saving model {model_name} 
to {pytorch_dump_folder_path}''' ) model.save_pretrained(snake_case ) if push_to_hub: print('''Pushing model, processor and slow tokenizer files to the hub...''' ) model.push_to_hub(snake_case, organization='''nielsr''' ) processor.push_to_hub(snake_case, organization='''nielsr''' ) slow_tokenizer.push_to_hub(snake_case, organization='''nielsr''' ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""xclip-base-patch32""", type=str, help="""Name of the model.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) SCREAMING_SNAKE_CASE__ : List[Any] = parser.parse_args() convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
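The bulk of convert_state_dict above splits fused attention in_proj tensors into separate q/k/v blocks. The slicing pattern in isolation, with a toy hidden size of 4 chosen for illustration:

import numpy as np

dim = 4
in_proj_weight = np.arange(3 * dim * dim).reshape(3 * dim, dim)
q_w = in_proj_weight[:dim, :]
k_w = in_proj_weight[dim : dim * 2, :]
v_w = in_proj_weight[-dim:, :]
assert q_w.shape == k_w.shape == v_w.shape == (dim, dim)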
0
0
'''simple docstring''' from ...configuration_utils import PretrainedConfig lowercase_ = { "google/tapas-base-finetuned-sqa": ( "https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json" ), "google/tapas-base-finetuned-wtq": ( "https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json" ), "google/tapas-base-finetuned-wikisql-supervised": ( "https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json" ), "google/tapas-base-finetuned-tabfact": ( "https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json" ), } class __A ( A ): '''simple docstring''' __lowerCamelCase : Optional[int] = 'tapas' def __init__(self , A=30_522 , A=768 , A=12 , A=12 , A=3_072 , A="gelu" , A=0.1 , A=0.1 , A=1_024 , A=[3, 256, 256, 2, 256, 256, 10] , A=0.02 , A=1E-12 , A=0 , A=10.0 , A=0 , A=1.0 , A=None , A=1.0 , A=False , A=None , A=1.0 , A=1.0 , A=False , A=False , A="ratio" , A=None , A=None , A=64 , A=32 , A=False , A=True , A=False , A=False , A=True , A=False , A=None , A=None , **A , ) -> Dict: """simple docstring""" super().__init__(pad_token_id=A , **A ) # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes) _a = vocab_size _a = hidden_size _a = num_hidden_layers _a = num_attention_heads _a = hidden_act _a = intermediate_size _a = hidden_dropout_prob _a = attention_probs_dropout_prob _a = max_position_embeddings _a = type_vocab_sizes _a = initializer_range _a = layer_norm_eps # Fine-tuning task hyperparameters _a = positive_label_weight _a = num_aggregation_labels _a = aggregation_loss_weight _a = use_answer_as_supervision _a = answer_loss_importance _a = use_normalized_answer_loss _a = huber_loss_delta _a = temperature _a = aggregation_temperature _a = use_gumbel_for_cells _a = use_gumbel_for_aggregation _a = average_approximation_function _a = cell_selection_preference _a = answer_loss_cutoff _a = max_num_rows _a = max_num_columns _a = average_logits_per_cell _a = select_one_column _a = allow_empty_column_selection _a = init_cell_selection_weights_to_zero _a = reset_position_index_per_cell _a = disable_per_token_loss # Aggregation hyperparameters _a = aggregation_labels _a = no_aggregation_label_index if isinstance(self.aggregation_labels , A ): _a = {int(A ): v for k, v in aggregation_labels.items()}
11
import numpy as np import torch from torch.utils.data import Dataset from utils import logger class lowerCamelCase_ ( lowerCamelCase ): def __init__( self , __lowerCAmelCase , __lowerCAmelCase ): """simple docstring""" __magic_name__ :Optional[int] = params __magic_name__ :Any = np.array(__lowerCAmelCase ) __magic_name__ :Optional[Any] = np.array([len(__lowerCAmelCase ) for t in data] ) self.check() self.remove_long_sequences() self.remove_empty_sequences() self.remove_unknown_sequences() self.check() self.print_statistics() def __getitem__( self , __lowerCAmelCase ): """simple docstring""" return (self.token_ids[index], self.lengths[index]) def __len__( self ): """simple docstring""" return len(self.lengths ) def A ( self ): """simple docstring""" assert len(self.token_ids ) == len(self.lengths ) assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) ) def A ( self ): """simple docstring""" __magic_name__ :Any = self.params.max_model_input_size __magic_name__ :int = self.lengths > max_len logger.info(F'''Splitting {sum(__lowerCAmelCase )} too long sequences.''' ) def divide_chunks(__lowerCAmelCase , __lowerCAmelCase ): return [l[i : i + n] for i in range(0 , len(__lowerCAmelCase ) , __lowerCAmelCase )] __magic_name__ :Optional[int] = [] __magic_name__ :List[Any] = [] if self.params.mlm: __magic_name__ , __magic_name__ :Optional[Any] = self.params.special_tok_ids['''cls_token'''], self.params.special_tok_ids['''sep_token'''] else: __magic_name__ , __magic_name__ :Tuple = self.params.special_tok_ids['''bos_token'''], self.params.special_tok_ids['''eos_token'''] for seq_, len_ in zip(self.token_ids , self.lengths ): assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_ if len_ <= max_len: new_tok_ids.append(seq_ ) new_lengths.append(len_ ) else: __magic_name__ :int = [] for sub_s in divide_chunks(seq_ , max_len - 2 ): if sub_s[0] != cls_id: __magic_name__ :List[Any] = np.insert(__lowerCAmelCase , 0 , __lowerCAmelCase ) if sub_s[-1] != sep_id: __magic_name__ :Union[str, Any] = np.insert(__lowerCAmelCase , len(__lowerCAmelCase ) , __lowerCAmelCase ) assert len(__lowerCAmelCase ) <= max_len assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s sub_seqs.append(__lowerCAmelCase ) new_tok_ids.extend(__lowerCAmelCase ) new_lengths.extend([len(__lowerCAmelCase ) for l in sub_seqs] ) __magic_name__ :Tuple = np.array(__lowerCAmelCase ) __magic_name__ :Optional[int] = np.array(__lowerCAmelCase ) def A ( self ): """simple docstring""" __magic_name__ :Optional[Any] = len(self ) __magic_name__ :int = self.lengths > 1_1 __magic_name__ :List[str] = self.token_ids[indices] __magic_name__ :Union[str, Any] = self.lengths[indices] __magic_name__ :List[str] = len(self ) logger.info(F'''Remove {init_size - new_size} too short (<=11 tokens) sequences.''' ) def A ( self ): """simple docstring""" if "unk_token" not in self.params.special_tok_ids: return else: __magic_name__ :Tuple = self.params.special_tok_ids['''unk_token'''] __magic_name__ :Dict = len(self ) __magic_name__ :Tuple = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] ) __magic_name__ :int = (unk_occs / self.lengths) < 0.5 __magic_name__ :str = self.token_ids[indices] __magic_name__ :str = self.lengths[indices] __magic_name__ :Any = len(self ) logger.info(F'''Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).''' ) def A ( self ): """simple docstring""" if not self.params.is_master: return logger.info(F'''{len(self )} sequences''' ) # data_len = 
sum(self.lengths) # nb_unique_tokens = len(Counter(list(chain(*self.token_ids)))) # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)') # unk_idx = self.params.special_tok_ids['unk_token'] # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids]) # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)') def A ( self , __lowerCAmelCase ): """simple docstring""" __magic_name__ :Optional[Any] = [t[0] for t in batch] __magic_name__ :List[Any] = [t[1] for t in batch] assert len(__lowerCAmelCase ) == len(__lowerCAmelCase ) # Max for paddings __magic_name__ :Tuple = max(__lowerCAmelCase ) # Pad token ids if self.params.mlm: __magic_name__ :Any = self.params.special_tok_ids['''pad_token'''] else: __magic_name__ :str = self.params.special_tok_ids['''unk_token'''] __magic_name__ :Any = [list(t.astype(__lowerCAmelCase ) ) + [pad_idx] * (max_seq_len_ - len(__lowerCAmelCase )) for t in token_ids] assert len(tk_ ) == len(__lowerCAmelCase ) assert all(len(__lowerCAmelCase ) == max_seq_len_ for t in tk_ ) __magic_name__ :Optional[int] = torch.tensor(tk_ ) # (bs, max_seq_len_) __magic_name__ :Optional[int] = torch.tensor(__lowerCAmelCase ) # (bs) return tk_t, lg_t
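A minimal wiring sketch for the dataset class above. The class name LmSeqsDataset and the collate method batch_sequences follow the upstream distillation code this anonymized listing mirrors (in the listing they appear under placeholder names), and the params namespace is a stand-in for the scripts' argparse object:

from types import SimpleNamespace

import numpy as np
from torch.utils.data import DataLoader

# Stand-in for the distillation scripts' argparse params (only the fields read above).
params = SimpleNamespace(
    max_model_input_size=512,
    mlm=False,
    is_master=True,
    special_tok_ids={"bos_token": 0, "eos_token": 1, "unk_token": 2, "pad_token": 3},
)
# Toy sequences: bracketed by bos (0) / eos (1) and longer than the 11-token floor.
data = [
    np.array([0, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 5, 1]),
    np.array([0, 9, 8, 7, 6, 5, 9, 8, 7, 6, 5, 9, 8, 1]),
]
dataset = LmSeqsDataset(params, data)  # assumed upstream class name
loader = DataLoader(dataset, batch_size=2, collate_fn=dataset.batch_sequences)
token_ids, lengths = next(iter(loader))  # padded ids (bs, max_seq_len) and lengths (bs,)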
0
0
from __future__ import annotations import unittest from transformers import AutoTokenizer, MBartConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel @require_tf class _snake_case : __lowerCAmelCase : Optional[Any] = MBartConfig __lowerCAmelCase : Tuple = {} __lowerCAmelCase : Any = 'gelu' def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=7 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=99 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=37 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=20 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=0 , ): '''simple docstring''' lowercase__ : str = parent lowercase__ : Tuple = batch_size lowercase__ : List[str] = seq_length lowercase__ : List[Any] = is_training lowercase__ : str = use_labels lowercase__ : Any = vocab_size lowercase__ : str = hidden_size lowercase__ : str = num_hidden_layers lowercase__ : Optional[int] = num_attention_heads lowercase__ : List[str] = intermediate_size lowercase__ : int = hidden_dropout_prob lowercase__ : Dict = attention_probs_dropout_prob lowercase__ : List[str] = max_position_embeddings lowercase__ : Any = eos_token_id lowercase__ : Dict = pad_token_id lowercase__ : Dict = bos_token_id def lowercase__ ( self): '''simple docstring''' lowercase__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size) lowercase__ : str = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size) , 1) lowercase__ : Dict = tf.concat([input_ids, eos_tensor] , axis=1) lowercase__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) lowercase__ : List[Any] = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) lowercase__ : Tuple = prepare_mbart_inputs_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) return config, inputs_dict def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : Optional[Any] = TFMBartModel(config=SCREAMING_SNAKE_CASE_).get_decoder() lowercase__ : Optional[Any] = inputs_dict["""input_ids"""] lowercase__ : List[str] = input_ids[:1, :] lowercase__ : Optional[Any] = inputs_dict["""attention_mask"""][:1, :] lowercase__ : List[str] = inputs_dict["""head_mask"""] lowercase__ : Any = 1 # first forward pass lowercase__ : List[str] = model(SCREAMING_SNAKE_CASE_ , 
attention_mask=SCREAMING_SNAKE_CASE_ , head_mask=SCREAMING_SNAKE_CASE_ , use_cache=SCREAMING_SNAKE_CASE_) lowercase__ , lowercase__ : int = outputs.to_tuple() lowercase__ : str = past_key_values[1] def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , ) -> str: '''simple docstring''' if attention_mask is None: lowercase__ : Optional[int] = tf.cast(tf.math.not_equal(lowercase_ , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: lowercase__ : Optional[Any] = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: lowercase__ : Optional[Any] = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: lowercase__ : Tuple = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: lowercase__ : Dict = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class _snake_case ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ): __lowerCAmelCase : Any = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else () __lowerCAmelCase : List[Any] = (TFMBartForConditionalGeneration,) if is_tf_available() else () __lowerCAmelCase : Optional[Any] = ( { 'conversational': TFMBartForConditionalGeneration, 'feature-extraction': TFMBartModel, 'summarization': TFMBartForConditionalGeneration, 'text2text-generation': TFMBartForConditionalGeneration, 'translation': TFMBartForConditionalGeneration, } if is_tf_available() else {} ) __lowerCAmelCase : Optional[int] = True __lowerCAmelCase : str = False __lowerCAmelCase : Union[str, Any] = False def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): '''simple docstring''' if pipeline_test_casse_name != "FeatureExtractionPipelineTests": # Exception encountered when calling layer '...' 
return True return False def lowercase__ ( self): '''simple docstring''' lowercase__ : Union[str, Any] = TFMBartModelTester(self) lowercase__ : Optional[int] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_) def lowercase__ ( self): '''simple docstring''' self.config_tester.run_common_tests() def lowercase__ ( self): '''simple docstring''' lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*SCREAMING_SNAKE_CASE_) @require_sentencepiece @require_tokenizers @require_tf class _snake_case ( unittest.TestCase ): __lowerCAmelCase : Optional[Any] = [ ' UN Chief Says There Is No Military Solution in Syria', ] __lowerCAmelCase : Any = [ 'Şeful ONU declară că nu există o soluţie militară în Siria', ] __lowerCAmelCase : int = 'facebook/mbart-large-en-ro' @cached_property def lowercase__ ( self): '''simple docstring''' return AutoTokenizer.from_pretrained(self.model_name) @cached_property def lowercase__ ( self): '''simple docstring''' lowercase__ : Optional[Any] = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name) return model def lowercase__ ( self , **SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : Dict = self.translate_src_text(**SCREAMING_SNAKE_CASE_) self.assertListEqual(self.expected_text , SCREAMING_SNAKE_CASE_) def lowercase__ ( self , **SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : List[Any] = self.tokenizer(self.src_text , **SCREAMING_SNAKE_CASE_ , return_tensors="""tf""") lowercase__ : Any = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2) lowercase__ : Union[str, Any] = self.tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_) return generated_words @slow def lowercase__ ( self): '''simple docstring''' self._assert_generated_batch_equal_expected()
12
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging SCREAMING_SNAKE_CASE__ : str = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : Tuple = """▁""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = {"""vocab_file""": """spiece.model"""} SCREAMING_SNAKE_CASE__ : List[Any] = { """vocab_file""": { """google/reformer-crime-and-punishment""": ( """https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model""" ) } } SCREAMING_SNAKE_CASE__ : Optional[int] = { """google/reformer-crime-and-punishment""": 52_42_88, } class lowerCamelCase_ ( lowerCamelCase ): a__ = VOCAB_FILES_NAMES a__ = PRETRAINED_VOCAB_FILES_MAP a__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a__ = ['''input_ids''', '''attention_mask'''] def __init__( self , __lowerCAmelCase , __lowerCAmelCase="</s>" , __lowerCAmelCase="<unk>" , __lowerCAmelCase=[] , __lowerCAmelCase = None , **__lowerCAmelCase , ): """simple docstring""" __magic_name__ :int = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( eos_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , additional_special_tokens=__lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCAmelCase , ) __magic_name__ :Optional[Any] = vocab_file __magic_name__ :int = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(__lowerCAmelCase ) @property def A ( self ): """simple docstring""" return self.sp_model.get_piece_size() def A ( self ): """simple docstring""" __magic_name__ :str = {self.convert_ids_to_tokens(__lowerCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ): """simple docstring""" __magic_name__ :Optional[Any] = self.__dict__.copy() __magic_name__ :Optional[Any] = None return state def __setstate__( self , __lowerCAmelCase ): """simple docstring""" __magic_name__ :Any = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): __magic_name__ :Optional[int] = {} __magic_name__ :Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def A ( self , __lowerCAmelCase ): """simple docstring""" return self.sp_model.encode(__lowerCAmelCase , out_type=__lowerCAmelCase ) def A ( self , __lowerCAmelCase ): """simple docstring""" return self.sp_model.piece_to_id(__lowerCAmelCase ) def A ( self , __lowerCAmelCase ): """simple docstring""" if index < self.sp_model.get_piece_size(): __magic_name__ :int = self.sp_model.IdToPiece(__lowerCAmelCase ) return token def A ( self , __lowerCAmelCase ): """simple docstring""" __magic_name__ :Optional[Any] = [] __magic_name__ :Tuple = '''''' for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(__lowerCAmelCase ) + token __magic_name__ :Optional[Any] = [] else: current_sub_tokens.append(__lowerCAmelCase ) out_string += self.sp_model.decode(__lowerCAmelCase ) return out_string.strip() def A ( self , __lowerCAmelCase , __lowerCAmelCase = None ): """simple docstring""" if not os.path.isdir(__lowerCAmelCase ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return __magic_name__ :Optional[int] = os.path.join( __lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file 
) != os.path.abspath(__lowerCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __lowerCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(__lowerCAmelCase , '''wb''' ) as fi: __magic_name__ :Dict = self.sp_model.serialized_model_proto() fi.write(__lowerCAmelCase ) return (out_vocab_file,)
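A short usage sketch for the tokenizer above; it assumes the class is exported as ReformerTokenizer, as in the transformers library this file mirrors, and uses the checkpoint name hard-coded in the listing:

# Hypothetical quick check (downloads the SentencePiece model on first use).
from transformers import ReformerTokenizer

tokenizer = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
ids = tokenizer("Crime and Punishment")["input_ids"]
print(tokenizer.convert_ids_to_tokens(ids))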
0
0
'''simple docstring'''

from collections import namedtuple

import requests
from lxml import html  # type: ignore

covid_data = namedtuple("covid_data", "cases deaths recovered")


def covid_stats(url: str = "https://www.worldometers.info/coronavirus/") -> covid_data:
    # The xpath string (not the url) must be passed to .xpath(); the anonymized
    # listing passed the url to both calls, which always raised at runtime.
    xpath_str = '//div[@class = "maincounter-number"]/span/text()'
    return covid_data(*html.fromstring(requests.get(url).content).xpath(xpath_str))


fmt = """Total COVID-19 cases in the world: {}
Total deaths due to COVID-19 in the world: {}
Total COVID-19 patients recovered in the world: {}"""
print(fmt.format(*covid_stats()))
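Because the function scrapes live HTML, the hard-coded XPath is brittle; a defensive call, using only the names fixed above, looks like:

# Defensive usage: worldometers markup can change, so guard the scrape.
try:
    stats = covid_stats()
    print(f"cases={stats.cases}, deaths={stats.deaths}, recovered={stats.recovered}")
except (requests.RequestException, TypeError) as err:
    print(f"scrape failed: {err}")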
13
import os import unittest from transformers import MobileBertTokenizer, MobileBertTokenizerFast from transformers.models.bert.tokenization_bert import ( VOCAB_FILES_NAMES, BasicTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english @require_tokenizers class lowerCamelCase_ ( lowerCamelCase , unittest.TestCase ): a__ = MobileBertTokenizer a__ = MobileBertTokenizerFast a__ = True a__ = True a__ = filter_non_english a__ = '''google/mobilebert-uncased''' def A ( self ): """simple docstring""" super().setUp() __magic_name__ :Tuple = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] __magic_name__ :Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) __magic_name__ :List[str] = [ (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped for tokenizer_def in self.tokenizers_list ] def A ( self , __lowerCAmelCase ): """simple docstring""" __magic_name__ :Union[str, Any] = '''UNwant\u00E9d,running''' __magic_name__ :int = '''unwanted, running''' return input_text, output_text def A ( self ): """simple docstring""" __magic_name__ :Optional[int] = self.tokenizer_class(self.vocab_file ) __magic_name__ :List[Any] = tokenizer.tokenize('''UNwant\u00E9d,running''' ) self.assertListEqual(__lowerCAmelCase , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) , [9, 6, 7, 1_2, 1_0, 1_1] ) def A ( self ): """simple docstring""" if not self.test_rust_tokenizer: return __magic_name__ :int = self.get_tokenizer() __magic_name__ :Tuple = self.get_rust_tokenizer() __magic_name__ :List[str] = '''UNwant\u00E9d,running''' __magic_name__ :Optional[Any] = tokenizer.tokenize(__lowerCAmelCase ) __magic_name__ :List[Any] = rust_tokenizer.tokenize(__lowerCAmelCase ) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase ) __magic_name__ :int = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ) __magic_name__ :str = rust_tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase ) __magic_name__ :List[Any] = self.get_rust_tokenizer() __magic_name__ :Any = tokenizer.encode(__lowerCAmelCase ) __magic_name__ :Any = rust_tokenizer.encode(__lowerCAmelCase ) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase ) # With lower casing __magic_name__ :Any = self.get_tokenizer(do_lower_case=__lowerCAmelCase ) __magic_name__ :List[Any] = self.get_rust_tokenizer(do_lower_case=__lowerCAmelCase ) __magic_name__ :Dict = '''UNwant\u00E9d,running''' __magic_name__ :Tuple = tokenizer.tokenize(__lowerCAmelCase ) __magic_name__ :Union[str, Any] = rust_tokenizer.tokenize(__lowerCAmelCase ) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase ) __magic_name__ :Optional[Any] = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ) __magic_name__ :Dict = rust_tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase ) __magic_name__ :Tuple = 
self.get_rust_tokenizer() __magic_name__ :Dict = tokenizer.encode(__lowerCAmelCase ) __magic_name__ :List[Any] = rust_tokenizer.encode(__lowerCAmelCase ) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase ) def A ( self ): """simple docstring""" __magic_name__ :Optional[int] = BasicTokenizer() self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] ) def A ( self ): """simple docstring""" __magic_name__ :List[Any] = BasicTokenizer(do_lower_case=__lowerCAmelCase ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] ) def A ( self ): """simple docstring""" __magic_name__ :Union[str, Any] = BasicTokenizer(do_lower_case=__lowerCAmelCase , strip_accents=__lowerCAmelCase ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] ) def A ( self ): """simple docstring""" __magic_name__ :Dict = BasicTokenizer(do_lower_case=__lowerCAmelCase , strip_accents=__lowerCAmelCase ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] ) def A ( self ): """simple docstring""" __magic_name__ :Optional[int] = BasicTokenizer(do_lower_case=__lowerCAmelCase ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] ) def A ( self ): """simple docstring""" __magic_name__ :List[str] = BasicTokenizer(do_lower_case=__lowerCAmelCase ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def A ( self ): """simple docstring""" __magic_name__ :int = BasicTokenizer(do_lower_case=__lowerCAmelCase , strip_accents=__lowerCAmelCase ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def A ( self ): """simple docstring""" __magic_name__ :Optional[int] = BasicTokenizer(do_lower_case=__lowerCAmelCase , strip_accents=__lowerCAmelCase ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def A ( self ): """simple docstring""" __magic_name__ :Optional[Any] = BasicTokenizer(do_lower_case=__lowerCAmelCase , never_split=['''[UNK]'''] ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? 
[UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] ) def A ( self ): """simple docstring""" __magic_name__ :int = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing'''] __magic_name__ :Union[str, Any] = {} for i, token in enumerate(__lowerCAmelCase ): __magic_name__ :Tuple = i __magic_name__ :List[Any] = WordpieceTokenizer(vocab=__lowerCAmelCase , unk_token='''[UNK]''' ) self.assertListEqual(tokenizer.tokenize('''''' ) , [] ) self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] ) self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] ) def A ( self ): """simple docstring""" self.assertTrue(_is_whitespace(''' ''' ) ) self.assertTrue(_is_whitespace('''\t''' ) ) self.assertTrue(_is_whitespace('''\r''' ) ) self.assertTrue(_is_whitespace('''\n''' ) ) self.assertTrue(_is_whitespace('''\u00A0''' ) ) self.assertFalse(_is_whitespace('''A''' ) ) self.assertFalse(_is_whitespace('''-''' ) ) def A ( self ): """simple docstring""" self.assertTrue(_is_control('''\u0005''' ) ) self.assertFalse(_is_control('''A''' ) ) self.assertFalse(_is_control(''' ''' ) ) self.assertFalse(_is_control('''\t''' ) ) self.assertFalse(_is_control('''\r''' ) ) def A ( self ): """simple docstring""" self.assertTrue(_is_punctuation('''-''' ) ) self.assertTrue(_is_punctuation('''$''' ) ) self.assertTrue(_is_punctuation('''`''' ) ) self.assertTrue(_is_punctuation('''.''' ) ) self.assertFalse(_is_punctuation('''A''' ) ) self.assertFalse(_is_punctuation(''' ''' ) ) def A ( self ): """simple docstring""" __magic_name__ :Any = self.get_tokenizer() __magic_name__ :Any = self.get_rust_tokenizer() # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340 self.assertListEqual([tokenizer.tokenize(__lowerCAmelCase ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] ) self.assertListEqual( [rust_tokenizer.tokenize(__lowerCAmelCase ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] ) @slow def A ( self ): """simple docstring""" __magic_name__ :Optional[int] = self.tokenizer_class.from_pretrained('''google/mobilebert-uncased''' ) __magic_name__ :Optional[int] = tokenizer.encode('''sequence builders''' , add_special_tokens=__lowerCAmelCase ) __magic_name__ :List[Any] = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__lowerCAmelCase ) __magic_name__ :Union[str, Any] = tokenizer.build_inputs_with_special_tokens(__lowerCAmelCase ) __magic_name__ :List[Any] = tokenizer.build_inputs_with_special_tokens(__lowerCAmelCase , __lowerCAmelCase ) assert encoded_sentence == [1_0_1] + text + [1_0_2] assert encoded_pair == [1_0_1] + text + [1_0_2] + text_a + [1_0_2] def A ( self ): """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): __magic_name__ :Optional[Any] = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase ) __magic_name__ :Optional[int] = F'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.''' __magic_name__ :Optional[Any] = tokenizer_r.encode_plus( __lowerCAmelCase , return_attention_mask=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , ) __magic_name__ :Any = 
tokenizer_r.do_lower_case if hasattr(__lowerCAmelCase , '''do_lower_case''' ) else False __magic_name__ :Optional[int] = ( [ ((0, 0), tokenizer_r.cls_token), ((0, 1), '''A'''), ((1, 2), ''','''), ((3, 5), '''na'''), ((5, 6), '''##ï'''), ((6, 8), '''##ve'''), ((9, 1_5), tokenizer_r.mask_token), ((1_6, 2_1), '''Allen'''), ((2_1, 2_3), '''##NL'''), ((2_3, 2_4), '''##P'''), ((2_5, 3_3), '''sentence'''), ((3_3, 3_4), '''.'''), ((0, 0), tokenizer_r.sep_token), ] if not do_lower_case else [ ((0, 0), tokenizer_r.cls_token), ((0, 1), '''a'''), ((1, 2), ''','''), ((3, 8), '''naive'''), ((9, 1_5), tokenizer_r.mask_token), ((1_6, 2_1), '''allen'''), ((2_1, 2_3), '''##nl'''), ((2_3, 2_4), '''##p'''), ((2_5, 3_3), '''sentence'''), ((3_3, 3_4), '''.'''), ((0, 0), tokenizer_r.sep_token), ] ) self.assertEqual( [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) ) self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] ) def A ( self ): """simple docstring""" __magic_name__ :Dict = ['''的''', '''人''', '''有'''] __magic_name__ :Any = ''''''.join(__lowerCAmelCase ) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): __magic_name__ :Optional[Any] = True __magic_name__ :Optional[int] = self.tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase ) __magic_name__ :Tuple = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase ) __magic_name__ :Dict = tokenizer_p.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ) __magic_name__ :List[str] = tokenizer_r.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ) __magic_name__ :Dict = tokenizer_r.convert_ids_to_tokens(__lowerCAmelCase ) __magic_name__ :Union[str, Any] = tokenizer_p.convert_ids_to_tokens(__lowerCAmelCase ) # it is expected that each Chinese character is not preceded by "##" self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase ) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase ) __magic_name__ :List[str] = False __magic_name__ :Tuple = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase ) __magic_name__ :List[str] = self.tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase ) __magic_name__ :Optional[Any] = tokenizer_r.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ) __magic_name__ :Union[str, Any] = tokenizer_p.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ) __magic_name__ :List[str] = tokenizer_r.convert_ids_to_tokens(__lowerCAmelCase ) __magic_name__ :Optional[int] = tokenizer_p.convert_ids_to_tokens(__lowerCAmelCase ) # it is expected that only the first Chinese character is not preceded by "##". __magic_name__ :Dict = [ F'''##{token}''' if idx != 0 else token for idx, token in enumerate(__lowerCAmelCase ) ] self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase ) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
0
0
import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class UpperCAmelCase_ ( __lowercase ): """simple docstring""" UpperCAmelCase__ : Union[str, Any] = (DPMSolverSinglestepScheduler,) UpperCAmelCase__ : Any = (("num_inference_steps", 25),) def __lowercase ( self , **_a ) -> List[str]: _a : List[str] = { '''num_train_timesteps''': 1_0_0_0, '''beta_start''': 0.0001, '''beta_end''': 0.02, '''beta_schedule''': '''linear''', '''solver_order''': 2, '''prediction_type''': '''epsilon''', '''thresholding''': False, '''sample_max_value''': 1.0, '''algorithm_type''': '''dpmsolver++''', '''solver_type''': '''midpoint''', '''lambda_min_clipped''': -float('''inf''' ), '''variance_type''': None, } config.update(**_a ) return config def __lowercase ( self , _a=0 , **_a ) -> Any: _a : List[str] = dict(self.forward_default_kwargs ) _a : Optional[Any] = kwargs.pop('''num_inference_steps''' , _a ) _a : Dict = self.dummy_sample _a : Any = 0.1 * sample _a : Optional[Any] = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: _a : List[str] = self.get_scheduler_config(**_a ) _a : str = scheduler_class(**_a ) scheduler.set_timesteps(_a ) # copy over dummy past residuals _a : Tuple = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(_a ) _a : List[str] = scheduler_class.from_pretrained(_a ) new_scheduler.set_timesteps(_a ) # copy over dummy past residuals _a : int = dummy_past_residuals[: new_scheduler.config.solver_order] _a , _a : Any = sample, sample for t in range(_a , time_step + scheduler.config.solver_order + 1 ): _a : Dict = scheduler.step(_a , _a , _a , **_a ).prev_sample _a : List[str] = new_scheduler.step(_a , _a , _a , **_a ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def __lowercase ( self ) -> Dict: pass def __lowercase ( self , _a=0 , **_a ) -> List[Any]: _a : List[str] = dict(self.forward_default_kwargs ) _a : Union[str, Any] = kwargs.pop('''num_inference_steps''' , _a ) _a : Union[str, Any] = self.dummy_sample _a : List[str] = 0.1 * sample _a : Optional[Any] = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: _a : int = self.get_scheduler_config() _a : Optional[int] = scheduler_class(**_a ) scheduler.set_timesteps(_a ) # copy over dummy past residuals (must be after setting timesteps) _a : str = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(_a ) _a : str = scheduler_class.from_pretrained(_a ) # copy over dummy past residuals new_scheduler.set_timesteps(_a ) # copy over dummy past residual (must be after setting timesteps) _a : Optional[Any] = dummy_past_residuals[: new_scheduler.config.solver_order] _a : int = scheduler.step(_a , _a , _a , **_a ).prev_sample _a : int = new_scheduler.step(_a , _a , _a , **_a ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def __lowercase ( self , _a=None , **_a ) -> Dict: if scheduler is None: _a : Optional[Any] = self.scheduler_classes[0] _a : List[str] = self.get_scheduler_config(**_a ) _a : Union[str, Any] = scheduler_class(**_a ) _a : int = self.scheduler_classes[0] _a : List[Any] = self.get_scheduler_config(**_a ) _a : Union[str, 
Any] = scheduler_class(**_a ) _a : Optional[Any] = 1_0 _a : List[Any] = self.dummy_model() _a : Union[str, Any] = self.dummy_sample_deter scheduler.set_timesteps(_a ) for i, t in enumerate(scheduler.timesteps ): _a : Any = model(_a , _a ) _a : Dict = scheduler.step(_a , _a , _a ).prev_sample return sample def __lowercase ( self ) -> Dict: _a : Optional[int] = DPMSolverSinglestepScheduler(**self.get_scheduler_config() ) _a : Union[str, Any] = 5_0 _a : Any = self.dummy_model() _a : Tuple = self.dummy_sample_deter scheduler.set_timesteps(_a ) # make sure that the first t is uneven for i, t in enumerate(scheduler.timesteps[3:] ): _a : List[Any] = model(_a , _a ) _a : Optional[int] = scheduler.step(_a , _a , _a ).prev_sample _a : Union[str, Any] = torch.mean(torch.abs(_a ) ) assert abs(result_mean.item() - 0.2574 ) < 1e-3 def __lowercase ( self ) -> int: for timesteps in [2_5, 5_0, 1_0_0, 9_9_9, 1_0_0_0]: self.check_over_configs(num_train_timesteps=_a ) def __lowercase ( self ) -> List[Any]: # make sure that iterating over schedulers with same config names gives same results # for defaults _a : Dict = DPMSolverSinglestepScheduler(**self.get_scheduler_config() ) _a : Dict = self.full_loop(scheduler=_a ) _a : Optional[Any] = torch.mean(torch.abs(_a ) ) assert abs(result_mean.item() - 0.2791 ) < 1e-3 _a : int = DEISMultistepScheduler.from_config(scheduler.config ) _a : Union[str, Any] = DPMSolverMultistepScheduler.from_config(scheduler.config ) _a : List[Any] = UniPCMultistepScheduler.from_config(scheduler.config ) _a : Union[str, Any] = DPMSolverSinglestepScheduler.from_config(scheduler.config ) _a : Optional[int] = self.full_loop(scheduler=_a ) _a : List[str] = torch.mean(torch.abs(_a ) ) assert abs(result_mean.item() - 0.2791 ) < 1e-3 def __lowercase ( self ) -> Union[str, Any]: self.check_over_configs(thresholding=_a ) for order in [1, 2, 3]: for solver_type in ["midpoint", "heun"]: for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( thresholding=_a , prediction_type=_a , sample_max_value=_a , algorithm_type='''dpmsolver++''' , solver_order=_a , solver_type=_a , ) def __lowercase ( self ) -> Tuple: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=_a ) def __lowercase ( self ) -> Dict: for algorithm_type in ["dpmsolver", "dpmsolver++"]: for solver_type in ["midpoint", "heun"]: for order in [1, 2, 3]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( solver_order=_a , solver_type=_a , prediction_type=_a , algorithm_type=_a , ) _a : List[Any] = self.full_loop( solver_order=_a , solver_type=_a , prediction_type=_a , algorithm_type=_a , ) assert not torch.isnan(_a ).any(), "Samples have nan numbers" def __lowercase ( self ) -> List[Any]: self.check_over_configs(lower_order_final=_a ) self.check_over_configs(lower_order_final=_a ) def __lowercase ( self ) -> List[Any]: self.check_over_configs(lambda_min_clipped=-float('''inf''' ) ) self.check_over_configs(lambda_min_clipped=-5.1 ) def __lowercase ( self ) -> int: self.check_over_configs(variance_type=_a ) self.check_over_configs(variance_type='''learned_range''' ) def __lowercase ( self ) -> Optional[Any]: for num_inference_steps in [1, 2, 3, 5, 1_0, 5_0, 1_0_0, 9_9_9, 1_0_0_0]: self.check_over_forward(num_inference_steps=_a , time_step=0 ) def __lowercase ( self ) -> Optional[Any]: _a : Any = self.full_loop() _a : Optional[Any] = torch.mean(torch.abs(_a ) ) assert abs(result_mean.item() - 0.2791 ) < 1e-3 def __lowercase ( self ) -> 
Optional[int]: _a : Optional[Any] = self.full_loop(use_karras_sigmas=_a ) _a : int = torch.mean(torch.abs(_a ) ) assert abs(result_mean.item() - 0.2248 ) < 1e-3 def __lowercase ( self ) -> Optional[Any]: _a : Dict = self.full_loop(prediction_type='''v_prediction''' ) _a : Optional[int] = torch.mean(torch.abs(_a ) ) assert abs(result_mean.item() - 0.1453 ) < 1e-3 def __lowercase ( self ) -> str: _a : List[str] = self.full_loop(prediction_type='''v_prediction''' , use_karras_sigmas=_a ) _a : Tuple = torch.mean(torch.abs(_a ) ) assert abs(result_mean.item() - 0.0649 ) < 1e-3 def __lowercase ( self ) -> List[str]: _a : Union[str, Any] = self.scheduler_classes[0] _a : Dict = self.get_scheduler_config(thresholding=_a , dynamic_thresholding_ratio=0 ) _a : str = scheduler_class(**_a ) _a : Dict = 1_0 _a : Optional[Any] = self.dummy_model() _a : List[str] = self.dummy_sample_deter.half() scheduler.set_timesteps(_a ) for i, t in enumerate(scheduler.timesteps ): _a : Any = model(_a , _a ) _a : int = scheduler.step(_a , _a , _a ).prev_sample assert sample.dtype == torch.floataa
14
import logging import os import quant_trainer import torch from torch.utils.data import DataLoader from transformers import Trainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput SCREAMING_SNAKE_CASE__ : List[str] = logging.getLogger(__name__) if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class lowerCamelCase_ ( lowerCamelCase ): def __init__( self , *__lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , **__lowerCAmelCase ): """simple docstring""" super().__init__(*__lowerCAmelCase , **__lowerCAmelCase ) __magic_name__ :Any = eval_examples __magic_name__ :str = post_process_function __magic_name__ :int = quant_trainer_args __magic_name__ :List[str] = 1_2_8 # default number of calibration samples def A ( self , __lowerCAmelCase=None ): """simple docstring""" if calib_dataset is None and self.calib_dataset is None: raise ValueError('''Trainer: calibration requires an calib_dataset.''' ) __magic_name__ :Optional[Any] = calib_dataset if calib_dataset is not None else self.calib_dataset __magic_name__ :Optional[int] = self._remove_unused_columns(__lowerCAmelCase , description='''Calibration''' ) return DataLoader( __lowerCAmelCase , batch_size=self.args.eval_batch_size , collate_fn=self.data_collator , drop_last=self.args.dataloader_drop_last , num_workers=self.args.dataloader_num_workers , pin_memory=self.args.dataloader_pin_memory , shuffle=__lowerCAmelCase , ) def A ( self , __lowerCAmelCase=None ): """simple docstring""" __magic_name__ :Dict = self.train_dataset if calib_dataset is None else calib_dataset __magic_name__ :Any = self.get_calib_dataloader(__lowerCAmelCase ) __magic_name__ :List[str] = self.model quant_trainer.configure_model(__lowerCAmelCase , self.quant_trainer_args , calib=__lowerCAmelCase ) model.eval() quant_trainer.enable_calibration(__lowerCAmelCase ) logger.info('''***** Running calibration *****''' ) logger.info(F''' Num examples = {self.calib_num}''' ) logger.info(F''' Batch size = {calib_dataloader.batch_size}''' ) for step, inputs in enumerate(__lowerCAmelCase ): # Prediction step __magic_name__ , __magic_name__ , __magic_name__ :str = self.prediction_step(__lowerCAmelCase , __lowerCAmelCase , prediction_loss_only=__lowerCAmelCase ) if (step + 1) * calib_dataloader.batch_size >= self.calib_num: break quant_trainer.finish_calibration(__lowerCAmelCase , self.quant_trainer_args ) __magic_name__ :Any = model def A ( self , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase = "eval" ): """simple docstring""" __magic_name__ :Tuple = self.eval_dataset if eval_dataset is None else eval_dataset __magic_name__ :Optional[Any] = self.get_eval_dataloader(__lowerCAmelCase ) __magic_name__ :str = self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. 
__magic_name__ :Any = self.compute_metrics __magic_name__ :List[Any] = None __magic_name__ :List[str] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: __magic_name__ :Optional[Any] = eval_loop( __lowerCAmelCase , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__lowerCAmelCase , ) finally: __magic_name__ :Union[str, Any] = compute_metrics if self.post_process_function is not None and self.compute_metrics is not None: __magic_name__ :Union[str, Any] = self.post_process_function(__lowerCAmelCase , __lowerCAmelCase , output.predictions ) __magic_name__ :int = self.compute_metrics(__lowerCAmelCase ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(F'''{metric_key_prefix}_''' ): __magic_name__ :Dict = metrics.pop(__lowerCAmelCase ) self.log(__lowerCAmelCase ) else: __magic_name__ :List[str] = {} if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) xm.master_print(met.metrics_report() ) __magic_name__ :Optional[Any] = self.callback_handler.on_evaluate(self.args , self.state , self.control , __lowerCAmelCase ) return metrics def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase = "test" ): """simple docstring""" __magic_name__ :int = self.get_test_dataloader(__lowerCAmelCase ) # Temporarily disable metric computation, we will do it in the loop here. __magic_name__ :Dict = self.compute_metrics __magic_name__ :str = None __magic_name__ :Optional[int] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: __magic_name__ :int = eval_loop( __lowerCAmelCase , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__lowerCAmelCase , ) finally: __magic_name__ :List[Any] = compute_metrics if self.post_process_function is None or self.compute_metrics is None: return output __magic_name__ :Optional[Any] = self.post_process_function(__lowerCAmelCase , __lowerCAmelCase , output.predictions , '''predict''' ) __magic_name__ :Dict = self.compute_metrics(__lowerCAmelCase ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(F'''{metric_key_prefix}_''' ): __magic_name__ :List[str] = metrics.pop(__lowerCAmelCase ) return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=__lowerCAmelCase ) def A ( self , __lowerCAmelCase="./" ): """simple docstring""" __magic_name__ :List[Any] = self.eval_dataset __magic_name__ :Any = self.get_eval_dataloader(__lowerCAmelCase ) __magic_name__ :int = next(iter(__lowerCAmelCase ) ) # saving device - to make it consistent __magic_name__ :str = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' ) # convert to tuple __magic_name__ :int = tuple(v.to(__lowerCAmelCase ) for k, v in batch.items() ) logger.info('''Converting model to be onnx compatible''' ) from pytorch_quantization.nn import TensorQuantizer __magic_name__ :Any = True __magic_name__ :Optional[int] = self.model.to(__lowerCAmelCase ) model.eval() model.float() __magic_name__ :Any = model.module if hasattr(__lowerCAmelCase , '''module''' ) else model quant_trainer.configure_model(__lowerCAmelCase , self.quant_trainer_args ) __magic_name__ :int = os.path.join(__lowerCAmelCase , '''model.onnx''' ) logger.info(F'''exporting model to 
{output_model_file}''' ) __magic_name__ :Dict = {0: '''batch_size''', 1: '''seq_len'''} torch.onnx.export( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , export_params=__lowerCAmelCase , opset_version=1_3 , do_constant_folding=__lowerCAmelCase , input_names=['''input_ids''', '''attention_mask''', '''token_type_ids'''] , output_names=['''output_start_logits''', '''output_end_logits'''] , dynamic_axes={ '''input_ids''': axes, '''attention_mask''': axes, '''token_type_ids''': axes, '''output_start_logits''': axes, '''output_end_logits''': axes, } , verbose=__lowerCAmelCase , ) logger.info('''onnx export finished''' )
0
0
import copy import re class A : '''simple docstring''' A__ = '''hp''' A__ = {} A__ = None @classmethod def lowerCamelCase__ (cls : str , _UpperCAmelCase : Any , _UpperCAmelCase : List[str] ) -> Tuple: """simple docstring""" lowercase__ = prefix lowercase__ = defaults cls.build_naming_info() @staticmethod def lowerCamelCase__ (_UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : int ) -> Tuple: """simple docstring""" if len(_UpperCAmelCase ) == 0: return "" lowercase__ = None if any(char.isdigit() for char in word ): raise Exception(f'''Parameters should not contain numbers: \'{word}\' contains a number''' ) if word in info["short_word"]: return info["short_word"][word] for prefix_len in range(1 , len(_UpperCAmelCase ) + 1 ): lowercase__ = word[:prefix_len] if prefix in info["reverse_short_word"]: continue else: lowercase__ = prefix break if short_word is None: # Paranoid fallback def int_to_alphabetic(_UpperCAmelCase : Union[str, Any] ): lowercase__ = """""" while integer != 0: lowercase__ = chr(ord("""A""" ) + integer % 10 ) + s integer //= 10 return s lowercase__ = 0 while True: lowercase__ = word + """#""" + int_to_alphabetic(_UpperCAmelCase ) if sword in info["reverse_short_word"]: continue else: lowercase__ = sword break lowercase__ = short_word lowercase__ = word return short_word @staticmethod def lowerCamelCase__ (_UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int] ) -> Union[str, Any]: """simple docstring""" lowercase__ = param_name.split("""_""" ) lowercase__ = [TrialShortNamer.shortname_for_word(_UpperCAmelCase , _UpperCAmelCase ) for word in words] # We try to create a separatorless short name, but if there is a collision we have to fallback # to a separated short name lowercase__ = ["""""", """_"""] for separator in separators: lowercase__ = separator.join(_UpperCAmelCase ) if shortname not in info["reverse_short_param"]: lowercase__ = shortname lowercase__ = param_name return shortname return param_name @staticmethod def lowerCamelCase__ (_UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[int] ) -> int: """simple docstring""" lowercase__ = TrialShortNamer.shortname_for_key(_UpperCAmelCase , _UpperCAmelCase ) lowercase__ = short_name lowercase__ = param_name @classmethod def lowerCamelCase__ (cls : Union[str, Any] ) -> Tuple: """simple docstring""" if cls.NAMING_INFO is not None: return lowercase__ = { """short_word""": {}, """reverse_short_word""": {}, """short_param""": {}, """reverse_short_param""": {}, } lowercase__ = list(cls.DEFAULTS.keys() ) for k in field_keys: cls.add_new_param_name(_UpperCAmelCase , _UpperCAmelCase ) lowercase__ = info @classmethod def lowerCamelCase__ (cls : str , _UpperCAmelCase : Tuple ) -> List[str]: """simple docstring""" cls.build_naming_info() assert cls.PREFIX is not None lowercase__ = [copy.copy(cls.PREFIX )] for k, v in params.items(): if k not in cls.DEFAULTS: raise Exception(f'''You should provide a default value for the param name {k} with value {v}''' ) if v == cls.DEFAULTS[k]: # The default value is not added to the name continue lowercase__ = cls.NAMING_INFO["""short_param"""][k] if isinstance(_UpperCAmelCase , _UpperCAmelCase ): lowercase__ = 1 if v else 0 lowercase__ = """""" if isinstance(_UpperCAmelCase , (int, float) ) else """-""" lowercase__ = f'''{key}{sep}{v}''' name.append(_UpperCAmelCase ) return "_".join(_UpperCAmelCase ) @classmethod def lowerCamelCase__ (cls : Union[str, Any] , _UpperCAmelCase : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" lowercase__ = repr[len(cls.PREFIX ) + 1 :] if repr 
== "": lowercase__ = [] else: lowercase__ = repr.split("""_""" ) lowercase__ = {} for value in values: if "-" in value: lowercase__ , lowercase__ = value.split("""-""" ) else: lowercase__ = re.sub("""[0-9.]""" , """""" , _UpperCAmelCase ) lowercase__ = float(re.sub("""[^0-9.]""" , """""" , _UpperCAmelCase ) ) lowercase__ = cls.NAMING_INFO["""reverse_short_param"""][p_k] lowercase__ = p_v for k in cls.DEFAULTS: if k not in parameters: lowercase__ = cls.DEFAULTS[k] return parameters
15
def base16_encode(data: bytes) -> str:
    """Encode raw bytes as an uppercase base16 (hex) string."""
    # hex() must be applied to each byte, not to the whole input.
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    """Decode an uppercase base16 string back into bytes."""
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid: Data does not have an even number of hex digits."
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid: Data is not uppercase hex or it contains invalid characters."
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
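A round-trip sanity check for the two helpers, using the function names introduced in the fix above:

# Round-trip check for the base16 helpers.
encoded = base16_encode(b"Hello World!")
assert encoded == "48656C6C6F20576F726C6421"
assert base16_decode(encoded) == b"Hello World!"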
0
0
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

_import_structure = {
    "configuration_swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # The modeling names must be registered under a key in _import_structure;
    # the anonymized listing lost both this dict update and the final
    # sys.modules assignment, so the lazy module never saw these exports.
    _import_structure["modeling_swinv2"] = [
        "SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Swinv2ForImageClassification",
        "Swinv2ForMaskedImageModeling",
        "Swinv2Model",
        "Swinv2PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
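The net effect of the lazy-module pattern above is that importing the package stays cheap and the torch-heavy modeling module is only imported on first attribute access; from a consumer's point of view (assuming the fixed file ships inside transformers as usual):

# Attribute access, not package import, triggers the heavy import:
from transformers import Swinv2Config  # configuration only, no torch needed yet
from transformers import Swinv2Model   # first access lazily imports modeling_swinv2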
16
import pytest
import requests

from datasets.utils.file_utils import http_head

from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline


@pytest.mark.integration
def test_offline_with_timeout():
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        # The anonymized listing raised an undefined name here; this simulation
        # is expected to raise the imported RequestWouldHangIndefinitelyError.
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request("GET", "https://huggingface.co")
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request("GET", "https://huggingface.co", timeout=1.0)


@pytest.mark.integration
def test_offline_with_connection_error():
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request("GET", "https://huggingface.co")


def test_offline_with_datasets_offline_mode_enabled():
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        # Offline mode raises a ConnectionError subclass, so the builtin catches it.
        with pytest.raises(ConnectionError):
            http_head("https://huggingface.co")
0
0
import math


def insertion_sort(array: list, start: int = 0, end: int = 0) -> list:
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array


def heapify(array: list, index: int, heap_size: int) -> None:
    # Max Heap
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node
    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index
    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index
    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size)


def heap_sort(array: list) -> list:
    n = len(array)
    for i in range(n // 2, -1, -1):
        heapify(array, i, n)
    for i in range(n - 1, 0, -1):
        array[i], array[0] = array[0], array[i]
        heapify(array, 0, i)
    return array


def median_of_3(array: list, first_index: int, middle_index: int, last_index: int) -> int:
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]


def partition(array: list, low: int, high: int, pivot: int) -> int:
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1


def sort(array: list) -> list:
    if len(array) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)


def intro_sort(array: list, start: int, end: int, size_threshold: int, max_depth: int) -> list:
    # Introsort: quicksort with a median-of-3 pivot, falling back to heapsort
    # past the depth limit and to insertion sort on small partitions.
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by a comma : ").strip()
    unsorted = [float(item) for item in user_input.split(",")]
    print(sort(unsorted))
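Quick checks for the restored implementation, using the names from the fix above:

# Sanity checks for the introsort as fixed above.
assert sort([4, 2, 6, 8, 1, 7, 8, 22, 14, 56, 27, 79, 23, 45, 14, 12]) == [
    1, 2, 4, 6, 7, 8, 8, 12, 14, 14, 22, 23, 27, 45, 56, 79,
]
assert sort([]) == []
assert sort([-2, -5, -45]) == [-45, -5, -2]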
17
import math
from collections.abc import Iterator
from itertools import takewhile


def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, and all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator() -> Iterator[int]:
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(n: int = 2_000_000) -> int:
    """Return the sum of all primes below n."""
    return sum(takewhile(lambda x: x < n, prime_generator()))


if __name__ == "__main__":
    print(f"{solution() = }")
0
0
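A quick sanity check for the two snippets in the record above, assuming the corrected function names and that the files were saved as importable modules; the module names `intro_sort` and `prime_sum` here are hypothetical.

import random
from itertools import islice

from intro_sort import sort             # hypothetical module for the introsort snippet
from prime_sum import prime_generator   # hypothetical module for the primes snippet

data = [random.randint(-1_000, 1_000) for _ in range(500)]
assert sort(list(data)) == sorted(data)  # introsort agrees with the builtin

print(list(islice(prime_generator(), 10)))  # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]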
'''simple docstring''' import argparse import json import os import re import torch from transformers import BloomConfig, BloomModel from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME from transformers.utils import logging logging.set_verbosity_info() _SCREAMING_SNAKE_CASE = [ "word_embeddings_layernorm.weight", "word_embeddings_layernorm.bias", "input_layernorm.weight", "input_layernorm.bias", "post_attention_layernorm.weight", "post_attention_layernorm.bias", "self_attention.dense.bias", "mlp.dense_4h_to_h.bias", "ln_f.weight", "ln_f.bias", ] _SCREAMING_SNAKE_CASE = [ "mlp.dense_4h_to_h.weight", "self_attention.dense.weight", ] def __a(SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Any ): '''simple docstring''' _lowerCAmelCase = { "word_embeddings.weight": "word_embeddings.weight", "word_embeddings.norm.weight": "word_embeddings_layernorm.weight", "word_embeddings.norm.bias": "word_embeddings_layernorm.bias", "weight": "ln_f.weight", "bias": "ln_f.bias", } if key in layer_rename_map: return layer_rename_map[key] # Handle transformer blocks _lowerCAmelCase = int(re.match(R".*layer_(\d*).*" , SCREAMING_SNAKE_CASE_ )[1] ) layer_number -= 3 return F'''h.{layer_number}.''' + key def __a(SCREAMING_SNAKE_CASE_ : Dict ): '''simple docstring''' if dtype == torch.bool: return 1 / 8 _lowerCAmelCase = re.search(R"[^\d](\d+)$" , str(SCREAMING_SNAKE_CASE_ ) ) if bit_search is None: raise ValueError(F'''`dtype` is not a valid dtype: {dtype}.''' ) _lowerCAmelCase = int(bit_search.groups()[0] ) return bit_size // 8 def __a(SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : List[Any] ): '''simple docstring''' if bloom_config_file == "": _lowerCAmelCase = BloomConfig() else: _lowerCAmelCase = BloomConfig.from_json_file(SCREAMING_SNAKE_CASE_ ) if shard_model: _lowerCAmelCase = os.listdir(SCREAMING_SNAKE_CASE_ ) _lowerCAmelCase = sorted(filter(lambda SCREAMING_SNAKE_CASE_ : s.startswith("layer" ) and "model_00" in s , SCREAMING_SNAKE_CASE_ ) ) _lowerCAmelCase = {"weight_map": {}, "metadata": {}} _lowerCAmelCase = 0 _lowerCAmelCase = None _lowerCAmelCase = BloomConfig() for j, file in enumerate(SCREAMING_SNAKE_CASE_ ): print("Processing file: {}".format(SCREAMING_SNAKE_CASE_ ) ) _lowerCAmelCase = None for i in range(SCREAMING_SNAKE_CASE_ ): # load all TP files _lowerCAmelCase = file.replace("model_00" , F'''model_0{i}''' ) _lowerCAmelCase = torch.load(os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , map_location="cpu" ) # Rename keys in the transformers names _lowerCAmelCase = list(temp.keys() ) for key in keys: _lowerCAmelCase = temp.pop(SCREAMING_SNAKE_CASE_ ) if tensors is None: _lowerCAmelCase = temp else: for key in tensors.keys(): if any(key.endswith(SCREAMING_SNAKE_CASE_ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ): # We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425) tensors[key] += temp[key] else: # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel _lowerCAmelCase = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0 # We concatenate these weights accross TP ranks _lowerCAmelCase = torch.cat([tensors[key], temp[key]] , dim=SCREAMING_SNAKE_CASE_ ) # Divide by the number of TP the weights we want to average for key in tensors.keys(): if 
any(key.endswith(SCREAMING_SNAKE_CASE_ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ): _lowerCAmelCase = tensors[key] / pretraining_tp torch.save( SCREAMING_SNAKE_CASE_ , os.path.join( SCREAMING_SNAKE_CASE_ , "pytorch_model_{}-of-{}.bin".format(str(j + 1 ).zfill(5 ) , str(len(SCREAMING_SNAKE_CASE_ ) ).zfill(5 ) ) , ) , ) for key in tensors.keys(): _lowerCAmelCase = tensors[key] total_size += value.numel() * get_dtype_size(value.dtype ) if key not in index_dict["weight_map"]: _lowerCAmelCase = "pytorch_model_{}-of-{}.bin".format( str(j + 1 ).zfill(5 ) , str(len(SCREAMING_SNAKE_CASE_ ) ).zfill(5 ) ) _lowerCAmelCase = BloomConfig() _lowerCAmelCase = pytorch_dump_folder_path + "/" + CONFIG_NAME _lowerCAmelCase = total_size with open(SCREAMING_SNAKE_CASE_ , "w" , encoding="utf-8" ) as f: f.write(config.to_json_string() ) with open(os.path.join(SCREAMING_SNAKE_CASE_ , WEIGHTS_NAME + ".index.json" ) , "w" , encoding="utf-8" ) as f: _lowerCAmelCase = json.dumps(SCREAMING_SNAKE_CASE_ , indent=2 , sort_keys=SCREAMING_SNAKE_CASE_ ) + "\n" f.write(SCREAMING_SNAKE_CASE_ ) else: _lowerCAmelCase = BloomModel(SCREAMING_SNAKE_CASE_ ) _lowerCAmelCase = os.listdir(SCREAMING_SNAKE_CASE_ ) _lowerCAmelCase = sorted(filter(lambda SCREAMING_SNAKE_CASE_ : s.startswith("layer" ) and "model_00" in s , SCREAMING_SNAKE_CASE_ ) ) _lowerCAmelCase = None for i, file in enumerate(SCREAMING_SNAKE_CASE_ ): _lowerCAmelCase = None for i in range(SCREAMING_SNAKE_CASE_ ): # load all TP files _lowerCAmelCase = file.replace("model_00" , F'''model_0{i}''' ) _lowerCAmelCase = torch.load(os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , map_location="cpu" ) # Rename keys in the transformers names _lowerCAmelCase = list(temp.keys() ) for key in keys: _lowerCAmelCase = temp.pop(SCREAMING_SNAKE_CASE_ ) if tensors is None: _lowerCAmelCase = temp else: for key in tensors.keys(): # We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425) if any(key.endswith(SCREAMING_SNAKE_CASE_ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ): tensors[key] += temp[key] else: # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel _lowerCAmelCase = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0 # We concatenate these weights accross TP ranks _lowerCAmelCase = torch.cat([tensors[key], temp[key]] , dim=SCREAMING_SNAKE_CASE_ ) # Divide by the number of TP the weights we want to average for key in tensors.keys(): if any(key.endswith(SCREAMING_SNAKE_CASE_ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ): _lowerCAmelCase = tensors[key] / pretraining_tp _lowerCAmelCase = model.load_state_dict(SCREAMING_SNAKE_CASE_ , strict=SCREAMING_SNAKE_CASE_ ) assert not other_keys.unexpected_keys, F'''The keys {other_keys.unexpected_keys} are unexpected''' if missing_keys is None: _lowerCAmelCase = set(other_keys.missing_keys ) else: _lowerCAmelCase = missing_keys.intersection(set(other_keys.missing_keys ) ) assert not missing_keys, F'''The keys {missing_keys} are missing''' # Save pytorch-model os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ ) _lowerCAmelCase = pytorch_dump_folder_path + "/" + WEIGHTS_NAME _lowerCAmelCase = pytorch_dump_folder_path + "/" + CONFIG_NAME print(F'''Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}''' ) if config.torch_dtype is not None: _lowerCAmelCase = model.to(config.torch_dtype ) 
torch.save(model.state_dict() , SCREAMING_SNAKE_CASE_ ) print(F'''Save configuration file to {pytorch_config_dump_path}''' ) with open(SCREAMING_SNAKE_CASE_ , "w" , encoding="utf-8" ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": _SCREAMING_SNAKE_CASE = argparse.ArgumentParser() # Required parameters parser.add_argument( "--bloom_checkpoint_path", default=None, type=str, required=True, help="Path to the Megatron-LM checkpoint path.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) parser.add_argument( "--bloom_config_file", default="", type=str, help=( "An optional config json file corresponding to the pre-trained model. \n" "This specifies the model architecture." ), ) parser.add_argument( "--shard_model", action="store_true", help="An optional setting to shard the output model \nThis enables sharding the converted checkpoint", ) parser.add_argument( "--pretraining_tp", default=4, type=int, help="Pretraining TP rank that has been used when training the model in Megatron-LM \n", ) _SCREAMING_SNAKE_CASE = parser.parse_args() convert_bloom_checkpoint_to_pytorch( args.bloom_checkpoint_path, args.bloom_config_file, args.pytorch_dump_folder_path, args.shard_model, args.pretraining_tp, )
18
import unittest import numpy as np from transformers.testing_utils import require_flax, require_tf, require_torch from transformers.utils import ( expand_dims, flatten_dict, is_flax_available, is_tf_available, is_torch_available, reshape, squeeze, transpose, ) if is_flax_available(): import jax.numpy as jnp if is_tf_available(): import tensorflow as tf if is_torch_available(): import torch class lowerCamelCase_ ( unittest.TestCase ): def A ( self ): """simple docstring""" __magic_name__ :List[Any] = { '''task_specific_params''': { '''summarization''': {'''length_penalty''': 1.0, '''max_length''': 1_2_8, '''min_length''': 1_2, '''num_beams''': 4}, '''summarization_cnn''': {'''length_penalty''': 2.0, '''max_length''': 1_4_2, '''min_length''': 5_6, '''num_beams''': 4}, '''summarization_xsum''': {'''length_penalty''': 1.0, '''max_length''': 6_2, '''min_length''': 1_1, '''num_beams''': 6}, } } __magic_name__ :List[str] = { '''task_specific_params.summarization.length_penalty''': 1.0, '''task_specific_params.summarization.max_length''': 1_2_8, '''task_specific_params.summarization.min_length''': 1_2, '''task_specific_params.summarization.num_beams''': 4, '''task_specific_params.summarization_cnn.length_penalty''': 2.0, '''task_specific_params.summarization_cnn.max_length''': 1_4_2, '''task_specific_params.summarization_cnn.min_length''': 5_6, '''task_specific_params.summarization_cnn.num_beams''': 4, '''task_specific_params.summarization_xsum.length_penalty''': 1.0, '''task_specific_params.summarization_xsum.max_length''': 6_2, '''task_specific_params.summarization_xsum.min_length''': 1_1, '''task_specific_params.summarization_xsum.num_beams''': 6, } self.assertEqual(flatten_dict(__lowerCAmelCase ) , __lowerCAmelCase ) def A ( self ): """simple docstring""" __magic_name__ :Optional[Any] = np.random.randn(3 , 4 ) self.assertTrue(np.allclose(transpose(__lowerCAmelCase ) , x.transpose() ) ) __magic_name__ :List[Any] = np.random.randn(3 , 4 , 5 ) self.assertTrue(np.allclose(transpose(__lowerCAmelCase , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) ) @require_torch def A ( self ): """simple docstring""" __magic_name__ :Optional[Any] = np.random.randn(3 , 4 ) __magic_name__ :Tuple = torch.tensor(__lowerCAmelCase ) self.assertTrue(np.allclose(transpose(__lowerCAmelCase ) , transpose(__lowerCAmelCase ).numpy() ) ) __magic_name__ :int = np.random.randn(3 , 4 , 5 ) __magic_name__ :Union[str, Any] = torch.tensor(__lowerCAmelCase ) self.assertTrue(np.allclose(transpose(__lowerCAmelCase , axes=(1, 2, 0) ) , transpose(__lowerCAmelCase , axes=(1, 2, 0) ).numpy() ) ) @require_tf def A ( self ): """simple docstring""" __magic_name__ :int = np.random.randn(3 , 4 ) __magic_name__ :Optional[Any] = tf.constant(__lowerCAmelCase ) self.assertTrue(np.allclose(transpose(__lowerCAmelCase ) , transpose(__lowerCAmelCase ).numpy() ) ) __magic_name__ :List[str] = np.random.randn(3 , 4 , 5 ) __magic_name__ :Optional[Any] = tf.constant(__lowerCAmelCase ) self.assertTrue(np.allclose(transpose(__lowerCAmelCase , axes=(1, 2, 0) ) , transpose(__lowerCAmelCase , axes=(1, 2, 0) ).numpy() ) ) @require_flax def A ( self ): """simple docstring""" __magic_name__ :int = np.random.randn(3 , 4 ) __magic_name__ :Dict = jnp.array(__lowerCAmelCase ) self.assertTrue(np.allclose(transpose(__lowerCAmelCase ) , np.asarray(transpose(__lowerCAmelCase ) ) ) ) __magic_name__ :Dict = np.random.randn(3 , 4 , 5 ) __magic_name__ :Dict = jnp.array(__lowerCAmelCase ) self.assertTrue(np.allclose(transpose(__lowerCAmelCase , axes=(1, 2, 0) ) , 
np.asarray(transpose(__lowerCAmelCase , axes=(1, 2, 0) ) ) ) ) def A ( self ): """simple docstring""" __magic_name__ :Any = np.random.randn(3 , 4 ) self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (4, 3) ) , np.reshape(__lowerCAmelCase , (4, 3) ) ) ) __magic_name__ :Union[str, Any] = np.random.randn(3 , 4 , 5 ) self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (1_2, 5) ) , np.reshape(__lowerCAmelCase , (1_2, 5) ) ) ) @require_torch def A ( self ): """simple docstring""" __magic_name__ :Dict = np.random.randn(3 , 4 ) __magic_name__ :Tuple = torch.tensor(__lowerCAmelCase ) self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (4, 3) ) , reshape(__lowerCAmelCase , (4, 3) ).numpy() ) ) __magic_name__ :Union[str, Any] = np.random.randn(3 , 4 , 5 ) __magic_name__ :List[str] = torch.tensor(__lowerCAmelCase ) self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (1_2, 5) ) , reshape(__lowerCAmelCase , (1_2, 5) ).numpy() ) ) @require_tf def A ( self ): """simple docstring""" __magic_name__ :Dict = np.random.randn(3 , 4 ) __magic_name__ :Union[str, Any] = tf.constant(__lowerCAmelCase ) self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (4, 3) ) , reshape(__lowerCAmelCase , (4, 3) ).numpy() ) ) __magic_name__ :List[Any] = np.random.randn(3 , 4 , 5 ) __magic_name__ :Optional[int] = tf.constant(__lowerCAmelCase ) self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (1_2, 5) ) , reshape(__lowerCAmelCase , (1_2, 5) ).numpy() ) ) @require_flax def A ( self ): """simple docstring""" __magic_name__ :List[str] = np.random.randn(3 , 4 ) __magic_name__ :Any = jnp.array(__lowerCAmelCase ) self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (4, 3) ) , np.asarray(reshape(__lowerCAmelCase , (4, 3) ) ) ) ) __magic_name__ :List[Any] = np.random.randn(3 , 4 , 5 ) __magic_name__ :List[str] = jnp.array(__lowerCAmelCase ) self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (1_2, 5) ) , np.asarray(reshape(__lowerCAmelCase , (1_2, 5) ) ) ) ) def A ( self ): """simple docstring""" __magic_name__ :List[Any] = np.random.randn(1 , 3 , 4 ) self.assertTrue(np.allclose(squeeze(__lowerCAmelCase ) , np.squeeze(__lowerCAmelCase ) ) ) __magic_name__ :Optional[Any] = np.random.randn(1 , 4 , 1 , 5 ) self.assertTrue(np.allclose(squeeze(__lowerCAmelCase , axis=2 ) , np.squeeze(__lowerCAmelCase , axis=2 ) ) ) @require_torch def A ( self ): """simple docstring""" __magic_name__ :Dict = np.random.randn(1 , 3 , 4 ) __magic_name__ :List[Any] = torch.tensor(__lowerCAmelCase ) self.assertTrue(np.allclose(squeeze(__lowerCAmelCase ) , squeeze(__lowerCAmelCase ).numpy() ) ) __magic_name__ :List[str] = np.random.randn(1 , 4 , 1 , 5 ) __magic_name__ :str = torch.tensor(__lowerCAmelCase ) self.assertTrue(np.allclose(squeeze(__lowerCAmelCase , axis=2 ) , squeeze(__lowerCAmelCase , axis=2 ).numpy() ) ) @require_tf def A ( self ): """simple docstring""" __magic_name__ :int = np.random.randn(1 , 3 , 4 ) __magic_name__ :Tuple = tf.constant(__lowerCAmelCase ) self.assertTrue(np.allclose(squeeze(__lowerCAmelCase ) , squeeze(__lowerCAmelCase ).numpy() ) ) __magic_name__ :Tuple = np.random.randn(1 , 4 , 1 , 5 ) __magic_name__ :Optional[int] = tf.constant(__lowerCAmelCase ) self.assertTrue(np.allclose(squeeze(__lowerCAmelCase , axis=2 ) , squeeze(__lowerCAmelCase , axis=2 ).numpy() ) ) @require_flax def A ( self ): """simple docstring""" __magic_name__ :Tuple = np.random.randn(1 , 3 , 4 ) __magic_name__ :Optional[Any] = jnp.array(__lowerCAmelCase ) self.assertTrue(np.allclose(squeeze(__lowerCAmelCase ) , 
np.asarray(squeeze(__lowerCAmelCase ) ) ) ) __magic_name__ :List[Any] = np.random.randn(1 , 4 , 1 , 5 ) __magic_name__ :Optional[Any] = jnp.array(__lowerCAmelCase ) self.assertTrue(np.allclose(squeeze(__lowerCAmelCase , axis=2 ) , np.asarray(squeeze(__lowerCAmelCase , axis=2 ) ) ) ) def A ( self ): """simple docstring""" __magic_name__ :Any = np.random.randn(3 , 4 ) self.assertTrue(np.allclose(expand_dims(__lowerCAmelCase , axis=1 ) , np.expand_dims(__lowerCAmelCase , axis=1 ) ) ) @require_torch def A ( self ): """simple docstring""" __magic_name__ :List[Any] = np.random.randn(3 , 4 ) __magic_name__ :Any = torch.tensor(__lowerCAmelCase ) self.assertTrue(np.allclose(expand_dims(__lowerCAmelCase , axis=1 ) , expand_dims(__lowerCAmelCase , axis=1 ).numpy() ) ) @require_tf def A ( self ): """simple docstring""" __magic_name__ :Union[str, Any] = np.random.randn(3 , 4 ) __magic_name__ :Union[str, Any] = tf.constant(__lowerCAmelCase ) self.assertTrue(np.allclose(expand_dims(__lowerCAmelCase , axis=1 ) , expand_dims(__lowerCAmelCase , axis=1 ).numpy() ) ) @require_flax def A ( self ): """simple docstring""" __magic_name__ :List[str] = np.random.randn(3 , 4 ) __magic_name__ :Tuple = jnp.array(__lowerCAmelCase ) self.assertTrue(np.allclose(expand_dims(__lowerCAmelCase , axis=1 ) , np.asarray(expand_dims(__lowerCAmelCase , axis=1 ) ) ) )
0
0
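The BLOOM conversion script above merges tensor-parallel (TP) shards with two rules: parameters that are replicated across ranks (layer norms, certain biases) are summed and then divided by the TP degree, while sharded linear weights are concatenated along their split axis. A minimal standalone sketch of that merge rule; the keys, shapes, and the dim-0 split axis are illustrative, not the real BLOOM checkpoint layout.

import torch

# Two fake TP shards; keys and shapes are illustrative only.
tp_shards = [
    {"input_layernorm.weight": torch.full((4,), 1.0), "dense.weight": torch.randn(8, 4)},
    {"input_layernorm.weight": torch.full((4,), 3.0), "dense.weight": torch.randn(8, 4)},
]
AVERAGED_SUFFIXES = ("input_layernorm.weight",)  # replicated params: average them

merged = {}
for shard in tp_shards:
    for key, tensor in shard.items():
        if key not in merged:
            merged[key] = tensor.clone()
        elif key.endswith(AVERAGED_SUFFIXES):
            merged[key] += tensor  # sum now, divide by the TP degree below
        else:
            # Sharded weights are concatenated along their split axis
            # (dim 0 here for a column-parallel layer; row-parallel uses dim 1).
            merged[key] = torch.cat([merged[key], tensor], dim=0)

for key in AVERAGED_SUFFIXES:
    merged[key] /= len(tp_shards)

print(merged["input_layernorm.weight"])  # tensor([2., 2., 2., 2.])
print(merged["dense.weight"].shape)      # torch.Size([16, 4])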
"""simple docstring""" import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging _a = logging.get_logger(__name__) _a = { """microsoft/wavlm-base""": """https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json""", # See all WavLM models at https://huggingface.co/models?filter=wavlm } class _UpperCAmelCase( lowerCamelCase ): lowercase__ = 'wavlm' def __init__( self , __a=32 , __a=7_68 , __a=12 , __a=12 , __a=30_72 , __a="gelu" , __a=0.1 , __a=0.1 , __a=0.1 , __a=0.0 , __a=0.1 , __a=0.1 , __a=0.02 , __a=1e-5 , __a="group" , __a="gelu" , __a=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , __a=(5, 2, 2, 2, 2, 2, 2) , __a=(10, 3, 3, 3, 3, 2, 2) , __a=False , __a=1_28 , __a=16 , __a=3_20 , __a=8_00 , __a=False , __a=True , __a=0.05 , __a=10 , __a=2 , __a=0.0 , __a=10 , __a=3_20 , __a=2 , __a=0.1 , __a=1_00 , __a=2_56 , __a=2_56 , __a=0.1 , __a="mean" , __a=False , __a=False , __a=2_56 , __a=(5_12, 5_12, 5_12, 5_12, 15_00) , __a=(5, 3, 3, 1, 1) , __a=(1, 2, 3, 1, 1) , __a=5_12 , __a=80 , __a=0 , __a=1 , __a=2 , __a=False , __a=3 , __a=2 , __a=3 , __a=None , **__a , ) -> Union[str, Any]: '''simple docstring''' super().__init__(**__a , pad_token_id=__a , bos_token_id=__a , eos_token_id=__a) _UpperCamelCase = hidden_size _UpperCamelCase = feat_extract_norm _UpperCamelCase = feat_extract_activation _UpperCamelCase = list(__a) _UpperCamelCase = list(__a) _UpperCamelCase = list(__a) _UpperCamelCase = conv_bias _UpperCamelCase = num_buckets _UpperCamelCase = max_bucket_distance _UpperCamelCase = num_conv_pos_embeddings _UpperCamelCase = num_conv_pos_embedding_groups _UpperCamelCase = len(self.conv_dim) _UpperCamelCase = num_hidden_layers _UpperCamelCase = intermediate_size _UpperCamelCase = hidden_act _UpperCamelCase = num_attention_heads _UpperCamelCase = hidden_dropout _UpperCamelCase = attention_dropout _UpperCamelCase = activation_dropout _UpperCamelCase = feat_proj_dropout _UpperCamelCase = final_dropout _UpperCamelCase = layerdrop _UpperCamelCase = layer_norm_eps _UpperCamelCase = initializer_range _UpperCamelCase = num_ctc_classes _UpperCamelCase = vocab_size _UpperCamelCase = do_stable_layer_norm _UpperCamelCase = use_weighted_layer_sum _UpperCamelCase = classifier_proj_size if ( (len(self.conv_stride) != self.num_feat_extract_layers) or (len(self.conv_kernel) != self.num_feat_extract_layers) or (len(self.conv_dim) != self.num_feat_extract_layers) ): raise ValueError( '''Configuration for convolutional layers is incorrect. 
It is required that `len(config.conv_dim)` ==''' ''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =''' F''' {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,''' F''' `len(config.conv_kernel) = {len(self.conv_kernel)}`.''') # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 _UpperCamelCase = apply_spec_augment _UpperCamelCase = mask_time_prob _UpperCamelCase = mask_time_length _UpperCamelCase = mask_time_min_masks _UpperCamelCase = mask_feature_prob _UpperCamelCase = mask_feature_length # parameters for pretraining with codevector quantized representations _UpperCamelCase = num_codevectors_per_group _UpperCamelCase = num_codevector_groups _UpperCamelCase = contrastive_logits_temperature _UpperCamelCase = num_negatives _UpperCamelCase = codevector_dim _UpperCamelCase = proj_codevector_dim _UpperCamelCase = diversity_loss_weight # ctc loss _UpperCamelCase = ctc_loss_reduction _UpperCamelCase = ctc_zero_infinity # adapter _UpperCamelCase = add_adapter _UpperCamelCase = adapter_kernel_size _UpperCamelCase = adapter_stride _UpperCamelCase = num_adapter_layers _UpperCamelCase = output_hidden_size or hidden_size # SequenceClassification-specific parameter. Feel free to ignore for other classes. _UpperCamelCase = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. _UpperCamelCase = list(__a) _UpperCamelCase = list(__a) _UpperCamelCase = list(__a) _UpperCamelCase = xvector_output_dim @property def UpperCAmelCase ( self) -> Optional[Any]: '''simple docstring''' return functools.reduce(operator.mul , self.conv_stride , 1)
19
from pathlib import PurePosixPath
from typing import Optional

import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo

from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url


class HfFileSystem(AbstractFileSystem):
    """Interface to files in a Hugging Face dataset repository."""

    root_marker = ""
    protocol = "hf-legacy"  # "hf://" is reserved for hffs

    def __init__(
        self,
        repo_info: Optional[DatasetInfo] = None,
        token: Optional[str] = None,
        **kwargs,
    ):
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None

    def _get_dirs(self):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                self.dir_cache.update(
                    {
                        str(d): {"name": str(d), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    }
                )

    def _open(self, path: str, mode: str = "rb", **kwargs):
        if not isinstance(self.repo_info, DatasetInfo):
            raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}")
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
        return fsspec.open(
            url,
            mode=mode,
            headers=get_authentication_headers_for_url(url, use_auth_token=self.token),
            client_kwargs={"trust_env": True},
        ).open()

    def info(self, path, **kwargs):
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)

    def ls(self, path, detail=False, **kwargs):
        self._get_dirs()
        path = PurePosixPath(path.strip("/"))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("/"))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out)
0
0
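The final `@property` of the WavLM config above reduces `conv_stride` with multiplication; that product is the waveform-to-frame downsampling factor of the convolutional feature extractor. A standalone illustration with the default strides from the config:

import functools
import operator

conv_stride = (5, 2, 2, 2, 2, 2, 2)  # default strides from the config above
ratio = functools.reduce(operator.mul, conv_stride, 1)
print(ratio)            # 320: one output frame per 320 input samples

# One second of 16 kHz audio therefore yields 16000 // 320 = 50 feature frames.
print(16_000 // ratio)  # 50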
from typing import Dict, List, Optional, Union import numpy as np from transformers.utils import is_vision_available from transformers.utils.generic import TensorType from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, is_valid_image, to_numpy_array, valid_images, ) from ...utils import logging if is_vision_available(): import PIL _lowerCAmelCase: int = logging.get_logger(__name__) def _lowercase( __a : List[str] ): if isinstance(__a , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ): return videos elif isinstance(__a , (list, tuple) ) and is_valid_image(videos[0] ): return [videos] elif is_valid_image(__a ): return [[videos]] raise ValueError(f"""Could not make batched video from {videos}""" ) class lowercase_ (lowercase__ ): snake_case =['pixel_values'] def __init__( self , lowercase_ = True , lowercase_ = None , lowercase_ = PILImageResampling.BILINEAR , lowercase_ = True , lowercase_ = None , lowercase_ = True , lowercase_ = 1 / 255 , lowercase_ = True , lowercase_ = True , lowercase_ = None , lowercase_ = None , **lowercase_ , ) -> None: super().__init__(**lowercase_) a__ =size if size is not None else {'shortest_edge': 256} a__ =get_size_dict(lowercase_ , default_to_square=lowercase_) a__ =crop_size if crop_size is not None else {'height': 224, 'width': 224} a__ =get_size_dict(lowercase_ , param_name='crop_size') a__ =do_resize a__ =size a__ =do_center_crop a__ =crop_size a__ =resample a__ =do_rescale a__ =rescale_factor a__ =offset a__ =do_normalize a__ =image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN a__ =image_std if image_std is not None else IMAGENET_STANDARD_STD def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ = PILImageResampling.BILINEAR , lowercase_ = None , **lowercase_ , ) -> np.ndarray: a__ =get_size_dict(lowercase_ , default_to_square=lowercase_) if "shortest_edge" in size: a__ =get_resize_output_image_size(lowercase_ , size['shortest_edge'] , default_to_square=lowercase_) elif "height" in size and "width" in size: a__ =(size['height'], size['width']) else: raise ValueError(F"""Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}""") return resize(lowercase_ , size=lowercase_ , resample=lowercase_ , data_format=lowercase_ , **lowercase_) def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_ , ) -> np.ndarray: a__ =get_size_dict(lowercase_) if "height" not in size or "width" not in size: raise ValueError(F"""Size must have 'height' and 'width' as keys. 
Got {size.keys()}""") return center_crop(lowercase_ , size=(size['height'], size['width']) , data_format=lowercase_ , **lowercase_) def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ = True , lowercase_ = None , **lowercase_ , ) -> List[str]: a__ =image.astype(np.floataa) if offset: a__ =image - (scale / 2) return rescale(lowercase_ , scale=lowercase_ , data_format=lowercase_ , **lowercase_) def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_ , ) -> np.ndarray: return normalize(lowercase_ , mean=lowercase_ , std=lowercase_ , data_format=lowercase_ , **lowercase_) def __UpperCamelCase ( self , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = ChannelDimension.FIRST , ) -> np.ndarray: if do_resize and size is None or resample is None: raise ValueError('Size and resample must be specified if do_resize is True.') if do_center_crop and crop_size is None: raise ValueError('Crop size must be specified if do_center_crop is True.') if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.') if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.') if offset and not do_rescale: raise ValueError('For offset, do_rescale must also be set to True.') # All transformations expect numpy arrays. a__ =to_numpy_array(lowercase_) if do_resize: a__ =self.resize(image=lowercase_ , size=lowercase_ , resample=lowercase_) if do_center_crop: a__ =self.center_crop(lowercase_ , size=lowercase_) if do_rescale: a__ =self.rescale(image=lowercase_ , scale=lowercase_ , offset=lowercase_) if do_normalize: a__ =self.normalize(image=lowercase_ , mean=lowercase_ , std=lowercase_) a__ =to_channel_dimension_format(lowercase_ , lowercase_) return image def __UpperCamelCase ( self , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = ChannelDimension.FIRST , **lowercase_ , ) -> PIL.Image.Image: a__ =do_resize if do_resize is not None else self.do_resize a__ =resample if resample is not None else self.resample a__ =do_center_crop if do_center_crop is not None else self.do_center_crop a__ =do_rescale if do_rescale is not None else self.do_rescale a__ =rescale_factor if rescale_factor is not None else self.rescale_factor a__ =offset if offset is not None else self.offset a__ =do_normalize if do_normalize is not None else self.do_normalize a__ =image_mean if image_mean is not None else self.image_mean a__ =image_std if image_std is not None else self.image_std a__ =size if size is not None else self.size a__ =get_size_dict(lowercase_ , default_to_square=lowercase_) a__ =crop_size if crop_size is not None else self.crop_size a__ =get_size_dict(lowercase_ , param_name='crop_size') if not valid_images(lowercase_): raise ValueError( 'Invalid image type. 
Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.') a__ =make_batched(lowercase_) a__ =[ [ self._preprocess_image( image=lowercase_ , do_resize=lowercase_ , size=lowercase_ , resample=lowercase_ , do_center_crop=lowercase_ , crop_size=lowercase_ , do_rescale=lowercase_ , rescale_factor=lowercase_ , offset=lowercase_ , do_normalize=lowercase_ , image_mean=lowercase_ , image_std=lowercase_ , data_format=lowercase_ , ) for img in video ] for video in videos ] a__ ={'pixel_values': videos} return BatchFeature(data=lowercase_ , tensor_type=lowercase_)
20
import pyarrow.parquet as pq import pytest from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config from datasets.features.image import Image from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def __lowercase ( snake_case, snake_case ): """simple docstring""" assert isinstance(snake_case, snake_case ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('''keep_in_memory''', [False, True] ) def __lowercase ( snake_case, snake_case, snake_case ): """simple docstring""" __magic_name__ :Tuple = tmp_path / '''cache''' __magic_name__ :int = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): __magic_name__ :Optional[Any] = ParquetDatasetReader(snake_case, cache_dir=snake_case, keep_in_memory=snake_case ).read() _check_parquet_dataset(snake_case, snake_case ) @pytest.mark.parametrize( '''features''', [ None, {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}, {'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''}, {'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''}, {'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''}, ], ) def __lowercase ( snake_case, snake_case, snake_case ): """simple docstring""" __magic_name__ :List[str] = tmp_path / '''cache''' __magic_name__ :int = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} __magic_name__ :Tuple = features.copy() if features else default_expected_features __magic_name__ :Union[str, Any] = ( Features({feature: Value(snake_case ) for feature, dtype in features.items()} ) if features is not None else None ) __magic_name__ :int = ParquetDatasetReader(snake_case, features=snake_case, cache_dir=snake_case ).read() _check_parquet_dataset(snake_case, snake_case ) @pytest.mark.parametrize('''split''', [None, NamedSplit('''train''' ), '''train''', '''test'''] ) def __lowercase ( snake_case, snake_case, snake_case ): """simple docstring""" __magic_name__ :str = tmp_path / '''cache''' __magic_name__ :List[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} __magic_name__ :int = ParquetDatasetReader(snake_case, cache_dir=snake_case, split=snake_case ).read() _check_parquet_dataset(snake_case, snake_case ) assert dataset.split == split if split else "train" @pytest.mark.parametrize('''path_type''', [str, list] ) def __lowercase ( snake_case, snake_case, snake_case ): """simple docstring""" if issubclass(snake_case, snake_case ): __magic_name__ :Union[str, Any] = parquet_path elif issubclass(snake_case, snake_case ): __magic_name__ :Union[str, Any] = [parquet_path] __magic_name__ :Optional[int] = tmp_path / '''cache''' __magic_name__ :Optional[int] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} __magic_name__ :str = ParquetDatasetReader(snake_case, cache_dir=snake_case ).read() _check_parquet_dataset(snake_case, snake_case ) def __lowercase ( snake_case, snake_case, snake_case=("train",) ): """simple docstring""" assert isinstance(snake_case, 
snake_case ) for split in splits: __magic_name__ :Optional[Any] = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('''keep_in_memory''', [False, True] ) def __lowercase ( snake_case, snake_case, snake_case ): """simple docstring""" __magic_name__ :Any = tmp_path / '''cache''' __magic_name__ :Optional[int] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): __magic_name__ :Tuple = ParquetDatasetReader( {'''train''': parquet_path}, cache_dir=snake_case, keep_in_memory=snake_case ).read() _check_parquet_datasetdict(snake_case, snake_case ) @pytest.mark.parametrize( '''features''', [ None, {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}, {'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''}, {'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''}, {'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''}, ], ) def __lowercase ( snake_case, snake_case, snake_case ): """simple docstring""" __magic_name__ :Optional[Any] = tmp_path / '''cache''' __magic_name__ :Dict = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} __magic_name__ :int = features.copy() if features else default_expected_features __magic_name__ :List[Any] = ( Features({feature: Value(snake_case ) for feature, dtype in features.items()} ) if features is not None else None ) __magic_name__ :Optional[int] = ParquetDatasetReader({'''train''': parquet_path}, features=snake_case, cache_dir=snake_case ).read() _check_parquet_datasetdict(snake_case, snake_case ) @pytest.mark.parametrize('''split''', [None, NamedSplit('''train''' ), '''train''', '''test'''] ) def __lowercase ( snake_case, snake_case, snake_case ): """simple docstring""" if split: __magic_name__ :Dict = {split: parquet_path} else: __magic_name__ :Optional[int] = '''train''' __magic_name__ :Dict = {'''train''': parquet_path, '''test''': parquet_path} __magic_name__ :List[Any] = tmp_path / '''cache''' __magic_name__ :Optional[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} __magic_name__ :Optional[Any] = ParquetDatasetReader(snake_case, cache_dir=snake_case ).read() _check_parquet_datasetdict(snake_case, snake_case, splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() ) def __lowercase ( snake_case, snake_case ): """simple docstring""" __magic_name__ :str = ParquetDatasetWriter(snake_case, tmp_path / '''foo.parquet''' ) assert writer.write() > 0 __magic_name__ :List[Any] = pq.ParquetFile(tmp_path / '''foo.parquet''' ) __magic_name__ :List[Any] = pf.read() assert dataset.data.table == output_table def __lowercase ( snake_case, snake_case ): """simple docstring""" __magic_name__ :List[str] = str(shared_datadir / '''test_image_rgb.jpg''' ) __magic_name__ :Tuple = {'''image''': [image_path]} __magic_name__ :List[Any] = Features({'''image''': Image()} ) __magic_name__ :Tuple = Dataset.from_dict(snake_case, features=snake_case ) __magic_name__ :Union[str, Any] = ParquetDatasetWriter(snake_case, tmp_path / '''foo.parquet''' ) assert writer.write() > 0 __magic_name__ :List[str] = 
Dataset.from_parquet(str(tmp_path / '''foo.parquet''' ) ) assert dataset.features == reloaded_dataset.features __magic_name__ :List[str] = ParquetDatasetReader(str(tmp_path / '''foo.parquet''' ), streaming=snake_case ).read() assert dataset.features == reloaded_iterable_dataset.features @pytest.mark.parametrize( '''feature, expected''', [ (Features({'''foo''': Value('''int32''' )} ), None), (Features({'''image''': Image(), '''foo''': Value('''int32''' )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS), (Features({'''nested''': Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS), ], ) def __lowercase ( snake_case, snake_case ): """simple docstring""" assert get_writer_batch_size(snake_case ) == expected
0
0
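The parquet tests above exercise `ParquetDatasetWriter` and `ParquetDatasetReader` from `datasets.io.parquet`. A minimal round-trip in the same style, using the signatures the tests themselves rely on (`write()` returns the number of bytes written); the file path is arbitrary.

import tempfile
from pathlib import Path

from datasets import Dataset
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter

ds = Dataset.from_dict({"col_1": ["a", "b", "c", "d"], "col_2": [1, 2, 3, 4]})

with tempfile.TemporaryDirectory() as tmp_dir:
    path = str(Path(tmp_dir) / "demo.parquet")
    assert ParquetDatasetWriter(ds, path).write() > 0
    reloaded = ParquetDatasetReader(path, cache_dir=tmp_dir).read()

print(reloaded.column_names)  # ['col_1', 'col_2']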
from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase_ : Dict = logging.get_logger(__name__) UpperCAmelCase_ : str = { "facebook/nllb-moe-54B": "https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json", } class __A ( UpperCamelCase__ ): UpperCamelCase = """nllb-moe""" UpperCamelCase = ["""past_key_values"""] UpperCamelCase = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""} def __init__( self :str , __snake_case :str=12_81_12 , __snake_case :str=10_24 , __snake_case :List[Any]=12 , __snake_case :Union[str, Any]=40_96 , __snake_case :Any=16 , __snake_case :Union[str, Any]=12 , __snake_case :int=40_96 , __snake_case :List[Any]=16 , __snake_case :Optional[int]=0.05 , __snake_case :Tuple=0.05 , __snake_case :Dict=True , __snake_case :Optional[int]=True , __snake_case :str="relu" , __snake_case :List[str]=10_24 , __snake_case :Union[str, Any]=0.1 , __snake_case :List[Any]=0.1 , __snake_case :int=0.0 , __snake_case :int=0.02 , __snake_case :Union[str, Any]=2 , __snake_case :Union[str, Any]=True , __snake_case :Optional[int]=False , __snake_case :Any="float32" , __snake_case :Union[str, Any]=False , __snake_case :Tuple=1_28 , __snake_case :str=64 , __snake_case :Dict=4 , __snake_case :Tuple=4 , __snake_case :Optional[Any]=0.001 , __snake_case :Optional[Any]=0.001 , __snake_case :List[Any]="all" , __snake_case :int=False , __snake_case :List[Any]=False , __snake_case :Optional[int]=1.0 , __snake_case :List[str]=0.2 , __snake_case :int=1 , __snake_case :Dict=0 , __snake_case :List[str]=2 , __snake_case :Dict=False , **__snake_case :Optional[Any] , ): '''simple docstring''' __magic_name__ : Tuple =vocab_size __magic_name__ : Optional[Any] =max_position_embeddings __magic_name__ : List[Any] =d_model __magic_name__ : Union[str, Any] =encoder_ffn_dim __magic_name__ : List[Any] =encoder_layers __magic_name__ : Optional[int] =encoder_attention_heads __magic_name__ : Optional[Any] =decoder_ffn_dim __magic_name__ : List[str] =decoder_layers __magic_name__ : Any =decoder_attention_heads __magic_name__ : int =dropout __magic_name__ : Any =attention_dropout __magic_name__ : Optional[Any] =activation_dropout __magic_name__ : int =activation_function __magic_name__ : str =init_std __magic_name__ : Optional[int] =encoder_layerdrop __magic_name__ : str =decoder_layerdrop __magic_name__ : Optional[int] =use_cache __magic_name__ : Union[str, Any] =encoder_layers __magic_name__ : int =scale_embedding # scale factor will be sqrt(d_model) if True __magic_name__ : Any =router_z_loss_coef __magic_name__ : Tuple =router_aux_loss_coef __magic_name__ : Optional[Any] =decoder_sparse_step __magic_name__ : Dict =encoder_sparse_step __magic_name__ : List[Any] =num_experts __magic_name__ : Tuple =expert_capacity __magic_name__ : Union[str, Any] =router_bias if router_dtype not in ["float32", "float16", "bfloat16"]: raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}" ) __magic_name__ : str =router_dtype __magic_name__ : Union[str, Any] =router_ignore_padding_tokens __magic_name__ : Dict =batch_prioritized_routing __magic_name__ : Optional[Any] =second_expert_policy __magic_name__ : Tuple =normalize_router_prob_before_dropping __magic_name__ : Any =moe_eval_capacity_token_fraction __magic_name__ : List[str] =moe_token_dropout __magic_name__ : str =output_router_logits super().__init__( pad_token_id=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case , 
is_encoder_decoder=__snake_case , decoder_start_token_id=__snake_case , **__snake_case , )
21
def multiplicative_persistence(num: int) -> int:
    """
    Return how many times `num` must be replaced by the product of its digits
    until a single digit remains.
    """
    if not isinstance(num, int):
        raise ValueError("multiplicative_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("multiplicative_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]
        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]
        num_string = str(total)
        steps += 1
    return steps


def additive_persistence(num: int) -> int:
    """
    Return how many times `num` must be replaced by the sum of its digits
    until a single digit remains.
    """
    if not isinstance(num, int):
        raise ValueError("additive_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("additive_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]
        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]
        num_string = str(total)
        steps += 1
    return steps


if __name__ == "__main__":
    import doctest

    doctest.testmod()
0
0
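Worked examples for the two persistence functions above, using the corrected names; the `persistence` module name is hypothetical and stands for wherever the snippet is saved.

from persistence import additive_persistence, multiplicative_persistence  # hypothetical module

# 39 -> 3*9=27 -> 2*7=14 -> 1*4=4: three steps.
print(multiplicative_persistence(39))  # 3
# 199 -> 1+9+9=19 -> 1+9=10 -> 1+0=1: three steps.
print(additive_persistence(199))       # 3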
'''simple docstring''' import argparse import json import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler _snake_case : List[Any] = 16 _snake_case : Tuple = 32 def snake_case_ (UpperCamelCase : Accelerator , UpperCamelCase : int = 16 , UpperCamelCase : str = "bert-base-cased" ): '''simple docstring''' _a = AutoTokenizer.from_pretrained(UpperCamelCase ) _a = load_dataset('''glue''' , '''mrpc''' ) def tokenize_function(UpperCamelCase : Optional[Any] ): # max_length=None => use the model max length (it's actually the default) _a = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=UpperCamelCase , max_length=UpperCamelCase ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset _a = datasets.map( UpperCamelCase , batched=UpperCamelCase , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , load_from_cache_file=UpperCamelCase ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library _a = tokenized_datasets.rename_column('''label''' , '''labels''' ) def collate_fn(UpperCamelCase : Union[str, Any] ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(UpperCamelCase , padding='''max_length''' , max_length=128 , return_tensors='''pt''' ) return tokenizer.pad(UpperCamelCase , padding='''longest''' , return_tensors='''pt''' ) # Instantiate dataloaders. 
_a = DataLoader( tokenized_datasets['''train'''] , shuffle=UpperCamelCase , collate_fn=UpperCamelCase , batch_size=UpperCamelCase ) _a = DataLoader( tokenized_datasets['''validation'''] , shuffle=UpperCamelCase , collate_fn=UpperCamelCase , batch_size=UpperCamelCase ) return train_dataloader, eval_dataloader def snake_case_ (UpperCamelCase : Optional[int] , UpperCamelCase : Optional[int] ): '''simple docstring''' _a = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs _a = config['''lr'''] _a = int(config['''num_epochs'''] ) _a = int(config['''seed'''] ) _a = int(config['''batch_size'''] ) _a = args.model_name_or_path set_seed(UpperCamelCase ) _a , _a = get_dataloaders(UpperCamelCase , UpperCamelCase , UpperCamelCase ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) _a = AutoModelForSequenceClassification.from_pretrained(UpperCamelCase , return_dict=UpperCamelCase ) # Instantiate optimizer _a = ( AdamW if accelerator.state.deepspeed_plugin is None or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) _a = optimizer_cls(params=model.parameters() , lr=UpperCamelCase ) if accelerator.state.deepspeed_plugin is not None: _a = accelerator.state.deepspeed_plugin.deepspeed_config[ '''gradient_accumulation_steps''' ] else: _a = 1 _a = (len(UpperCamelCase ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): _a = get_linear_schedule_with_warmup( optimizer=UpperCamelCase , num_warmup_steps=0 , num_training_steps=UpperCamelCase , ) else: _a = DummyScheduler(UpperCamelCase , total_num_steps=UpperCamelCase , warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. _a , _a , _a , _a , _a = accelerator.prepare( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) # We need to keep track of how many total steps we have iterated over _a = 0 # We also need to keep track of the stating epoch so files are named properly _a = 0 # Now we train the model _a = evaluate.load('''glue''' , '''mrpc''' ) _a = 0 _a = {} for epoch in range(UpperCamelCase , UpperCamelCase ): model.train() for step, batch in enumerate(UpperCamelCase ): _a = model(**UpperCamelCase ) _a = outputs.loss _a = loss / gradient_accumulation_steps accelerator.backward(UpperCamelCase ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 model.eval() _a = 0 for step, batch in enumerate(UpperCamelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) with torch.no_grad(): _a = model(**UpperCamelCase ) _a = outputs.logits.argmax(dim=-1 ) # It is slightly faster to call this once, than multiple times _a , _a = accelerator.gather( (predictions, batch['''labels''']) ) # If we are in a multiprocess environment, the last batch has duplicates if accelerator.use_distributed: if step == len(UpperCamelCase ) - 1: _a = predictions[: len(eval_dataloader.dataset ) - samples_seen] _a = references[: len(eval_dataloader.dataset ) - samples_seen] else: samples_seen += references.shape[0] metric.add_batch( predictions=UpperCamelCase , references=UpperCamelCase , ) _a = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f'epoch {epoch}:' , UpperCamelCase ) _a = eval_metric['''accuracy'''] if best_performance < eval_metric["accuracy"]: _a = eval_metric['''accuracy'''] if args.performance_lower_bound is not None: assert ( args.performance_lower_bound <= best_performance ), f'Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}' accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir , '''all_results.json''' ) , '''w''' ) as f: json.dump(UpperCamelCase , UpperCamelCase ) def snake_case_ (): '''simple docstring''' _a = argparse.ArgumentParser(description='''Simple example of training script tracking peak GPU memory usage.''' ) parser.add_argument( '''--model_name_or_path''' , type=UpperCamelCase , default='''bert-base-cased''' , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=UpperCamelCase , ) parser.add_argument( '''--output_dir''' , type=UpperCamelCase , default='''.''' , help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''' , ) parser.add_argument( '''--performance_lower_bound''' , type=UpperCamelCase , default=UpperCamelCase , help='''Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.''' , ) parser.add_argument( '''--num_epochs''' , type=UpperCamelCase , default=3 , help='''Number of train epochs.''' , ) _a = parser.parse_args() _a = {'''lr''': 2e-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16} training_function(UpperCamelCase , UpperCamelCase ) if __name__ == "__main__": main()
22
import math import os import re import sys import unittest from pathlib import Path from typing import Tuple from unittest.mock import patch from parameterized import parameterized from transformers.testing_utils import ( CaptureStderr, ExtendSysPath, TestCasePlus, execute_subprocess_async, get_gpu_count, get_torch_dist_unique_port, require_apex, require_bitsandbytes, require_fairscale, require_torch, require_torch_gpu, require_torch_multi_gpu, require_torch_non_multi_gpu, slow, ) from transformers.trainer_callback import TrainerState from transformers.trainer_utils import set_seed SCREAMING_SNAKE_CASE__ : List[Any] = os.path.abspath(os.path.dirname(__file__)) with ExtendSysPath(f"{bindir}/../../examples/pytorch/translation"): from run_translation import main # noqa set_seed(42) SCREAMING_SNAKE_CASE__ : Optional[Any] = """sshleifer/student_marian_en_ro_6_1""" SCREAMING_SNAKE_CASE__ : List[Any] = """sshleifer/tiny-mbart""" @require_torch class lowerCamelCase_ ( lowerCamelCase ): def A ( self , __lowerCAmelCase=False , __lowerCAmelCase=None , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , ): """simple docstring""" __magic_name__ :List[Any] = self.run_trainer( eval_steps=1 , max_len=1_2 , model_name=__lowerCAmelCase , num_train_epochs=1 , distributed=__lowerCAmelCase , extra_args_str=__lowerCAmelCase , predict_with_generate=__lowerCAmelCase , do_train=__lowerCAmelCase , do_eval=__lowerCAmelCase , do_predict=__lowerCAmelCase , ) __magic_name__ :Any = TrainerState.load_from_json(os.path.join(__lowerCAmelCase , '''trainer_state.json''' ) ).log_history if not do_eval: return __magic_name__ :Any = [log for log in logs if '''eval_loss''' in log.keys()] __magic_name__ :str = eval_metrics[0] if predict_with_generate: assert "eval_bleu" in first_step_stats __magic_name__ :Tuple = eval_metrics[-1] assert isinstance(last_step_stats['''eval_bleu'''] , __lowerCAmelCase ) assert not math.isnan(float(last_step_stats['''eval_loss'''] ) ), "eval_loss must not be `nan`" @require_torch_non_multi_gpu def A ( self ): """simple docstring""" self.run_seqaseq_quick() @require_torch_multi_gpu def A ( self ): """simple docstring""" self.run_seqaseq_quick(distributed=__lowerCAmelCase ) @require_torch_multi_gpu def A ( self ): """simple docstring""" self.run_seqaseq_quick(distributed=__lowerCAmelCase ) @unittest.skip('''Requires an update of the env running those tests''' ) @require_torch_multi_gpu @require_fairscale def A ( self ): """simple docstring""" self.run_seqaseq_quick(distributed=__lowerCAmelCase , extra_args_str='''--sharded_ddp simple''' ) @unittest.skip('''Requires an update of the env running those tests''' ) @require_torch_multi_gpu @require_fairscale def A ( self ): """simple docstring""" self.run_seqaseq_quick(distributed=__lowerCAmelCase , extra_args_str='''--sharded_ddp simple --fp16''' ) @unittest.skip('''Requires an update of the env running those tests''' ) @require_torch_multi_gpu @require_fairscale def A ( self ): """simple docstring""" self.run_seqaseq_quick(distributed=__lowerCAmelCase , extra_args_str='''--sharded_ddp zero_dp_2''' , predict_with_generate=__lowerCAmelCase ) @unittest.skip('''Requires an update of the env running those tests''' ) @require_torch_multi_gpu @require_fairscale def A ( self ): """simple docstring""" self.run_seqaseq_quick( distributed=__lowerCAmelCase , extra_args_str='''--sharded_ddp zero_dp_2 --fp16''' , predict_with_generate=__lowerCAmelCase ) @require_apex @require_torch_gpu def A ( self ): """simple docstring""" # 
XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same # program and it breaks other tests that run from the same pytest worker, therefore until this is # sorted out it must be run only in an external program, that is distributed=True in this # test and only under one or more gpus - if we want cpu will need to make a special test # # specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via # 2nd main() call it botches the future eval. # self.run_seqaseq_quick(distributed=__lowerCAmelCase , extra_args_str='''--fp16 --fp16_backend=apex''' ) # test 2nd time - was getting eval_loss': nan' # to reproduce the problem set distributed=False self.run_seqaseq_quick(distributed=__lowerCAmelCase , extra_args_str='''--fp16 --fp16_backend=apex''' ) @parameterized.expand(['''base''', '''low''', '''high''', '''mixed'''] ) @require_torch_multi_gpu def A ( self , __lowerCAmelCase ): """simple docstring""" # as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout __magic_name__ :Any = { # test with the default log_level - should be info and thus log info once '''base''': {'''extra_args_str''': '''''', '''n_matches''': 1}, # test with low log_level and log_level_replica - should be noisy on all processes # now the info string should appear twice on 2 processes '''low''': {'''extra_args_str''': '''--log_level debug --log_level_replica debug''', '''n_matches''': 2}, # test with high log_level and low log_level_replica # now the info string should appear once only on the replica '''high''': {'''extra_args_str''': '''--log_level error --log_level_replica debug''', '''n_matches''': 1}, # test with high log_level and log_level_replica - should be quiet on all processes '''mixed''': {'''extra_args_str''': '''--log_level error --log_level_replica error''', '''n_matches''': 0}, } __magic_name__ :Optional[Any] = experiments[experiment_id] __magic_name__ :List[Any] = {'''distributed''': True, '''predict_with_generate''': False, '''do_eval''': False, '''do_predict''': False} __magic_name__ :Optional[int] = '''Running training''' with CaptureStderr() as cl: self.run_seqaseq_quick(**__lowerCAmelCase , extra_args_str=data['''extra_args_str'''] ) __magic_name__ :int = len(re.findall(__lowerCAmelCase , cl.err ) ) self.assertEqual(__lowerCAmelCase , data['''n_matches'''] ) @slow def A ( self ): """simple docstring""" __magic_name__ :List[str] = self.run_trainer( eval_steps=2 , max_len=1_2_8 , model_name=__lowerCAmelCase , learning_rate=3E-4 , num_train_epochs=1_0 , distributed=__lowerCAmelCase , ) # Check metrics __magic_name__ :Optional[int] = TrainerState.load_from_json(os.path.join(__lowerCAmelCase , '''trainer_state.json''' ) ).log_history __magic_name__ :List[str] = [log for log in logs if '''eval_loss''' in log.keys()] __magic_name__ :Any = eval_metrics[0] __magic_name__ :int = eval_metrics[-1] assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing" assert isinstance(last_step_stats['''eval_bleu'''] , __lowerCAmelCase ) # test if do_predict saves generations and metrics __magic_name__ :List[Any] = os.listdir(__lowerCAmelCase ) __magic_name__ :List[str] = {os.path.basename(__lowerCAmelCase ) for p in contents} assert "generated_predictions.txt" in contents assert "predict_results.json" in contents @slow @require_bitsandbytes def A ( self ): """simple docstring""" from transformers.training_args import OptimizerNames def train_and_return_metrics(__lowerCAmelCase ) -> Tuple[int, float]: __magic_name__ 
:str = '''--skip_memory_metrics 0''' __magic_name__ :Dict = self.run_trainer( max_len=1_2_8 , model_name=__lowerCAmelCase , learning_rate=3E-4 , num_train_epochs=1 , optim=__lowerCAmelCase , distributed=__lowerCAmelCase , extra_args_str=__lowerCAmelCase , do_eval=__lowerCAmelCase , do_predict=__lowerCAmelCase , n_gpus_to_use=1 , ) # Check metrics __magic_name__ :Optional[Any] = TrainerState.load_from_json(Path(__lowerCAmelCase , '''trainer_state.json''' ) ).log_history __magic_name__ :int = int(logs[0]['''train_mem_gpu_peaked_delta'''] / 2**2_0 ) __magic_name__ :Optional[Any] = int(logs[0]['''train_mem_gpu_alloc_delta'''] / 2**2_0 ) __magic_name__ :Any = logs[0]['''train_loss'''] return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss __magic_name__ , __magic_name__ , __magic_name__ :int = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value ) __magic_name__ , __magic_name__ , __magic_name__ :Tuple = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value ) __magic_name__ :Tuple = gpu_alloc_mem_orig - gpu_alloc_mem_bnb __magic_name__ :Tuple = gpu_peak_mem_orig + gpu_alloc_mem_orig __magic_name__ :List[Any] = gpu_peak_mem_bnb + gpu_alloc_mem_bnb __magic_name__ :Optional[int] = gpu_total_mem_orig - gpu_total_mem_bnb # sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which # doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized # in 2 bytes and the diff in optim memory usage is derived as so: # # - normal 25*8=~200MB (8 bytes per param) # - bnb 25*2= ~50MB (2 bytes per param) # # Thus we should expect ~150MB total memory saved. # # Peak memory should be the same - the total should be different by about that same margin # # After leaving a small margin to accommodate for differences between gpus let's check # that we have at least 120MB in savings __magic_name__ :Optional[Any] = 1_2_0 # uncomment the following if this test starts failing - requires py38 for a new print feature # gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb # print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB") # print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB") # print(f"{gpu_alloc_mem_diff=}MB") # print(f"{gpu_peak_mem_diff=}MB") # print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB") # print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB") self.assertGreater( __lowerCAmelCase , __lowerCAmelCase , '''should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got''' F''' a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and''' F''' gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB''' , ) self.assertGreater( __lowerCAmelCase , __lowerCAmelCase , '''should use ~150MB less total gpu memory with BNB, compared to without it for this model but got''' F''' a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and''' F''' gpu_total_mem_bnb={gpu_total_mem_bnb}MB''' , ) self.assertEqual( __lowerCAmelCase , __lowerCAmelCase , F'''loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}''' ) def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = 3E-3 , __lowerCAmelCase = "adafactor" , __lowerCAmelCase = False , __lowerCAmelCase = None , __lowerCAmelCase = 0 , __lowerCAmelCase = True , __lowerCAmelCase = True , __lowerCAmelCase = True , __lowerCAmelCase = True , __lowerCAmelCase = None , ): """simple docstring""" 
__magic_name__ :int = self.test_file_dir / '''../fixtures/tests_samples/wmt_en_ro''' __magic_name__ :Dict = self.get_auto_remove_tmp_dir() __magic_name__ :Tuple = F''' --model_name_or_path {model_name} --train_file {data_dir}/train.json --validation_file {data_dir}/val.json --test_file {data_dir}/test.json --output_dir {output_dir} --overwrite_output_dir --max_train_samples 8 --max_source_length {max_len} --max_target_length {max_len} --do_train --num_train_epochs {str(__lowerCAmelCase )} --per_device_train_batch_size 4 --learning_rate {learning_rate} --warmup_steps 8 --logging_steps 0 --logging_strategy no --save_steps {str(__lowerCAmelCase )} --group_by_length --label_smoothing_factor 0.1 --target_lang ro_RO --source_lang en_XX '''.split() __magic_name__ :str = F''' --do_eval --per_device_eval_batch_size 4 --max_eval_samples 8 --val_max_target_length {max_len} --evaluation_strategy steps --eval_steps {str(__lowerCAmelCase )} '''.split() __magic_name__ :Dict = ''' --do_predict '''.split() __magic_name__ :Optional[int] = [] if do_train: args += args_train if do_eval: args += args_eval if do_predict: args += args_predict if predict_with_generate: args += "--predict_with_generate".split() if do_train: if optim == "adafactor": args += "--adafactor".split() else: args += F'''--optim {optim}'''.split() if extra_args_str is not None: args += extra_args_str.split() if distributed: if n_gpus_to_use is None: __magic_name__ :List[Any] = get_gpu_count() __magic_name__ :Tuple = get_torch_dist_unique_port() __magic_name__ :Union[str, Any] = F''' -m torch.distributed.run --nproc_per_node={n_gpus_to_use} --master_port={master_port} {self.examples_dir_str}/pytorch/translation/run_translation.py '''.split() __magic_name__ :Any = [sys.executable] + distributed_args + args # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die execute_subprocess_async(__lowerCAmelCase , env=self.get_env() ) else: __magic_name__ :List[Any] = ['''run_translation.py'''] + args with patch.object(__lowerCAmelCase , '''argv''' , __lowerCAmelCase ): main() return output_dir
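# A worked version of the memory arithmetic in the bnb test above -- a sketch, with
# the 25M quantized-parameter count taken from that test's own comment.
quantized_params = 25_000_000
adamw_bytes_per_param = 8  # two fp32 optimizer states (exp_avg, exp_avg_sq)
bnb_bytes_per_param = 2    # the same two states held in 8 bits each
savings_mb = quantized_params * (adamw_bytes_per_param - bnb_bytes_per_param) / 2**20
print(f"expected optimizer-state savings: ~{savings_mb:.0f}MB")  # ~143MB, hence the 120MB margin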
from ...configuration_utils import PretrainedConfig


class BertGenerationConfig(PretrainedConfig):
    """Configuration class for BertGeneration encoder/decoder models."""

    model_type = "bert-generation"

    def __init__(
        self,
        vocab_size=50358,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=1,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
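# Usage sketch for the configuration above (the overridden values are illustrative):
from transformers import BertGenerationConfig

config = BertGenerationConfig(hidden_size=512, num_hidden_layers=4)
print(config.hidden_size)  # 512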
import sys

N = (
    "73167176531330624919225119674426574742355349194934"
    "96983520312774506326239578318016984801869478851843"
    "85861560789112949495459501737958331952853208805511"
    "12540698747158523863050715693290963295227443043557"
    "66896648950445244523161731856403098711121722383113"
    "62229893423380308135336276614282806444486645238749"
    "30358907296290491560440772390713810515859307960866"
    "70172427121883998797908792274921901699720888093776"
    "65727333001053367881220235421809751254540594752243"
    "52584907711670556013604839586446706324415722155397"
    "53697817977846174064955149290862569321978468622482"
    "83972241375657056057490261407972968652414535100474"
    "82166370484403199890008895243450658541227588666881"
    "16427171479924442928230863465674813919123162824586"
    "17866458359124566529476545682848912883142607690042"
    "24219022671055626321111109370544217506941658960408"
    "07198403850962455444362981230987879927244284909188"
    "84580156166097919133875499200524063689912560717606"
    "05886116467109405077541002256983155200055935729725"
    "71636269561882670428252483600823257530420752963450"
)


def solution(n: str = N) -> int:
    """Return the greatest product of thirteen consecutive digits in `n`."""
    largest_product = -sys.maxsize - 1
    for i in range(len(n) - 12):
        product = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product


if __name__ == "__main__":
    print(f"{solution() = }")
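# The same sliding-window idea on a shorter slice, as a quick sanity check
# (math.prod and the 20-digit sample are illustrative, not part of the original):
import math

sample = N[:20]
print(max(math.prod(int(d) for d in sample[i : i + 13]) for i in range(len(sample) - 12)))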
'''simple docstring''' import os import re import shutil from argparse import ArgumentParser, Namespace from datasets.commands import BaseDatasetsCLICommand from datasets.utils.logging import get_logger UpperCAmelCase_ : Tuple = '''<<<<<<< This should probably be modified because it mentions: ''' UpperCAmelCase_ : Optional[int] = '''======= >>>>>>> ''' UpperCAmelCase_ : Tuple = [ '''TextEncoderConfig''', '''ByteTextEncoder''', '''SubwordTextEncoder''', '''encoder_config''', '''maybe_build_from_corpus''', '''manual_dir''', ] UpperCAmelCase_ : Optional[int] = [ # (pattern, replacement) # Order is important here for some replacements (R'''tfds\.core''', R'''datasets'''), (R'''tf\.io\.gfile\.GFile''', R'''open'''), (R'''tf\.([\w\d]+)''', R'''datasets.Value(\'\1\')'''), (R'''tfds\.features\.Text\(\)''', R'''datasets.Value(\'string\')'''), (R'''tfds\.features\.Text\(''', R'''datasets.Value(\'string\'),'''), (R'''features\s*=\s*tfds.features.FeaturesDict\(''', R'''features=datasets.Features('''), (R'''tfds\.features\.FeaturesDict\(''', R'''dict('''), (R'''The TensorFlow Datasets Authors''', R'''The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'''), (R'''tfds\.''', R'''datasets.'''), (R'''dl_manager\.manual_dir''', R'''self.config.data_dir'''), (R'''self\.builder_config''', R'''self.config'''), ] def _UpperCamelCase (_lowerCamelCase : Namespace )-> int: '''simple docstring''' return ConvertCommand(args.tfds_path , args.datasets_directory ) class lowerCAmelCase ( __lowerCAmelCase): @staticmethod def lowerCAmelCase ( __SCREAMING_SNAKE_CASE ) -> Optional[Any]: '''simple docstring''' __snake_case = parser.add_parser( '''convert''' , help='''Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.''' , ) train_parser.add_argument( '''--tfds_path''' , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help='''Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.''' , ) train_parser.add_argument( '''--datasets_directory''' , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help='''Path to the HuggingFace Datasets folder.''' ) train_parser.set_defaults(func=__SCREAMING_SNAKE_CASE ) def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE ) -> Dict: '''simple docstring''' __snake_case = get_logger('''datasets-cli/converting''' ) __snake_case = tfds_path __snake_case = datasets_directory def lowerCAmelCase ( self ) -> int: '''simple docstring''' if os.path.isdir(self._tfds_path ): __snake_case = os.path.abspath(self._tfds_path ) elif os.path.isfile(self._tfds_path ): __snake_case = os.path.dirname(self._tfds_path ) else: raise ValueError('''--tfds_path is neither a directory nor a file. 
Please check path.''' ) __snake_case = os.path.abspath(self._datasets_directory ) self._logger.info(F'''Converting datasets from {abs_tfds_path} to {abs_datasets_path}''' ) __snake_case = [] __snake_case = [] __snake_case = {} if os.path.isdir(self._tfds_path ): __snake_case = os.listdir(__SCREAMING_SNAKE_CASE ) else: __snake_case = [os.path.basename(self._tfds_path )] for f_name in file_names: self._logger.info(F'''Looking at file {f_name}''' ) __snake_case = os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __snake_case = os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if not os.path.isfile(__SCREAMING_SNAKE_CASE ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name: self._logger.info('''Skipping file''' ) continue with open(__SCREAMING_SNAKE_CASE , encoding='''utf-8''' ) as f: __snake_case = f.readlines() __snake_case = [] __snake_case = False __snake_case = False __snake_case = [] for line in lines: __snake_case = line # Convert imports if "import tensorflow.compat.v2 as tf" in out_line: continue elif "@tfds.core" in out_line: continue elif "builder=self" in out_line: continue elif "import tensorflow_datasets.public_api as tfds" in out_line: __snake_case = '''import datasets\n''' elif "import tensorflow" in out_line: # order is important here __snake_case = '''''' continue elif "from absl import logging" in out_line: __snake_case = '''from datasets import logging\n''' elif "getLogger" in out_line: __snake_case = out_line.replace('''getLogger''' , '''get_logger''' ) elif any(expression in out_line for expression in TO_HIGHLIGHT ): __snake_case = True __snake_case = list(filter(lambda __SCREAMING_SNAKE_CASE : e in out_line , __SCREAMING_SNAKE_CASE ) ) out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(__SCREAMING_SNAKE_CASE ) + '''\n''' ) out_lines.append(__SCREAMING_SNAKE_CASE ) out_lines.append(__SCREAMING_SNAKE_CASE ) continue else: for pattern, replacement in TO_CONVERT: __snake_case = re.sub(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # Take care of saving utilities (to later move them together with main script) if "tensorflow_datasets" in out_line: __snake_case = re.match(r'''from\stensorflow_datasets.*import\s([^\.\r\n]+)''' , __SCREAMING_SNAKE_CASE ) tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(''',''' ) ) __snake_case = '''from . import ''' + match.group(1 ) # Check we have not forget anything if "tf." in out_line or "tfds." 
in out_line or "tensorflow_datasets" in out_line: raise ValueError(F'''Error converting {out_line.strip()}''' ) if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line: __snake_case = True out_lines.append(__SCREAMING_SNAKE_CASE ) if is_builder or "wmt" in f_name: # We create a new directory for each dataset __snake_case = f_name.replace('''.py''' , '''''' ) __snake_case = os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __snake_case = os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) os.makedirs(__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE ) self._logger.info(F'''Adding directory {output_dir}''' ) imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} ) else: # Utilities will be moved at the end utils_files.append(__SCREAMING_SNAKE_CASE ) if needs_manual_update: with_manual_update.append(__SCREAMING_SNAKE_CASE ) with open(__SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as f: f.writelines(__SCREAMING_SNAKE_CASE ) self._logger.info(F'''Converted in {output_file}''' ) for utils_file in utils_files: try: __snake_case = os.path.basename(__SCREAMING_SNAKE_CASE ) __snake_case = imports_to_builder_map[f_name.replace('''.py''' , '''''' )] self._logger.info(F'''Moving {dest_folder} to {utils_file}''' ) shutil.copy(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) except KeyError: self._logger.error(F'''Cannot find destination folder for {utils_file}. Please copy manually.''' ) if with_manual_update: for file_path in with_manual_update: self._logger.warning( F'''You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.''' )
encode_dict = {
    "a": "AAAAA",
    "b": "AAAAB",
    "c": "AAABA",
    "d": "AAABB",
    "e": "AABAA",
    "f": "AABAB",
    "g": "AABBA",
    "h": "AABBB",
    "i": "ABAAA",
    "j": "BBBAA",
    "k": "ABAAB",
    "l": "ABABA",
    "m": "ABABB",
    "n": "ABBAA",
    "o": "ABBAB",
    "p": "ABBBA",
    "q": "ABBBB",
    "r": "BAAAA",
    "s": "BAAAB",
    "t": "BAABA",
    "u": "BAABB",
    "v": "BBBAB",
    "w": "BABAA",
    "x": "BABAB",
    "y": "BABBA",
    "z": "BABBB",
    " ": " ",
}
decode_dict = {value: key for key, value in encode_dict.items()}


def encode(word: str) -> str:
    """Encode `word` with the Baconian cipher table above."""
    encoded = ""
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception("encode() accepts only letters of the alphabet and spaces")
    return encoded


def decode(coded: str) -> str:
    """Decode a Baconian-cipher string of 'A'/'B' groups separated by spaces."""
    if set(coded) - {"A", "B", " "} != set():
        raise Exception("decode() accepts only 'A', 'B' and spaces")
    decoded = ""
    for word in coded.split():
        while len(word) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()


if __name__ == "__main__":
    from doctest import testmod

    testmod()
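# Round-trip sanity checks for the cipher above:
assert encode("ab") == "AAAAAAAAAB"
assert decode(encode("hello world")) == "hello world"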
# Note: if you intend to run this script make sure you look under scripts/fsmt/ # to locate the appropriate script to do the work correctly. There is a set of scripts to: # - download and prepare data and run the conversion script # - perform eval to get the best hparam into the config # - generate model_cards - useful if you have multiple models from the same paper import argparse import json import os import re from collections import OrderedDict from os.path import basename, dirname import fairseq import torch from fairseq import hub_utils from fairseq.data.dictionary import Dictionary from transformers import FSMTConfig, FSMTForConditionalGeneration from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE from transformers.utils import WEIGHTS_NAME, logging logging.set_verbosity_warning() a_ = 2 # based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping` # values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults: # # * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users) # * `early_stopping`: `False` consistently scored better # * `length_penalty` varied, so will assign the best one depending on the model a_ = { # fairseq: 'wmt19-ru-en': {'length_penalty': 1.1}, 'wmt19-en-ru': {'length_penalty': 1.15}, 'wmt19-en-de': {'length_penalty': 1.0}, 'wmt19-de-en': {'length_penalty': 1.1}, # allenai: 'wmt16-en-de-dist-12-1': {'length_penalty': 0.6}, 'wmt16-en-de-dist-6-1': {'length_penalty': 0.6}, 'wmt16-en-de-12-1': {'length_penalty': 0.8}, 'wmt19-de-en-6-6-base': {'length_penalty': 0.6}, 'wmt19-de-en-6-6-big': {'length_penalty': 0.6}, } # this remaps the different models to their organization names a_ = {} for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]: a_ = 'facebook' for m in [ "wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1", "wmt19-de-en-6-6-base", "wmt19-de-en-6-6-big", ]: a_ = 'allenai' def lowerCamelCase__ ( _a): # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up, # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7} SCREAMING_SNAKE_CASE : Union[str, Any] = dict((re.sub(r"@@$" , "" , _a), v) if k.endswith("@@") else (re.sub(r"$" , "</w>" , _a), v) for k, v in d.items()) SCREAMING_SNAKE_CASE : Optional[int] = "<s> <pad> </s> <unk>".split() # restore the special tokens for k in keep_keys: del da[f"{k}</w>"] SCREAMING_SNAKE_CASE : Optional[int] = d[k] # restore return da def lowerCamelCase__ ( _a , _a): # prep assert os.path.exists(_a) os.makedirs(_a , exist_ok=_a) print(f"Writing results to {pytorch_dump_folder_path}") # handle various types of models SCREAMING_SNAKE_CASE : Dict = basename(_a) SCREAMING_SNAKE_CASE : Union[str, Any] = dirname(_a) SCREAMING_SNAKE_CASE : Optional[int] = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel SCREAMING_SNAKE_CASE : int = cls.hub_models() SCREAMING_SNAKE_CASE : List[Any] = {"bpe": "fastbpe", "tokenizer": "moses"} SCREAMING_SNAKE_CASE : List[Any] = "." # note: since the model dump is old, fairseq has upgraded its model some # time later, and it does a whole lot of rewrites and splits on the saved # weights, therefore we can't use torch.load() directly on the model file. 
# see: upgrade_state_dict(state_dict) in fairseq_model.py print(f"using checkpoint {checkpoint_file}") SCREAMING_SNAKE_CASE : Dict = hub_utils.from_pretrained( _a , _a , _a , archive_map=_a , **_a) SCREAMING_SNAKE_CASE : int = vars(chkpt["args"]["model"]) SCREAMING_SNAKE_CASE : Union[str, Any] = args["source_lang"] SCREAMING_SNAKE_CASE : List[Any] = args["target_lang"] SCREAMING_SNAKE_CASE : Any = dirname(_a) SCREAMING_SNAKE_CASE : Optional[Any] = basename(_a) # dicts SCREAMING_SNAKE_CASE : Tuple = os.path.join(_a , f"dict.{src_lang}.txt") SCREAMING_SNAKE_CASE : Dict = os.path.join(_a , f"dict.{tgt_lang}.txt") SCREAMING_SNAKE_CASE : Dict = Dictionary.load(_a) SCREAMING_SNAKE_CASE : str = rewrite_dict_keys(src_dict.indices) SCREAMING_SNAKE_CASE : int = len(_a) SCREAMING_SNAKE_CASE : Any = os.path.join(_a , "vocab-src.json") print(f"Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records") with open(_a , "w" , encoding="utf-8") as f: f.write(json.dumps(_a , ensure_ascii=_a , indent=_a)) # detect whether this is a do_lower_case situation, which can be derived by checking whether we # have at least one uppercase letter in the source vocab SCREAMING_SNAKE_CASE : Tuple = True for k in src_vocab.keys(): if not k.islower(): SCREAMING_SNAKE_CASE : Dict = False break SCREAMING_SNAKE_CASE : List[Any] = Dictionary.load(_a) SCREAMING_SNAKE_CASE : Optional[int] = rewrite_dict_keys(tgt_dict.indices) SCREAMING_SNAKE_CASE : Union[str, Any] = len(_a) SCREAMING_SNAKE_CASE : int = os.path.join(_a , "vocab-tgt.json") print(f"Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records") with open(_a , "w" , encoding="utf-8") as f: f.write(json.dumps(_a , ensure_ascii=_a , indent=_a)) # merges_file (bpecodes) SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.join(_a , VOCAB_FILES_NAMES["merges_file"]) for fn in ["bpecodes", "code"]: # older fairseq called the merges file "code" SCREAMING_SNAKE_CASE : Optional[Any] = os.path.join(_a , _a) if os.path.exists(_a): break with open(_a , encoding="utf-8") as fin: SCREAMING_SNAKE_CASE : Optional[Any] = fin.read() SCREAMING_SNAKE_CASE : Dict = re.sub(r" \d+$" , "" , _a , 0 , re.M) # remove frequency number print(f"Generating {merges_file}") with open(_a , "w" , encoding="utf-8") as fout: fout.write(_a) # model config SCREAMING_SNAKE_CASE : Dict = os.path.join(_a , "config.json") # validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe - # may have to modify the tokenizer if a different type is used by a future model assert args["bpe"] == "fastbpe", f"need to extend tokenizer to support bpe={args['bpe']}" assert args["tokenizer"] == "moses", f"need to extend tokenizer to support bpe={args['tokenizer']}" SCREAMING_SNAKE_CASE : int = { "architectures": ["FSMTForConditionalGeneration"], "model_type": "fsmt", "activation_dropout": args["activation_dropout"], "activation_function": "relu", "attention_dropout": args["attention_dropout"], "d_model": args["decoder_embed_dim"], "dropout": args["dropout"], "init_std": 0.02, "max_position_embeddings": args["max_source_positions"], "num_hidden_layers": args["encoder_layers"], "src_vocab_size": src_vocab_size, "tgt_vocab_size": tgt_vocab_size, "langs": [src_lang, tgt_lang], "encoder_attention_heads": args["encoder_attention_heads"], "encoder_ffn_dim": args["encoder_ffn_embed_dim"], "encoder_layerdrop": args["encoder_layerdrop"], "encoder_layers": args["encoder_layers"], "decoder_attention_heads": args["decoder_attention_heads"], "decoder_ffn_dim": args["decoder_ffn_embed_dim"], 
"decoder_layerdrop": args["decoder_layerdrop"], "decoder_layers": args["decoder_layers"], "bos_token_id": 0, "pad_token_id": 1, "eos_token_id": 2, "is_encoder_decoder": True, "scale_embedding": not args["no_scale_embedding"], "tie_word_embeddings": args["share_all_embeddings"], } # good hparam defaults to start with SCREAMING_SNAKE_CASE : List[Any] = 5 SCREAMING_SNAKE_CASE : List[str] = False if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]: SCREAMING_SNAKE_CASE : Tuple = best_score_hparams[model_dir]["length_penalty"] else: SCREAMING_SNAKE_CASE : int = 1.0 print(f"Generating {fsmt_model_config_file}") with open(_a , "w" , encoding="utf-8") as f: f.write(json.dumps(_a , ensure_ascii=_a , indent=_a)) # tokenizer config SCREAMING_SNAKE_CASE : int = os.path.join(_a , _a) SCREAMING_SNAKE_CASE : Dict = { "langs": [src_lang, tgt_lang], "model_max_length": 1024, "do_lower_case": do_lower_case, } print(f"Generating {fsmt_tokenizer_config_file}") with open(_a , "w" , encoding="utf-8") as f: f.write(json.dumps(_a , ensure_ascii=_a , indent=_a)) # model SCREAMING_SNAKE_CASE : Tuple = chkpt["models"][0] SCREAMING_SNAKE_CASE : List[Any] = model.state_dict() # rename keys to start with 'model.' SCREAMING_SNAKE_CASE : Any = OrderedDict(("model." + k, v) for k, v in model_state_dict.items()) # remove unneeded keys SCREAMING_SNAKE_CASE : Optional[int] = [ "model.model", "model.encoder.version", "model.decoder.version", "model.encoder_embed_tokens.weight", "model.decoder_embed_tokens.weight", "model.encoder.embed_positions._float_tensor", "model.decoder.embed_positions._float_tensor", ] for k in ignore_keys: model_state_dict.pop(_a , _a) SCREAMING_SNAKE_CASE : Any = FSMTConfig.from_pretrained(_a) SCREAMING_SNAKE_CASE : int = FSMTForConditionalGeneration(_a) # check that it loads ok model_new.load_state_dict(_a , strict=_a) # save SCREAMING_SNAKE_CASE : Tuple = os.path.join(_a , _a) print(f"Generating {pytorch_weights_dump_path}") torch.save(_a , _a) print("Conversion is done!") print("\nLast step is to upload the files to s3") print(f"cd {data_root}") print(f"transformers-cli upload {model_dir}") if __name__ == "__main__": a_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '--fsmt_checkpoint_path', default=None, type=str, required=True, help=( 'Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,' ' bpecodes, etc.' ), ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) a_ = parser.parse_args() convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
import argparse

import torch
from torch import nn

from transformers import M2M100Config, M2M100ForConditionalGeneration


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_m2m100_checkpoint_from_disk(checkpoint_path):
    m2m_100 = torch.load(checkpoint_path, map_location="cpu")
    args = m2m_100["args"] or m2m_100["cfg"]["model"]
    state_dict = m2m_100["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    config = M2M100Config(
        vocab_size=vocab_size,
        max_position_embeddings=1024,
        encoder_layers=args.encoder_layers,
        decoder_layers=args.decoder_layers,
        encoder_attention_heads=args.encoder_attention_heads,
        decoder_attention_heads=args.decoder_attention_heads,
        encoder_ffn_dim=args.encoder_ffn_embed_dim,
        decoder_ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.encoder_embed_dim,
        encoder_layerdrop=args.encoder_layerdrop,
        decoder_layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="relu",
    )

    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = M2M100ForConditionalGeneration(config)
    model.model.load_state_dict(state_dict, strict=False)
    model.lm_head = make_linear_from_emb(model.model.shared)
    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    model = convert_fairseq_m2m100_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
import math
import sys


def read_file_binary(file_path: str) -> str:
    """Read a file and return its bytes as one string of bits."""
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()


def decompress_data(data_bits: str) -> str:
    """Decompress `data_bits` with Lempel-Ziv-Welch, growing the lexicon as it goes."""
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)
    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + "0"
        if math.log2(index).is_integer():
            new_lex = {}
            for curr_key in list(lexicon):
                new_lex["0" + curr_key] = lexicon.pop(curr_key)
            lexicon = new_lex
        lexicon[bin(index)[2:]] = last_match_id + "1"
        index += 1
        curr_string = ""
    return result


def write_file_binary(file_path: str, to_write: str) -> None:
    """Pad `to_write` to whole bytes and write it to `file_path`."""
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length] for i in range(0, len(to_write), byte_length)
            ]
            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )
            for elem in result_byte_array[:-1]:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()


def remove_prefix(data_bits: str) -> str:
    """Strip the size prefix written by the matching compressor."""
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1
    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits


def compress(source_path: str, destination_path: str) -> None:
    """Read an LZW-compressed file, decompress it and write the result."""
    data_bits = read_file_binary(source_path)
    data_bits = remove_prefix(data_bits)
    decompressed = decompress_data(data_bits)
    write_file_binary(destination_path, decompressed)


if __name__ == "__main__":
    compress(sys.argv[1], sys.argv[2])
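# Usage sketch: decode a file written by the matching LZW compressor
# (both file names are placeholders):
# compress("example.lzw", "example_decoded.txt")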
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available SCREAMING_SNAKE_CASE__ : Dict = { """configuration_canine""": ["""CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CanineConfig"""], """tokenization_canine""": ["""CanineTokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ : str = [ """CANINE_PRETRAINED_MODEL_ARCHIVE_LIST""", """CanineForMultipleChoice""", """CanineForQuestionAnswering""", """CanineForSequenceClassification""", """CanineForTokenClassification""", """CanineLayer""", """CanineModel""", """CaninePreTrainedModel""", """load_tf_weights_in_canine""", ] if TYPE_CHECKING: from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig from .tokenization_canine import CanineTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_canine import ( CANINE_PRETRAINED_MODEL_ARCHIVE_LIST, CanineForMultipleChoice, CanineForQuestionAnswering, CanineForSequenceClassification, CanineForTokenClassification, CanineLayer, CanineModel, CaninePreTrainedModel, load_tf_weights_in_canine, ) else: import sys SCREAMING_SNAKE_CASE__ : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
import math


def decimal_to_octal(num: int) -> str:
    """Convert a decimal number to its octal representation."""
    octal = 0
    counter = 0
    while num > 0:
        remainder = num % 8
        octal = octal + (remainder * math.floor(math.pow(10, counter)))
        counter += 1
        num = math.floor(num / 8)  # basically /= 8 without remainder if any
    # This formatting removes trailing '.0' from `octal`.
    return f"0o{int(octal)}"


def main() -> None:
    """Print conversions of some sample numbers."""
    print("\n2 in octal is:")
    print(decimal_to_octal(2))  # = 2
    print("\n8 in octal is:")
    print(decimal_to_octal(8))  # = 10
    print("\n65 in octal is:")
    print(decimal_to_octal(65))  # = 101
    print("\n216 in octal is:")
    print(decimal_to_octal(216))  # = 330
    print("\n512 in octal is:")
    print(decimal_to_octal(512))  # = 1000
    print("\n")


if __name__ == "__main__":
    main()
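# Quick checks of the conversion above (the octal digits of 65 are 1, 0, 1):
assert decimal_to_octal(65) == "0o101"
assert decimal_to_octal(216) == "0o330"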
import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class ChineseCLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ChineseCLIPImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class
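# Hypothetical usage sketch (the checkpoint id and inputs are illustrative):
# from transformers import ChineseCLIPProcessor
# processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
# inputs = processor(text=["一张猫的照片"], images=image, return_tensors="pt")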
import logging
import os

from .state import PartialState


class MultiProcessAdapter(logging.LoggerAdapter):
    """A logger adapter that only emits records on the processes you ask it to."""

    @staticmethod
    def _should_log(main_process_only):
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)

    def log(self, level, msg, *args, **kwargs):
        if PartialState._shared_state == {}:
            raise RuntimeError(
                "You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility."
            )
        main_process_only = kwargs.pop("main_process_only", True)
        in_order = kwargs.pop("in_order", False)

        if self.isEnabledFor(level):
            if self._should_log(main_process_only):
                msg, kwargs = self.process(msg, kwargs)
                self.logger.log(level, msg, *args, **kwargs)
            elif in_order:
                state = PartialState()
                for i in range(state.num_processes):
                    if i == state.process_index:
                        msg, kwargs = self.process(msg, kwargs)
                        self.logger.log(level, msg, *args, **kwargs)
                    state.wait_for_everyone()


def get_logger(name: str, log_level: str = None):
    if log_level is None:
        log_level = os.environ.get("ACCELERATE_LOG_LEVEL", None)
    logger = logging.getLogger(name)
    if log_level is not None:
        logger.setLevel(log_level.upper())
        logger.root.setLevel(log_level.upper())
    return MultiProcessAdapter(logger, {})
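# Usage sketch for the adapter above, via accelerate's public helper:
# from accelerate.logging import get_logger
#
# logger = get_logger(__name__, log_level="INFO")
# logger.info("printed once, on the main process", main_process_only=True)
# logger.info("one line per rank, in rank order", main_process_only=False, in_order=True)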
from sklearn.metrics import matthews_corrcoef import datasets SCREAMING_SNAKE_CASE__ : Optional[Any] = """ Compute the Matthews correlation coefficient (MCC) The Matthews correlation coefficient is used in machine learning as a measure of the quality of binary and multiclass classifications. It takes into account true and false positives and negatives and is generally regarded as a balanced measure which can be used even if the classes are of very different sizes. The MCC is in essence a correlation coefficient value between -1 and +1. A coefficient of +1 represents a perfect prediction, 0 an average random prediction and -1 an inverse prediction. The statistic is also known as the phi coefficient. [source: Wikipedia] """ SCREAMING_SNAKE_CASE__ : Union[str, Any] = """ Args: predictions (list of int): Predicted labels, as returned by a model. references (list of int): Ground truth labels. sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`. Returns: matthews_correlation (dict containing float): Matthews correlation. Examples: Example 1, a basic example with only predictions and references as inputs: >>> matthews_metric = datasets.load_metric(\"matthews_correlation\") >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2], ... predictions=[1, 2, 2, 0, 3, 3]) >>> print(round(results['matthews_correlation'], 2)) 0.54 Example 2, the same example as above, but also including sample weights: >>> matthews_metric = datasets.load_metric(\"matthews_correlation\") >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2], ... predictions=[1, 2, 2, 0, 3, 3], ... sample_weight=[0.5, 3, 1, 1, 1, 2]) >>> print(round(results['matthews_correlation'], 2)) 0.1 Example 3, the same example as above, but with sample weights that cause a negative correlation: >>> matthews_metric = datasets.load_metric(\"matthews_correlation\") >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2], ... predictions=[1, 2, 2, 0, 3, 3], ... sample_weight=[0.5, 1, 0, 0, 0, 1]) >>> print(round(results['matthews_correlation'], 2)) -0.25 """ SCREAMING_SNAKE_CASE__ : int = """\ @article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011} } """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCamelCase_ ( datasets.Metric ): def A ( self ): """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''int32''' ), '''references''': datasets.Value('''int32''' ), } ) , reference_urls=[ '''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html''' ] , ) def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None ): """simple docstring""" return { "matthews_correlation": float(matthews_corrcoef(__lowerCAmelCase , __lowerCAmelCase , sample_weight=__lowerCAmelCase ) ), }
"""simple docstring""" import inspect import os import sys import unittest import accelerate from accelerate.test_utils import execute_subprocess_async, require_tpu class __lowerCamelCase ( unittest.TestCase ): def UpperCAmelCase__ ( self ): lowerCamelCase_ = inspect.getfile(accelerate.test_utils ) lowerCamelCase_ = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_script.py'''] ) lowerCamelCase_ = os.path.sep.join(inspect.getfile(self.__class__ ).split(os.path.sep )[:-1] ) @require_tpu def UpperCAmelCase__ ( self ): lowerCamelCase_ = f"\n {self.test_dir}/xla_spawn.py\n --num_cores 8\n {self.test_file_path}\n ".split() lowerCamelCase_ = [sys.executable] + distributed_args execute_subprocess_async(UpperCAmelCase , env=os.environ.copy() )
from __future__ import annotations


def print_distance(distance: list[float], src):
    print(f"Vertex\tShortest Distance from vertex {src}")
    for i, d in enumerate(distance):
        print(f"{i}\t\t{d}")


def check_negative_cycle(graph: list[dict[str, int]], distance: list[float], edge_count: int):
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
        if distance[u] != float("inf") and distance[u] + w < distance[v]:
            return True
    return False


def bellman_ford(
    graph: list[dict[str, int]], vertex_count: int, edge_count: int, src: int
) -> list[float]:
    """Return the shortest distances from `src`; raise if a negative cycle exists."""
    distance = [float("inf")] * vertex_count
    distance[src] = 0.0

    for _ in range(vertex_count - 1):
        for j in range(edge_count):
            u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
            if distance[u] != float("inf") and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w

    negative_cycle_exists = check_negative_cycle(graph, distance, edge_count)
    if negative_cycle_exists:
        raise Exception("Negative cycle found")

    return distance


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    V = int(input("Enter number of vertices: ").strip())
    E = int(input("Enter number of edges: ").strip())

    graph: list[dict[str, int]] = [{} for _ in range(E)]

    for i in range(E):
        print("Edge ", i + 1)
        src, dest, weight = (
            int(x) for x in input("Enter source, destination, weight: ").strip().split(" ")
        )
        graph[i] = {"src": src, "dst": dest, "weight": weight}

    source = int(input("\nEnter shortest path source:").strip())
    shortest_distance = bellman_ford(graph, V, E, source)
    print_distance(shortest_distance, 0)
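# Tiny worked example for bellman_ford above (3 vertices, 3 edges, no negative cycle):
# edges = [
#     {"src": 0, "dst": 1, "weight": 4},
#     {"src": 0, "dst": 2, "weight": 7},
#     {"src": 1, "dst": 2, "weight": 1},
# ]
# bellman_ford(edges, 3, 3, 0)  # -> [0.0, 4.0, 5.0]; the path 0->1->2 beats the direct 0->2 edge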
import itertools import json import linecache import os import pickle import re import socket import string from collections import Counter from logging import getLogger from pathlib import Path from typing import Callable, Dict, Iterable, List import git import torch from torch.utils.data import Dataset from transformers import BartTokenizer, RagTokenizer, TaTokenizer def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase=True , _lowercase="pt" ): '''simple docstring''' UpperCAmelCase_ : Dict = {'''add_prefix_space''': True} if isinstance(_lowercase , _lowercase ) and not line.startswith(''' ''' ) else {} UpperCAmelCase_ : List[str] = padding_side return tokenizer( [line] , max_length=_lowercase , padding='''max_length''' if pad_to_max_length else None , truncation=_lowercase , return_tensors=_lowercase , add_special_tokens=_lowercase , **_lowercase , ) def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase=None , ): '''simple docstring''' UpperCAmelCase_ : str = input_ids.ne(_lowercase ).any(dim=0 ) if attention_mask is None: return input_ids[:, keep_column_mask] else: return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask]) class __a( _a ): """simple docstring""" def __init__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE="train" ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE="" ,) -> Tuple: super().__init__() UpperCAmelCase_ : Optional[Any] = Path(_SCREAMING_SNAKE_CASE ).joinpath(type_path + '''.source''' ) UpperCAmelCase_ : List[Any] = Path(_SCREAMING_SNAKE_CASE ).joinpath(type_path + '''.target''' ) UpperCAmelCase_ : Tuple = self.get_char_lens(self.src_file ) UpperCAmelCase_ : Any = max_source_length UpperCAmelCase_ : str = max_target_length assert min(self.src_lens ) > 0, f'''found empty line in {self.src_file}''' UpperCAmelCase_ : str = tokenizer UpperCAmelCase_ : Union[str, Any] = prefix if n_obs is not None: UpperCAmelCase_ : Dict = self.src_lens[:n_obs] UpperCAmelCase_ : Union[str, Any] = src_lang UpperCAmelCase_ : Any = tgt_lang def __len__( self ) -> Any: return len(self.src_lens ) def __getitem__( self ,_SCREAMING_SNAKE_CASE ) -> Dict[str, torch.Tensor]: UpperCAmelCase_ : Any = index + 1 # linecache starts at 1 UpperCAmelCase_ : int = self.prefix + linecache.getline(str(self.src_file ) ,_SCREAMING_SNAKE_CASE ).rstrip('''\n''' ) UpperCAmelCase_ : int = linecache.getline(str(self.tgt_file ) ,_SCREAMING_SNAKE_CASE ).rstrip('''\n''' ) assert source_line, f'''empty source line for index {index}''' assert tgt_line, f'''empty tgt line for index {index}''' # Need to add eos token manually for T5 if isinstance(self.tokenizer ,_SCREAMING_SNAKE_CASE ): source_line += self.tokenizer.eos_token tgt_line += self.tokenizer.eos_token # Pad source and target to the right UpperCAmelCase_ : Tuple = ( self.tokenizer.question_encoder if isinstance(self.tokenizer ,_SCREAMING_SNAKE_CASE ) else self.tokenizer ) UpperCAmelCase_ : Dict = self.tokenizer.generator if isinstance(self.tokenizer ,_SCREAMING_SNAKE_CASE ) else self.tokenizer UpperCAmelCase_ : Tuple = encode_line(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,self.max_source_length ,'''right''' ) UpperCAmelCase_ : Tuple = encode_line(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,self.max_target_length ,'''right''' ) UpperCAmelCase_ : Optional[Any] = source_inputs['''input_ids'''].squeeze() UpperCAmelCase_ : Optional[Any] = 
target_inputs['''input_ids'''].squeeze() UpperCAmelCase_ : Tuple = source_inputs['''attention_mask'''].squeeze() return { "input_ids": source_ids, "attention_mask": src_mask, "decoder_input_ids": target_ids, } @staticmethod def a__ ( _SCREAMING_SNAKE_CASE ) -> Optional[int]: return [len(_SCREAMING_SNAKE_CASE ) for x in Path(_SCREAMING_SNAKE_CASE ).open().readlines()] def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> Dict[str, torch.Tensor]: UpperCAmelCase_ : int = torch.stack([x['''input_ids'''] for x in batch] ) UpperCAmelCase_ : Dict = torch.stack([x['''attention_mask'''] for x in batch] ) UpperCAmelCase_ : Tuple = torch.stack([x['''decoder_input_ids'''] for x in batch] ) UpperCAmelCase_ : int = ( self.tokenizer.generator.pad_token_id if isinstance(self.tokenizer ,_SCREAMING_SNAKE_CASE ) else self.tokenizer.pad_token_id ) UpperCAmelCase_ : Tuple = ( self.tokenizer.question_encoder.pad_token_id if isinstance(self.tokenizer ,_SCREAMING_SNAKE_CASE ) else self.tokenizer.pad_token_id ) UpperCAmelCase_ : str = trim_batch(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) UpperCAmelCase_, UpperCAmelCase_ : Union[str, Any] = trim_batch(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,attention_mask=_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Tuple = { '''input_ids''': source_ids, '''attention_mask''': source_mask, '''decoder_input_ids''': y, } return batch __a = getLogger(__name__) def lowerCamelCase__ ( _lowercase ): '''simple docstring''' return list(itertools.chain.from_iterable(_lowercase ) ) def lowerCamelCase__ ( _lowercase ): '''simple docstring''' UpperCAmelCase_ : str = get_git_info() save_json(_lowercase , os.path.join(_lowercase , '''git_log.json''' ) ) def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase=4 , **_lowercase ): '''simple docstring''' with open(_lowercase , '''w''' ) as f: json.dump(_lowercase , _lowercase , indent=_lowercase , **_lowercase ) def lowerCamelCase__ ( _lowercase ): '''simple docstring''' with open(_lowercase ) as f: return json.load(_lowercase ) def lowerCamelCase__ ( ): '''simple docstring''' UpperCAmelCase_ : List[Any] = git.Repo(search_parent_directories=_lowercase ) UpperCAmelCase_ : Union[str, Any] = { '''repo_id''': str(_lowercase ), '''repo_sha''': str(repo.head.object.hexsha ), '''repo_branch''': str(repo.active_branch ), '''hostname''': str(socket.gethostname() ), } return repo_infos def lowerCamelCase__ ( _lowercase , _lowercase ): '''simple docstring''' return list(map(_lowercase , _lowercase ) ) def lowerCamelCase__ ( _lowercase , _lowercase ): '''simple docstring''' with open(_lowercase , '''wb''' ) as f: return pickle.dump(_lowercase , _lowercase ) def lowerCamelCase__ ( _lowercase ): '''simple docstring''' def remove_articles(_lowercase ): return re.sub(r'''\b(a|an|the)\b''' , ''' ''' , _lowercase ) def white_space_fix(_lowercase ): return " ".join(text.split() ) def remove_punc(_lowercase ): UpperCAmelCase_ : Any = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(_lowercase ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(_lowercase ) ) ) ) def lowerCamelCase__ ( _lowercase , _lowercase ): '''simple docstring''' UpperCAmelCase_ : Any = normalize_answer(_lowercase ).split() UpperCAmelCase_ : List[Any] = normalize_answer(_lowercase ).split() UpperCAmelCase_ : Any = Counter(_lowercase ) & Counter(_lowercase ) UpperCAmelCase_ : Tuple = sum(common.values() ) if num_same == 0: return 0 UpperCAmelCase_ : str = 1.0 * num_same / len(_lowercase ) UpperCAmelCase_ : Union[str, Any] = 1.0 * 
num_same / len(_lowercase ) UpperCAmelCase_ : Any = (2 * precision * recall) / (precision + recall) return fa def lowerCamelCase__ ( _lowercase , _lowercase ): '''simple docstring''' return normalize_answer(_lowercase ) == normalize_answer(_lowercase ) def lowerCamelCase__ ( _lowercase , _lowercase ): '''simple docstring''' assert len(_lowercase ) == len(_lowercase ) UpperCAmelCase_ : Optional[int] = 0 for hypo, pred in zip(_lowercase , _lowercase ): em += exact_match_score(_lowercase , _lowercase ) if len(_lowercase ) > 0: em /= len(_lowercase ) return {"em": em} def lowerCamelCase__ ( _lowercase ): '''simple docstring''' return model_prefix.startswith('''rag''' ) def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase ): '''simple docstring''' UpperCAmelCase_ : Optional[int] = {p: p for p in extra_params} # T5 models don't have `dropout` param, they have `dropout_rate` instead UpperCAmelCase_ : Dict = '''dropout_rate''' for p in extra_params: if getattr(_lowercase , _lowercase , _lowercase ): if not hasattr(_lowercase , _lowercase ) and not hasattr(_lowercase , equivalent_param[p] ): logger.info('''config doesn\'t have a `{}` attribute'''.format(_lowercase ) ) delattr(_lowercase , _lowercase ) continue UpperCAmelCase_ : Union[str, Any] = p if hasattr(_lowercase , _lowercase ) else equivalent_param[p] setattr(_lowercase , _lowercase , getattr(_lowercase , _lowercase ) ) delattr(_lowercase , _lowercase ) return hparams, config
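# Worked example of the token-level F1 above: "a cat sat" vs "the cat sat on the mat".
# normalize_answer drops articles and punctuation, leaving token bags
# {cat, sat} and {cat, sat, on, mat}: precision = 2/2, recall = 2/4,
# F1 = 2 * 1.0 * 0.5 / 1.5 ~= 0.667.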
from __future__ import annotations import unittest from transformers import RoFormerConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForMultipleChoice, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerModel, ) from transformers.models.roformer.modeling_tf_roformer import ( TFRoFormerSelfAttention, TFRoFormerSinusoidalPositionalEmbedding, ) class lowerCamelCase_ : def __init__( self , __lowerCAmelCase , __lowerCAmelCase=1_3 , __lowerCAmelCase=7 , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=9_9 , __lowerCAmelCase=3_2 , __lowerCAmelCase=2 , __lowerCAmelCase=4 , __lowerCAmelCase=3_7 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=5_1_2 , __lowerCAmelCase=1_6 , __lowerCAmelCase=2 , __lowerCAmelCase=0.02 , __lowerCAmelCase=3 , __lowerCAmelCase=4 , __lowerCAmelCase=None , ): """simple docstring""" __magic_name__ :Optional[int] = parent __magic_name__ :List[Any] = 1_3 __magic_name__ :Union[str, Any] = 7 __magic_name__ :Optional[Any] = True __magic_name__ :Tuple = True __magic_name__ :List[str] = True __magic_name__ :List[Any] = True __magic_name__ :int = 9_9 __magic_name__ :Any = 3_2 __magic_name__ :Union[str, Any] = 2 __magic_name__ :List[str] = 4 __magic_name__ :List[Any] = 3_7 __magic_name__ :Tuple = '''gelu''' __magic_name__ :Any = 0.1 __magic_name__ :str = 0.1 __magic_name__ :List[str] = 5_1_2 __magic_name__ :int = 1_6 __magic_name__ :Any = 2 __magic_name__ :List[Any] = 0.02 __magic_name__ :Optional[Any] = 3 __magic_name__ :Tuple = 4 __magic_name__ :Optional[Any] = None def A ( self ): """simple docstring""" __magic_name__ :Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __magic_name__ :str = None if self.use_input_mask: __magic_name__ :Optional[int] = random_attention_mask([self.batch_size, self.seq_length] ) __magic_name__ :str = None if self.use_token_type_ids: __magic_name__ :List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __magic_name__ :Union[str, Any] = None __magic_name__ :Tuple = None __magic_name__ :str = None if self.use_labels: __magic_name__ :List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __magic_name__ :List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __magic_name__ :List[Any] = ids_tensor([self.batch_size] , self.num_choices ) __magic_name__ :str = RoFormerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=__lowerCAmelCase , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , 
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): """simple docstring""" __magic_name__ :int = TFRoFormerModel(config=__lowerCAmelCase ) __magic_name__ :Optional[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} __magic_name__ :List[str] = [input_ids, input_mask] __magic_name__ :Any = model(__lowerCAmelCase ) __magic_name__ :List[str] = model(__lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): """simple docstring""" __magic_name__ :Dict = True __magic_name__ :List[str] = TFRoFormerForCausalLM(config=__lowerCAmelCase ) __magic_name__ :str = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } __magic_name__ :Optional[Any] = model(__lowerCAmelCase )['''logits'''] self.parent.assertListEqual( list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] ) def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): """simple docstring""" __magic_name__ :Optional[Any] = TFRoFormerForMaskedLM(config=__lowerCAmelCase ) __magic_name__ :Any = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } __magic_name__ :Dict = model(__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): """simple docstring""" __magic_name__ :int = self.num_labels __magic_name__ :str = TFRoFormerForSequenceClassification(config=__lowerCAmelCase ) __magic_name__ :Optional[int] = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } __magic_name__ :str = model(__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): """simple docstring""" __magic_name__ :Union[str, Any] = self.num_choices __magic_name__ :Tuple = TFRoFormerForMultipleChoice(config=__lowerCAmelCase ) __magic_name__ :int = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) ) __magic_name__ :Optional[Any] = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) ) __magic_name__ :Union[str, Any] = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) ) __magic_name__ :str = { '''input_ids''': multiple_choice_inputs_ids, '''attention_mask''': multiple_choice_input_mask, '''token_type_ids''': multiple_choice_token_type_ids, } __magic_name__ :Tuple = model(__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): """simple docstring""" __magic_name__ :Optional[int] = self.num_labels __magic_name__ :Any = TFRoFormerForTokenClassification(config=__lowerCAmelCase ) __magic_name__ :str = { '''input_ids''': input_ids, '''attention_mask''': input_mask, 
'''token_type_ids''': token_type_ids, } __magic_name__ :Dict = model(__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): """simple docstring""" __magic_name__ :List[str] = TFRoFormerForQuestionAnswering(config=__lowerCAmelCase ) __magic_name__ :List[str] = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } __magic_name__ :Union[str, Any] = model(__lowerCAmelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def A ( self ): """simple docstring""" __magic_name__ :Union[str, Any] = self.prepare_config_and_inputs() ( ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ) :Union[str, Any] = config_and_inputs __magic_name__ :Optional[Any] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_tf class lowerCamelCase_ ( lowerCamelCase , lowerCamelCase , unittest.TestCase ): a__ = ( ( TFRoFormerModel, TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerForMultipleChoice, ) if is_tf_available() else () ) a__ = ( { '''feature-extraction''': TFRoFormerModel, '''fill-mask''': TFRoFormerForMaskedLM, '''question-answering''': TFRoFormerForQuestionAnswering, '''text-classification''': TFRoFormerForSequenceClassification, '''text-generation''': TFRoFormerForCausalLM, '''token-classification''': TFRoFormerForTokenClassification, '''zero-shot''': TFRoFormerForSequenceClassification, } if is_tf_available() else {} ) a__ = False a__ = False def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): """simple docstring""" if pipeline_test_casse_name == "TextGenerationPipelineTests": return True return False def A ( self ): """simple docstring""" __magic_name__ :List[str] = TFRoFormerModelTester(self ) __magic_name__ :List[str] = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=3_7 ) def A ( self ): """simple docstring""" self.config_tester.run_common_tests() def A ( self ): """simple docstring""" __magic_name__ :Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCAmelCase ) def A ( self ): """simple docstring""" __magic_name__ :Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__lowerCAmelCase ) def A ( self ): """simple docstring""" __magic_name__ :Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head(*__lowerCAmelCase ) def A ( self ): """simple docstring""" __magic_name__ :Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*__lowerCAmelCase ) def A ( self ): """simple docstring""" __magic_name__ :Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__lowerCAmelCase ) def A ( self ): """simple docstring""" __magic_name__ :Dict = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_for_sequence_classification(*__lowerCAmelCase ) def A ( self ): """simple docstring""" __magic_name__ :Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__lowerCAmelCase ) @slow def A ( self ): """simple docstring""" __magic_name__ :Optional[Any] = TFRoFormerModel.from_pretrained('''junnyu/roformer_chinese_base''' ) self.assertIsNotNone(__lowerCAmelCase ) @require_tf class lowerCamelCase_ ( unittest.TestCase ): @slow def A ( self ): """simple docstring""" __magic_name__ :int = TFRoFormerForMaskedLM.from_pretrained('''junnyu/roformer_chinese_base''' ) __magic_name__ :Dict = tf.constant([[0, 1, 2, 3, 4, 5]] ) __magic_name__ :Optional[Any] = model(__lowerCAmelCase )[0] # TODO Replace vocab size __magic_name__ :int = 5_0_0_0_0 __magic_name__ :Tuple = [1, 6, vocab_size] self.assertEqual(output.shape , __lowerCAmelCase ) print(output[:, :3, :3] ) # TODO Replace values below with what was printed above. __magic_name__ :Any = tf.constant( [ [ [-0.12053341, -1.0264901, 0.29221946], [-1.5133783, 0.197433, 0.15190607], [-5.0135403, -3.900256, -0.84038764], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , __lowerCAmelCase , atol=1E-4 ) @require_tf class lowerCamelCase_ ( unittest.TestCase ): a__ = 1e-4 def A ( self ): """simple docstring""" __magic_name__ :Optional[int] = tf.constant([[4, 1_0]] ) __magic_name__ :Optional[int] = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 ) __magic_name__ :Optional[Any] = emba(input_ids.shape ) __magic_name__ :List[str] = tf.constant( [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]] ) tf.debugging.assert_near(__lowerCAmelCase , __lowerCAmelCase , atol=self.tolerance ) def A ( self ): """simple docstring""" __magic_name__ :Tuple = tf.constant( [ [0.0000, 0.0000, 0.0000, 0.0000, 0.0000], [0.8415, 0.8219, 0.8020, 0.7819, 0.7617], [0.9093, 0.9364, 0.9581, 0.9749, 0.9870], ] ) __magic_name__ :Union[str, Any] = TFRoFormerSinusoidalPositionalEmbedding(num_positions=5_1_2 , embedding_dim=5_1_2 ) emba([2, 1_6, 5_1_2] ) __magic_name__ :Optional[int] = emba.weight[:3, :5] tf.debugging.assert_near(__lowerCAmelCase , __lowerCAmelCase , atol=self.tolerance ) @require_tf class lowerCamelCase_ ( unittest.TestCase ): a__ = 1e-4 def A ( self ): """simple docstring""" # 2,12,16,64 __magic_name__ :int = tf.reshape(tf.range(2 * 1_2 * 1_6 * 6_4 , dtype=tf.floataa ) , shape=(2, 1_2, 1_6, 6_4) ) / 1_0_0 __magic_name__ :str = -tf.reshape(tf.range(2 * 1_2 * 1_6 * 6_4 , dtype=tf.floataa ) , shape=(2, 1_2, 1_6, 6_4) ) / 1_0_0 __magic_name__ :int = TFRoFormerSinusoidalPositionalEmbedding(num_positions=3_2 , embedding_dim=6_4 ) __magic_name__ :List[str] = embed_positions([2, 1_6, 7_6_8] )[None, None, :, :] __magic_name__ , __magic_name__ :Union[str, Any] = TFRoFormerSelfAttention.apply_rotary_position_embeddings( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) __magic_name__ :Tuple = tf.constant( [ [0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700], [-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343], [-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985], [-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871], [0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980], [3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253], ] ) __magic_name__ :List[str] = tf.constant( [ [0.0000, -0.0100, -0.0200, -0.0300, -0.0400, 
-0.0500, -0.0600, -0.0700], [0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343], [1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985], [2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871], [-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980], [-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253], ] ) tf.debugging.assert_near(query_layer[0, 0, :6, :8] , __lowerCAmelCase , atol=self.tolerance ) tf.debugging.assert_near(key_layer[0, 0, :6, :8] , __lowerCAmelCase , atol=self.tolerance )
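The last block above pins down TFRoFormerSelfAttention.apply_rotary_position_embeddings numerically. As a companion, here is a minimal NumPy sketch of the same rotation under the layout the sinusoidal test implies (first half of the table is sin, second half cos); the helper names are mine, not the library's:

import numpy as np

def sinusoidal_table(num_positions: int, dim: int) -> np.ndarray:
    # [sin(p*w_0)..sin(p*w_{d/2-1}) | cos(p*w_0)..cos(p*w_{d/2-1})] per position
    inv_freq = 1.0 / (10000 ** (np.arange(0, dim, 2) / dim))
    angles = np.outer(np.arange(num_positions), inv_freq)
    return np.concatenate([np.sin(angles), np.cos(angles)], axis=-1)

def apply_rotary(x: np.ndarray, table: np.ndarray) -> np.ndarray:
    # x: (..., seq_len, head_dim); table: (seq_len, head_dim)
    sin, cos = np.split(table, 2, axis=-1)
    sin, cos = np.repeat(sin, 2, axis=-1), np.repeat(cos, 2, axis=-1)
    rotated = np.empty_like(x)
    rotated[..., 0::2], rotated[..., 1::2] = -x[..., 1::2], x[..., 0::2]
    return x * cos + rotated * sin

q = np.random.randn(2, 16, 64)
print(apply_rotary(q, sinusoidal_table(16, 64)).shape)  # (2, 16, 64)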
# Algorithm for pigeonhole sorting
def pigeonhole_sort(a):
    min_val = min(a)  # min() finds the minimum value
    max_val = max(a)  # max() finds the maximum value

    size = max_val - min_val + 1  # size is the difference of max and min values plus one

    # list of pigeonholes of size equal to the variable size
    holes = [0] * size

    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x, int), "integers only please"
        holes[x - min_val] += 1

    # Put the elements back into the array in sorted order.
    i = 0
    for count in range(size):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1


def main():
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a)
    print("Sorted order is:", " ".join(str(n) for n in a))


if __name__ == "__main__":
    main()
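A quick check of the fixed version above; the sort runs in O(n + range) time and is only practical when the value range is small:

data = [5, -1, 3, 3, 0]
pigeonhole_sort(data)  # sorts in place
print(data)            # [-1, 0, 3, 3, 5]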
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available SCREAMING_SNAKE_CASE__ : Optional[int] = {"""tokenization_herbert""": ["""HerbertTokenizer"""]} try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ : Optional[Any] = ["""HerbertTokenizerFast"""] if TYPE_CHECKING: from .tokenization_herbert import HerbertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_herbert_fast import HerbertTokenizerFast else: import sys SCREAMING_SNAKE_CASE__ : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
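The init module above wires HerbertTokenizer through transformers' _LazyModule so heavy dependencies load only on first use. A minimal standard-library sketch of the same idea, using the module-level __getattr__ from PEP 562 (the structure dict is illustrative, and the relative import only resolves when this sits in a package __init__):

import importlib

_import_structure = {"tokenization_herbert": ["HerbertTokenizer"]}

def __getattr__(name):
    # Import the submodule only when one of its exports is first accessed.
    for submodule, exports in _import_structure.items():
        if name in exports:
            return getattr(importlib.import_module(f".{submodule}", __name__), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")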
from math import sqrt


def is_prime(number: int) -> bool:
    """Return True if the given number is prime."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10001) -> int:
    """Return the nth prime number (Project Euler problem 7)."""
    count = 0
    number = 1
    while count != nth and number < 3:
        number += 1
        if is_prime(number):
            count += 1
    while count != nth:
        number += 2
        if is_prime(number):
            count += 1
    return number


if __name__ == "__main__":
    print(f"{solution() = }")
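Two small checks for the fixed functions above; 104743 is the well-known answer to Project Euler problem 7:

assert solution(6) == 13       # the 6th prime
assert solution() == 104743    # the 10001st prime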
import argparse import gdown import numpy as np import torch from huggingface_hub import hf_hub_download from transformers import ( CLIPTokenizer, CLIPTokenizerFast, VideoMAEImageProcessor, XCLIPConfig, XCLIPModel, XCLIPProcessor, XCLIPTextConfig, XCLIPVisionConfig, ) def __lowercase ( snake_case, snake_case ): """simple docstring""" __magic_name__ :str = XCLIPTextConfig() # derive patch size from model name __magic_name__ :Union[str, Any] = model_name.find('''patch''' ) __magic_name__ :Optional[Any] = int(model_name[start_idx + len('''patch''' ) : start_idx + len('''patch''' ) + 2] ) __magic_name__ :int = XCLIPVisionConfig(patch_size=snake_case, num_frames=snake_case ) if "large" in model_name: __magic_name__ :Dict = 7_6_8 __magic_name__ :int = 3_0_7_2 __magic_name__ :List[Any] = 1_2 __magic_name__ :str = 1_0_2_4 __magic_name__ :Any = 4_0_9_6 __magic_name__ :Optional[Any] = 1_6 __magic_name__ :Union[str, Any] = 2_4 __magic_name__ :Union[str, Any] = 7_6_8 __magic_name__ :Tuple = 3_0_7_2 if model_name == "xclip-large-patch14-16-frames": __magic_name__ :List[str] = 3_3_6 __magic_name__ :Any = XCLIPConfig.from_text_vision_configs(snake_case, snake_case ) if "large" in model_name: __magic_name__ :str = 7_6_8 return config def __lowercase ( snake_case ): """simple docstring""" if name == "token_embedding.weight": __magic_name__ :Any = name.replace('''token_embedding.weight''', '''text_model.embeddings.token_embedding.weight''' ) if name == "positional_embedding": __magic_name__ :Any = name.replace('''positional_embedding''', '''text_model.embeddings.position_embedding.weight''' ) if "ln_1" in name: __magic_name__ :List[str] = name.replace('''ln_1''', '''layer_norm1''' ) if "ln_2" in name: __magic_name__ :str = name.replace('''ln_2''', '''layer_norm2''' ) if "c_fc" in name: __magic_name__ :List[Any] = name.replace('''c_fc''', '''fc1''' ) if "c_proj" in name: __magic_name__ :Any = name.replace('''c_proj''', '''fc2''' ) if name.startswith('''transformer.resblocks''' ): __magic_name__ :Any = name.replace('''transformer.resblocks''', '''text_model.encoder.layers''' ) if "attn.out_proj" in name and "message" not in name: __magic_name__ :Union[str, Any] = name.replace('''attn.out_proj''', '''self_attn.out_proj''' ) if "ln_final" in name: __magic_name__ :Tuple = name.replace('''ln_final''', '''text_model.final_layer_norm''' ) # visual encoder if name == "visual.class_embedding": __magic_name__ :List[Any] = name.replace('''visual.class_embedding''', '''vision_model.embeddings.class_embedding''' ) if name == "visual.positional_embedding": __magic_name__ :Any = name.replace('''visual.positional_embedding''', '''vision_model.embeddings.position_embedding.weight''' ) if name.startswith('''visual.transformer.resblocks''' ): __magic_name__ :Union[str, Any] = name.replace('''visual.transformer.resblocks''', '''vision_model.encoder.layers''' ) if "visual.conv1" in name: __magic_name__ :Tuple = name.replace('''visual.conv1''', '''vision_model.embeddings.patch_embedding''' ) if "visual.ln_pre" in name: __magic_name__ :Tuple = name.replace('''visual.ln_pre''', '''vision_model.pre_layernorm''' ) if "visual.ln_post" in name: __magic_name__ :Optional[Any] = name.replace('''visual.ln_post''', '''vision_model.post_layernorm''' ) if "visual.proj" in name: __magic_name__ :Tuple = name.replace('''visual.proj''', '''visual_projection.weight''' ) if "text_projection" in name: __magic_name__ :int = name.replace('''text_projection''', '''text_projection.weight''' ) # things on top if "prompts_visual_proj" in name: 
__magic_name__ :int = name.replace('''prompts_visual_proj''', '''prompts_visual_projection''' ) if "prompts_visual_ln" in name: __magic_name__ :Dict = name.replace('''prompts_visual_ln''', '''prompts_visual_layernorm''' ) # mit if name == "mit.positional_embedding": __magic_name__ :List[Any] = name.replace('''positional''', '''position''' ) if name.startswith('''mit.resblocks''' ): __magic_name__ :Union[str, Any] = name.replace('''mit.resblocks''', '''mit.encoder.layers''' ) # prompts generator if name.startswith('''prompts_generator.norm''' ): __magic_name__ :str = name.replace('''prompts_generator.norm''', '''prompts_generator.layernorm''' ) return name def __lowercase ( snake_case, snake_case ): """simple docstring""" for key in orig_state_dict.copy().keys(): __magic_name__ :Any = orig_state_dict.pop(snake_case ) if "attn.in_proj" in key: __magic_name__ :str = key.split('''.''' ) if key.startswith('''visual''' ): __magic_name__ :List[Any] = key_split[3] __magic_name__ :List[Any] = config.vision_config.hidden_size if "message_attn" in key: if "weight" in key: __magic_name__ :List[Any] = val[ :dim, : ] __magic_name__ :List[str] = val[ dim : dim * 2, : ] __magic_name__ :List[str] = val[ -dim:, : ] else: __magic_name__ :str = val[ :dim ] __magic_name__ :Optional[int] = val[ dim : dim * 2 ] __magic_name__ :Any = val[ -dim: ] else: if "weight" in key: __magic_name__ :int = val[ :dim, : ] __magic_name__ :Union[str, Any] = val[ dim : dim * 2, : ] __magic_name__ :List[Any] = val[ -dim:, : ] else: __magic_name__ :Union[str, Any] = val[:dim] __magic_name__ :str = val[ dim : dim * 2 ] __magic_name__ :Dict = val[-dim:] elif key.startswith('''mit''' ): __magic_name__ :List[Any] = key_split[2] __magic_name__ :Any = config.vision_config.mit_hidden_size if "weight" in key: __magic_name__ :Union[str, Any] = val[:dim, :] __magic_name__ :Optional[int] = val[dim : dim * 2, :] __magic_name__ :int = val[-dim:, :] else: __magic_name__ :Tuple = val[:dim] __magic_name__ :Optional[int] = val[dim : dim * 2] __magic_name__ :Optional[int] = val[-dim:] else: __magic_name__ :Any = key_split[2] __magic_name__ :List[Any] = config.text_config.hidden_size if "weight" in key: __magic_name__ :Union[str, Any] = val[:dim, :] __magic_name__ :Tuple = val[ dim : dim * 2, : ] __magic_name__ :str = val[-dim:, :] else: __magic_name__ :int = val[:dim] __magic_name__ :Any = val[ dim : dim * 2 ] __magic_name__ :str = val[-dim:] else: __magic_name__ :Tuple = rename_key(snake_case ) if new_key_name in ["visual_projection.weight", "text_projection.weight"]: __magic_name__ :List[Any] = val.T __magic_name__ :Optional[Any] = val return orig_state_dict def __lowercase ( snake_case ): """simple docstring""" if num_frames == 8: __magic_name__ :Any = '''eating_spaghetti_8_frames.npy''' elif num_frames == 1_6: __magic_name__ :List[Any] = '''eating_spaghetti.npy''' elif num_frames == 3_2: __magic_name__ :Tuple = '''eating_spaghetti_32_frames.npy''' __magic_name__ :str = hf_hub_download( repo_id='''hf-internal-testing/spaghetti-video''', filename=snake_case, repo_type='''dataset''', ) __magic_name__ :List[Any] = np.load(snake_case ) return list(snake_case ) def __lowercase ( snake_case, snake_case=None, snake_case=False ): """simple docstring""" __magic_name__ :Union[str, Any] = { # fully supervised kinetics-400 checkpoints '''xclip-base-patch32''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth''', '''xclip-base-patch32-16-frames''': ( 
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth''' ), '''xclip-base-patch16''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth''', '''xclip-base-patch16-16-frames''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth''' ), '''xclip-large-patch14''': '''https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&amp;export=download&amp;confirm=t&amp;uuid=b26caedc-88e2-473e-830a-9d158b653cdb''', '''xclip-large-patch14-16-frames''': '''https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&amp;export=download&amp;confirm=t&amp;uuid=538fa810-e671-4050-b385-9a623f89804f''', # fully supervised kinetics-600 checkpoints '''xclip-base-patch16-kinetics-600''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth''' ), '''xclip-base-patch16-kinetics-600-16-frames''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth''' ), '''xclip-large-patch14-kinetics-600''': '''https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&amp;export=download&amp;confirm=t&amp;uuid=141d4977-4a65-44ae-864f-4b0c19f838be''', # few shot '''xclip-base-patch16-hmdb-2-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth''' ), '''xclip-base-patch16-hmdb-4-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth''' ), '''xclip-base-patch16-hmdb-8-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth''' ), '''xclip-base-patch16-hmdb-16-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth''' ), '''xclip-base-patch16-ucf-2-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth''' ), '''xclip-base-patch16-ucf-4-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth''' ), '''xclip-base-patch16-ucf-8-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth''' ), '''xclip-base-patch16-ucf-16-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth''' ), # zero shot '''xclip-base-patch16-zero-shot''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth''', } __magic_name__ :Optional[int] = model_to_url[model_name] __magic_name__ :List[str] = 8 if "16-frames" in model_name: __magic_name__ :List[Any] = 1_6 elif "shot" in model_name: __magic_name__ :Dict = 3_2 __magic_name__ :str = get_xclip_config(snake_case, snake_case ) __magic_name__ :List[Any] = XCLIPModel(snake_case ) model.eval() if "drive" in checkpoint_url: __magic_name__ :Any = '''pytorch_model.bin''' gdown.cached_download(snake_case, snake_case, quiet=snake_case ) __magic_name__ :Optional[Any] = torch.load(snake_case, map_location='''cpu''' )['''model'''] else: __magic_name__ :Optional[int] = torch.hub.load_state_dict_from_url(snake_case )['''model'''] __magic_name__ :List[str] = convert_state_dict(snake_case, snake_case ) __magic_name__ :List[Any] = XCLIPModel(snake_case ) __magic_name__ , __magic_name__ :Optional[Any] = model.load_state_dict(snake_case, strict=snake_case ) assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"] model.eval() __magic_name__ :str = 3_3_6 if model_name == '''xclip-large-patch14-16-frames''' else 2_2_4 __magic_name__ :Optional[int] = VideoMAEImageProcessor(size=snake_case ) 
__magic_name__ :Optional[int] = CLIPTokenizer.from_pretrained('''openai/clip-vit-base-patch32''' ) __magic_name__ :Tuple = CLIPTokenizerFast.from_pretrained('''openai/clip-vit-base-patch32''' ) __magic_name__ :Optional[int] = XCLIPProcessor(image_processor=snake_case, tokenizer=snake_case ) __magic_name__ :List[Any] = prepare_video(snake_case ) __magic_name__ :str = processor( text=['''playing sports''', '''eating spaghetti''', '''go shopping'''], videos=snake_case, return_tensors='''pt''', padding=snake_case ) print('''Shape of pixel values:''', inputs.pixel_values.shape ) with torch.no_grad(): __magic_name__ :Tuple = model(**snake_case ) # Verify outputs __magic_name__ :Any = outputs.logits_per_video __magic_name__ :str = logits_per_video.softmax(dim=1 ) print('''Probs:''', snake_case ) # kinetics-400 if model_name == "xclip-base-patch32": __magic_name__ :Dict = torch.tensor([[0.0019, 0.9951, 0.0030]] ) elif model_name == "xclip-base-patch32-16-frames": __magic_name__ :str = torch.tensor([[7.0_9_9_9E-0_4, 9.9_8_8_3E-0_1, 4.5_5_8_0E-0_4]] ) elif model_name == "xclip-base-patch16": __magic_name__ :Tuple = torch.tensor([[0.0083, 0.9681, 0.0236]] ) elif model_name == "xclip-base-patch16-16-frames": __magic_name__ :Tuple = torch.tensor([[7.6_9_3_7E-0_4, 9.9_7_2_8E-0_1, 1.9_4_7_3E-0_3]] ) elif model_name == "xclip-large-patch14": __magic_name__ :str = torch.tensor([[0.0062, 0.9864, 0.0075]] ) elif model_name == "xclip-large-patch14-16-frames": __magic_name__ :Optional[int] = torch.tensor([[3.3_8_7_7E-0_4, 9.9_9_3_7E-0_1, 2.8_8_8_8E-0_4]] ) # kinetics-600 elif model_name == "xclip-base-patch16-kinetics-600": __magic_name__ :Optional[int] = torch.tensor([[0.0555, 0.8914, 0.0531]] ) elif model_name == "xclip-base-patch16-kinetics-600-16-frames": __magic_name__ :List[str] = torch.tensor([[3.8_5_5_4E-0_4, 9.9_9_2_9E-0_1, 3.2_7_5_4E-0_4]] ) elif model_name == "xclip-large-patch14-kinetics-600": __magic_name__ :List[str] = torch.tensor([[0.0036, 0.9920, 0.0045]] ) # few shot elif model_name == "xclip-base-patch16-hmdb-2-shot": __magic_name__ :Tuple = torch.tensor([[7.1_8_9_0E-0_6, 9.9_9_9_4E-0_1, 5.6_5_5_9E-0_5]] ) elif model_name == "xclip-base-patch16-hmdb-4-shot": __magic_name__ :List[str] = torch.tensor([[1.0_3_2_0E-0_5, 9.9_9_9_3E-0_1, 6.2_4_3_5E-0_5]] ) elif model_name == "xclip-base-patch16-hmdb-8-shot": __magic_name__ :Optional[int] = torch.tensor([[4.1_3_7_7E-0_6, 9.9_9_9_0E-0_1, 9.8_3_8_6E-0_5]] ) elif model_name == "xclip-base-patch16-hmdb-16-shot": __magic_name__ :Optional[int] = torch.tensor([[4.1_3_4_7E-0_5, 9.9_9_6_2E-0_1, 3.3_4_1_1E-0_4]] ) elif model_name == "xclip-base-patch16-ucf-2-shot": __magic_name__ :Union[str, Any] = torch.tensor([[8.5_8_5_7E-0_5, 9.9_9_2_8E-0_1, 6.3_2_9_1E-0_4]] ) elif model_name == "xclip-base-patch16-ucf-4-shot": __magic_name__ :Union[str, Any] = torch.tensor([[8.5_8_5_7E-0_5, 9.9_9_2_8E-0_1, 6.3_2_9_1E-0_4]] ) elif model_name == "xclip-base-patch16-ucf-8-shot": __magic_name__ :Optional[int] = torch.tensor([[0.0027, 0.9904, 0.0070]] ) elif model_name == "xclip-base-patch16-ucf-16-shot": __magic_name__ :Any = torch.tensor([[9.8_2_1_9E-0_4, 9.9_5_9_3E-0_1, 3.0_8_6_3E-0_3]] ) # zero shot elif model_name == "xclip-base-patch16-zero-shot": __magic_name__ :Optional[int] = torch.tensor([[3.5_0_8_2E-0_4, 9.9_7_8_5E-0_1, 1.7_9_6_6E-0_3]] ) else: raise ValueError(f'''Model name {model_name} not supported''' ) assert torch.allclose(snake_case, snake_case, atol=1E-3 ) print('''Looks ok!''' ) if pytorch_dump_folder_path is not None: print(f'''Saving model {model_name} 
to {pytorch_dump_folder_path}''' ) model.save_pretrained(snake_case ) if push_to_hub: print('''Pushing model, processor and slow tokenizer files to the hub...''' ) model.push_to_hub(snake_case, organization='''nielsr''' ) processor.push_to_hub(snake_case, organization='''nielsr''' ) slow_tokenizer.push_to_hub(snake_case, organization='''nielsr''' ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""xclip-base-patch32""", type=str, help="""Name of the model.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) SCREAMING_SNAKE_CASE__ : List[Any] = parser.parse_args() convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
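Much of convert_state_dict above slices a fused attention matrix into separate query, key and value weights: CLIP-style checkpoints store them as one (3*dim, dim) in_proj tensor. A toy-shape sketch of that split:

import numpy as np

dim = 4
in_proj_weight = np.arange(3 * dim * dim, dtype=np.float32).reshape(3 * dim, dim)
q_w = in_proj_weight[:dim, :]
k_w = in_proj_weight[dim : dim * 2, :]
v_w = in_proj_weight[-dim:, :]
assert q_w.shape == k_w.shape == v_w.shape == (dim, dim)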
import os import re import warnings from shutil import copyfile from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer if TYPE_CHECKING: from ...tokenization_utils_base import TextInput from ...utils import logging lowerCamelCase__ : int = logging.get_logger(__name__) lowerCamelCase__ : str = {"""vocab_file""": """spiece.model"""} lowerCamelCase__ : Optional[Any] = { """vocab_file""": { """t5-small""": """https://huggingface.co/t5-small/resolve/main/spiece.model""", """t5-base""": """https://huggingface.co/t5-base/resolve/main/spiece.model""", """t5-large""": """https://huggingface.co/t5-large/resolve/main/spiece.model""", """t5-3b""": """https://huggingface.co/t5-3b/resolve/main/spiece.model""", """t5-11b""": """https://huggingface.co/t5-11b/resolve/main/spiece.model""", } } # TODO(PVP) - this should be removed in Transformers v5 lowerCamelCase__ : str = { """t5-small""": 5_1_2, """t5-base""": 5_1_2, """t5-large""": 5_1_2, """t5-3b""": 5_1_2, """t5-11b""": 5_1_2, } lowerCamelCase__ : Union[str, Any] = """▁""" class __magic_name__ (snake_case_ ): '''simple docstring''' __lowercase : Optional[Any] = VOCAB_FILES_NAMES __lowercase : str = PRETRAINED_VOCAB_FILES_MAP __lowercase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowercase : Dict = ['input_ids', 'attention_mask'] def __init__( self:int , _a:Any , _a:List[str]="</s>" , _a:Union[str, Any]="<unk>" , _a:List[Any]="<pad>" , _a:Optional[Any]=1_00 , _a:List[str]=None , _a:Optional[Dict[str, Any]] = None , _a:int=True , **_a:int , ): # Add extra_ids to the special token list if extra_ids > 0 and additional_special_tokens is None: snake_case__ = [F"""<extra_id_{i}>""" for i in range(_a )] elif extra_ids > 0 and additional_special_tokens is not None: # Check that we have the right number of extra_id special tokens snake_case__ = len(set(filter(lambda _a : bool('''extra_id''' in str(_a ) ) , _a ) ) ) if extra_tokens != extra_ids: raise ValueError( F"""Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are""" ''' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids''' ''' tokens''' ) if legacy: logger.warning_once( F"""You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. 
We recommend you to""" ''' read the related pull request available at https://github.com/huggingface/transformers/pull/24565''' ) snake_case__ = legacy snake_case__ = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( eos_token=_a , unk_token=_a , pad_token=_a , extra_ids=_a , additional_special_tokens=_a , sp_model_kwargs=self.sp_model_kwargs , legacy=_a , **_a , ) snake_case__ = vocab_file snake_case__ = extra_ids snake_case__ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(_a ) @staticmethod def SCREAMING_SNAKE_CASE__ ( _a:Optional[int] , _a:Union[str, Any] , _a:str ): if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes: snake_case__ = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path] if init_max_model_length is not None and init_max_model_length != max_model_length: return init_max_model_length elif init_max_model_length is None: warnings.warn( '''This tokenizer was incorrectly instantiated with a model max length of''' F""" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this""" ''' behavior is kept to avoid breaking backwards compatibility when padding/encoding with''' ''' `truncation is True`.\n- Be aware that you SHOULD NOT rely on''' F""" {pretrained_model_name_or_path} automatically truncating your input to""" F""" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences""" F""" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with""" ''' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please''' ''' instantiate this tokenizer with `model_max_length` set to your preferred value.''' , _a , ) return max_model_length @property def SCREAMING_SNAKE_CASE__ ( self:List[str] ): return self.sp_model.get_piece_size() + self._extra_ids def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ): snake_case__ = {self.convert_ids_to_tokens(_a ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] , _a:List[int] , _a:Optional[List[int]] = None , _a:bool = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_a , token_ids_a=_a , already_has_special_tokens=_a ) # normal case: some special tokens if token_ids_a is None: return ([0] * len(_a )) + [1] return ([0] * len(_a )) + [1] + ([0] * len(_a )) + [1] def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ): return list( set(filter(lambda _a : bool(re.search(r'''<extra_id_\d+>''' , _a ) ) is not None , self.additional_special_tokens ) ) ) def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ): return [self._convert_token_to_id(_a ) for token in self.get_sentinel_tokens()] def SCREAMING_SNAKE_CASE__ ( self:int , _a:List[int] ): if len(_a ) > 0 and token_ids[-1] == self.eos_token_id: warnings.warn( F"""This sequence already has {self.eos_token}. 
In future versions this behavior may lead to duplicated""" ''' eos tokens being added.''' ) return token_ids else: return token_ids + [self.eos_token_id] def SCREAMING_SNAKE_CASE__ ( self:Tuple , _a:List[int] , _a:Optional[List[int]] = None ): snake_case__ = [self.eos_token_id] if token_ids_a is None: return len(token_ids_a + eos ) * [0] return len(token_ids_a + eos + token_ids_a + eos ) * [0] def SCREAMING_SNAKE_CASE__ ( self:List[str] , _a:List[int] , _a:Optional[List[int]] = None ): snake_case__ = self._add_eos_if_not_present(_a ) if token_ids_a is None: return token_ids_a else: snake_case__ = self._add_eos_if_not_present(_a ) return token_ids_a + token_ids_a def __getstate__( self:Dict ): snake_case__ = self.__dict__.copy() snake_case__ = None return state def __setstate__( self:Tuple , _a:Union[str, Any] ): snake_case__ = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): snake_case__ = {} snake_case__ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def SCREAMING_SNAKE_CASE__ ( self:List[Any] , _a:"TextInput" , **_a:Tuple ): # Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at # the beginning of the text if not self.legacy: snake_case__ = SPIECE_UNDERLINE + text.replace(_a , ''' ''' ) return super().tokenize(_a , **_a ) def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] , _a:Optional[int] , **_a:str ): if not self.legacy: snake_case__ = text.startswith(_a ) if is_first: snake_case__ = text[1:] snake_case__ = self.sp_model.encode(_a , out_type=_a ) if not self.legacy and not is_first and not text.startswith(''' ''' ) and tokens[0].startswith(_a ): snake_case__ = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:] return tokens def SCREAMING_SNAKE_CASE__ ( self:Dict , _a:int ): if token.startswith('''<extra_id_''' ): snake_case__ = re.match(r'''<extra_id_(\d+)>''' , _a ) snake_case__ = int(match.group(1 ) ) return self.vocab_size - num - 1 return self.sp_model.piece_to_id(_a ) def SCREAMING_SNAKE_CASE__ ( self:Tuple , _a:List[Any] ): if index < self.sp_model.get_piece_size(): snake_case__ = self.sp_model.IdToPiece(_a ) else: snake_case__ = F"""<extra_id_{self.vocab_size - 1 - index}>""" return token def SCREAMING_SNAKE_CASE__ ( self:Tuple , _a:Dict ): snake_case__ = [] snake_case__ = '''''' snake_case__ = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(_a ) + token snake_case__ = True snake_case__ = [] else: current_sub_tokens.append(_a ) snake_case__ = False out_string += self.sp_model.decode(_a ) return out_string.strip() def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] , _a:str , _a:Optional[str] = None ): if not os.path.isdir(_a ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return snake_case__ = os.path.join( _a , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , _a ) elif not os.path.isfile(self.vocab_file ): with open(_a , '''wb''' ) as fi: snake_case__ = self.sp_model.serialized_model_proto() fi.write(_a ) return (out_vocab_file,)
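The _convert_token_to_id/_convert_id_to_token pair above maps sentinel tokens to the top of the vocabulary in reverse order. A small sketch of that convention (the vocabulary size is illustrative; removeprefix/removesuffix need Python 3.9+):

vocab_size = 32100  # illustrative: sp_model size + extra_ids

def sentinel_token_to_id(token: str) -> int:
    # "<extra_id_0>" gets the highest id, "<extra_id_1>" the next one down, ...
    num = int(token.removeprefix("<extra_id_").removesuffix(">"))
    return vocab_size - num - 1

assert sentinel_token_to_id("<extra_id_0>") == vocab_size - 1
assert sentinel_token_to_id("<extra_id_99>") == vocab_size - 100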
import numpy as np import torch from torch.utils.data import Dataset from utils import logger class lowerCamelCase_ ( lowerCamelCase ): def __init__( self , __lowerCAmelCase , __lowerCAmelCase ): """simple docstring""" __magic_name__ :Optional[int] = params __magic_name__ :Any = np.array(__lowerCAmelCase ) __magic_name__ :Optional[Any] = np.array([len(__lowerCAmelCase ) for t in data] ) self.check() self.remove_long_sequences() self.remove_empty_sequences() self.remove_unknown_sequences() self.check() self.print_statistics() def __getitem__( self , __lowerCAmelCase ): """simple docstring""" return (self.token_ids[index], self.lengths[index]) def __len__( self ): """simple docstring""" return len(self.lengths ) def A ( self ): """simple docstring""" assert len(self.token_ids ) == len(self.lengths ) assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) ) def A ( self ): """simple docstring""" __magic_name__ :Any = self.params.max_model_input_size __magic_name__ :int = self.lengths > max_len logger.info(F'''Splitting {sum(__lowerCAmelCase )} too long sequences.''' ) def divide_chunks(__lowerCAmelCase , __lowerCAmelCase ): return [l[i : i + n] for i in range(0 , len(__lowerCAmelCase ) , __lowerCAmelCase )] __magic_name__ :Optional[int] = [] __magic_name__ :List[Any] = [] if self.params.mlm: __magic_name__ , __magic_name__ :Optional[Any] = self.params.special_tok_ids['''cls_token'''], self.params.special_tok_ids['''sep_token'''] else: __magic_name__ , __magic_name__ :Tuple = self.params.special_tok_ids['''bos_token'''], self.params.special_tok_ids['''eos_token'''] for seq_, len_ in zip(self.token_ids , self.lengths ): assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_ if len_ <= max_len: new_tok_ids.append(seq_ ) new_lengths.append(len_ ) else: __magic_name__ :int = [] for sub_s in divide_chunks(seq_ , max_len - 2 ): if sub_s[0] != cls_id: __magic_name__ :List[Any] = np.insert(__lowerCAmelCase , 0 , __lowerCAmelCase ) if sub_s[-1] != sep_id: __magic_name__ :Union[str, Any] = np.insert(__lowerCAmelCase , len(__lowerCAmelCase ) , __lowerCAmelCase ) assert len(__lowerCAmelCase ) <= max_len assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s sub_seqs.append(__lowerCAmelCase ) new_tok_ids.extend(__lowerCAmelCase ) new_lengths.extend([len(__lowerCAmelCase ) for l in sub_seqs] ) __magic_name__ :Tuple = np.array(__lowerCAmelCase ) __magic_name__ :Optional[int] = np.array(__lowerCAmelCase ) def A ( self ): """simple docstring""" __magic_name__ :Optional[Any] = len(self ) __magic_name__ :int = self.lengths > 1_1 __magic_name__ :List[str] = self.token_ids[indices] __magic_name__ :Union[str, Any] = self.lengths[indices] __magic_name__ :List[str] = len(self ) logger.info(F'''Remove {init_size - new_size} too short (<=11 tokens) sequences.''' ) def A ( self ): """simple docstring""" if "unk_token" not in self.params.special_tok_ids: return else: __magic_name__ :Tuple = self.params.special_tok_ids['''unk_token'''] __magic_name__ :Dict = len(self ) __magic_name__ :Tuple = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] ) __magic_name__ :int = (unk_occs / self.lengths) < 0.5 __magic_name__ :str = self.token_ids[indices] __magic_name__ :str = self.lengths[indices] __magic_name__ :Any = len(self ) logger.info(F'''Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).''' ) def A ( self ): """simple docstring""" if not self.params.is_master: return logger.info(F'''{len(self )} sequences''' ) # data_len = 
sum(self.lengths) # nb_unique_tokens = len(Counter(list(chain(*self.token_ids)))) # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)') # unk_idx = self.params.special_tok_ids['unk_token'] # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids]) # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)') def A ( self , __lowerCAmelCase ): """simple docstring""" __magic_name__ :Optional[Any] = [t[0] for t in batch] __magic_name__ :List[Any] = [t[1] for t in batch] assert len(__lowerCAmelCase ) == len(__lowerCAmelCase ) # Max for paddings __magic_name__ :Tuple = max(__lowerCAmelCase ) # Pad token ids if self.params.mlm: __magic_name__ :Any = self.params.special_tok_ids['''pad_token'''] else: __magic_name__ :str = self.params.special_tok_ids['''unk_token'''] __magic_name__ :Any = [list(t.astype(__lowerCAmelCase ) ) + [pad_idx] * (max_seq_len_ - len(__lowerCAmelCase )) for t in token_ids] assert len(tk_ ) == len(__lowerCAmelCase ) assert all(len(__lowerCAmelCase ) == max_seq_len_ for t in tk_ ) __magic_name__ :Optional[int] = torch.tensor(tk_ ) # (bs, max_seq_len_) __magic_name__ :Optional[int] = torch.tensor(__lowerCAmelCase ) # (bs) return tk_t, lg_t
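The batch_sequences logic at the end pads every sequence to the batch maximum before stacking. A self-contained sketch of that collate, with a fixed pad id standing in for params.special_tok_ids (an assumption for the example):

import torch

def pad_batch(token_ids, pad_idx=0):
    lengths = [len(t) for t in token_ids]
    max_len = max(lengths)
    padded = [list(t) + [pad_idx] * (max_len - len(t)) for t in token_ids]
    return torch.tensor(padded), torch.tensor(lengths)  # (bs, max_len), (bs,)

tk_t, lg_t = pad_batch([[5, 6, 7], [8, 9]])
print(tk_t.shape, lg_t.tolist())  # torch.Size([2, 3]) [3, 2]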
"""simple docstring""" from math import factorial class snake_case_ : """simple docstring""" def __init__( self , lowerCamelCase_ , lowerCamelCase_) -> str: UpperCamelCase = real if isinstance(lowerCamelCase_ , lowerCamelCase_): UpperCamelCase = [1] * rank else: UpperCamelCase = rank def __repr__( self) -> Any: return ( F'{self.real}+' F'{"+".join(str(lowerCamelCase_)+"E"+str(n+1)for n,dual in enumerate(self.duals))}' ) def UpperCAmelCase__ ( self) -> Optional[Any]: UpperCamelCase = self.duals.copy() while cur[-1] == 0: cur.pop(-1) return Dual(self.real , lowerCamelCase_) def __add__( self , lowerCamelCase_) -> List[str]: if not isinstance(lowerCamelCase_ , lowerCamelCase_): return Dual(self.real + other , self.duals) UpperCamelCase = self.duals.copy() UpperCamelCase = other.duals.copy() if len(lowerCamelCase_) > len(lowerCamelCase_): o_dual.extend([1] * (len(lowerCamelCase_) - len(lowerCamelCase_))) elif len(lowerCamelCase_) < len(lowerCamelCase_): s_dual.extend([1] * (len(lowerCamelCase_) - len(lowerCamelCase_))) UpperCamelCase = [] for i in range(len(lowerCamelCase_)): new_duals.append(s_dual[i] + o_dual[i]) return Dual(self.real + other.real , lowerCamelCase_) A_ = __add__ def __sub__( self , lowerCamelCase_) -> str: return self + other * -1 def __mul__( self , lowerCamelCase_) -> Union[str, Any]: if not isinstance(lowerCamelCase_ , lowerCamelCase_): UpperCamelCase = [] for i in self.duals: new_duals.append(i * other) return Dual(self.real * other , lowerCamelCase_) UpperCamelCase = [0] * (len(self.duals) + len(other.duals) + 1) for i, item in enumerate(self.duals): for j, jtem in enumerate(other.duals): new_duals[i + j + 1] += item * jtem for k in range(len(self.duals)): new_duals[k] += self.duals[k] * other.real for index in range(len(other.duals)): new_duals[index] += other.duals[index] * self.real return Dual(self.real * other.real , lowerCamelCase_) A_ = __mul__ def __truediv__( self , lowerCamelCase_) -> List[str]: if not isinstance(lowerCamelCase_ , lowerCamelCase_): UpperCamelCase = [] for i in self.duals: new_duals.append(i / other) return Dual(self.real / other , lowerCamelCase_) raise ValueError def __floordiv__( self , lowerCamelCase_) -> Optional[Any]: if not isinstance(lowerCamelCase_ , lowerCamelCase_): UpperCamelCase = [] for i in self.duals: new_duals.append(i // other) return Dual(self.real // other , lowerCamelCase_) raise ValueError def __pow__( self , lowerCamelCase_) -> str: if n < 0 or isinstance(lowerCamelCase_ , lowerCamelCase_): raise ValueError('''power must be a positive integer''') if n == 0: return 1 if n == 1: return self UpperCamelCase = self for _ in range(n - 1): x *= self return x def __snake_case ( _lowercase ,_lowercase ,_lowercase ): """simple docstring""" if not callable(_lowercase ): raise ValueError('''differentiate() requires a function as input for func''' ) if not isinstance(_lowercase ,(float, int) ): raise ValueError('''differentiate() requires a float as input for position''' ) if not isinstance(_lowercase ,_lowercase ): raise ValueError('''differentiate() requires an int as input for order''' ) UpperCamelCase = Dual(_lowercase ,1 ) UpperCamelCase = func(_lowercase ) if order == 0: return result.real return result.duals[order - 1] * factorial(_lowercase ) if __name__ == "__main__": import doctest doctest.testmod() def __snake_case ( _lowercase ): """simple docstring""" return y**2 * y**4 print(differentiate(f, 9, 2))
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging SCREAMING_SNAKE_CASE__ : str = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : Tuple = """▁""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = {"""vocab_file""": """spiece.model"""} SCREAMING_SNAKE_CASE__ : List[Any] = { """vocab_file""": { """google/reformer-crime-and-punishment""": ( """https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model""" ) } } SCREAMING_SNAKE_CASE__ : Optional[int] = { """google/reformer-crime-and-punishment""": 52_42_88, } class lowerCamelCase_ ( lowerCamelCase ): a__ = VOCAB_FILES_NAMES a__ = PRETRAINED_VOCAB_FILES_MAP a__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a__ = ['''input_ids''', '''attention_mask'''] def __init__( self , __lowerCAmelCase , __lowerCAmelCase="</s>" , __lowerCAmelCase="<unk>" , __lowerCAmelCase=[] , __lowerCAmelCase = None , **__lowerCAmelCase , ): """simple docstring""" __magic_name__ :int = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( eos_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , additional_special_tokens=__lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCAmelCase , ) __magic_name__ :Optional[Any] = vocab_file __magic_name__ :int = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(__lowerCAmelCase ) @property def A ( self ): """simple docstring""" return self.sp_model.get_piece_size() def A ( self ): """simple docstring""" __magic_name__ :str = {self.convert_ids_to_tokens(__lowerCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ): """simple docstring""" __magic_name__ :Optional[Any] = self.__dict__.copy() __magic_name__ :Optional[Any] = None return state def __setstate__( self , __lowerCAmelCase ): """simple docstring""" __magic_name__ :Any = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): __magic_name__ :Optional[int] = {} __magic_name__ :Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def A ( self , __lowerCAmelCase ): """simple docstring""" return self.sp_model.encode(__lowerCAmelCase , out_type=__lowerCAmelCase ) def A ( self , __lowerCAmelCase ): """simple docstring""" return self.sp_model.piece_to_id(__lowerCAmelCase ) def A ( self , __lowerCAmelCase ): """simple docstring""" if index < self.sp_model.get_piece_size(): __magic_name__ :int = self.sp_model.IdToPiece(__lowerCAmelCase ) return token def A ( self , __lowerCAmelCase ): """simple docstring""" __magic_name__ :Optional[Any] = [] __magic_name__ :Tuple = '''''' for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(__lowerCAmelCase ) + token __magic_name__ :Optional[Any] = [] else: current_sub_tokens.append(__lowerCAmelCase ) out_string += self.sp_model.decode(__lowerCAmelCase ) return out_string.strip() def A ( self , __lowerCAmelCase , __lowerCAmelCase = None ): """simple docstring""" if not os.path.isdir(__lowerCAmelCase ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return __magic_name__ :Optional[int] = os.path.join( __lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file 
) != os.path.abspath(__lowerCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __lowerCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(__lowerCAmelCase , '''wb''' ) as fi: __magic_name__ :Dict = self.sp_model.serialized_model_proto() fi.write(__lowerCAmelCase ) return (out_vocab_file,)
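The __getstate__/__setstate__ pair above exists because the SentencePiece processor is not picklable: serialization drops the live processor and unpickling reloads it from vocab_file. The same pattern in miniature (UnpicklableHandle is a stand-in for spm.SentencePieceProcessor):

import copy

class UnpicklableHandle:
    def __init__(self, path):
        self.path = path

class Wrapper:
    def __init__(self, vocab_file):
        self.vocab_file = vocab_file
        self.handle = UnpicklableHandle(vocab_file)

    def __getstate__(self):
        state = self.__dict__.copy()
        state["handle"] = None  # drop the live handle before pickling
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        self.handle = UnpicklableHandle(self.vocab_file)  # rebuild on load

w2 = copy.deepcopy(Wrapper("spiece.model"))  # deepcopy goes through the same hooks
print(w2.handle.path)  # spiece.model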
import unittest import numpy as np from datasets import load_dataset from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import BeitImageProcessor class lowercase ( unittest.TestCase ): def __init__( self : Union[str, Any] , _lowercase : Tuple , _lowercase : Dict=7 , _lowercase : List[Any]=3 , _lowercase : str=18 , _lowercase : Optional[int]=30 , _lowercase : List[Any]=4_00 , _lowercase : Any=True , _lowercase : Optional[int]=None , _lowercase : int=True , _lowercase : List[str]=None , _lowercase : int=True , _lowercase : List[str]=[0.5, 0.5, 0.5] , _lowercase : Dict=[0.5, 0.5, 0.5] , _lowercase : Union[str, Any]=False , ): SCREAMING_SNAKE_CASE__ : Optional[int] = size if size is not None else {'''height''': 20, '''width''': 20} SCREAMING_SNAKE_CASE__ : Optional[int] = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18} SCREAMING_SNAKE_CASE__ : str = parent SCREAMING_SNAKE_CASE__ : Dict = batch_size SCREAMING_SNAKE_CASE__ : Optional[int] = num_channels SCREAMING_SNAKE_CASE__ : Optional[Any] = image_size SCREAMING_SNAKE_CASE__ : List[str] = min_resolution SCREAMING_SNAKE_CASE__ : Dict = max_resolution SCREAMING_SNAKE_CASE__ : int = do_resize SCREAMING_SNAKE_CASE__ : List[Any] = size SCREAMING_SNAKE_CASE__ : Union[str, Any] = do_center_crop SCREAMING_SNAKE_CASE__ : Tuple = crop_size SCREAMING_SNAKE_CASE__ : List[Any] = do_normalize SCREAMING_SNAKE_CASE__ : str = image_mean SCREAMING_SNAKE_CASE__ : int = image_std SCREAMING_SNAKE_CASE__ : Optional[int] = do_reduce_labels def lowercase__ ( self : Tuple ): return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_reduce_labels": self.do_reduce_labels, } def a ( ) -> Any: '''simple docstring''' SCREAMING_SNAKE_CASE__ : Any = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' ) SCREAMING_SNAKE_CASE__ : str = Image.open(dataset[0]['''file'''] ) SCREAMING_SNAKE_CASE__ : Dict = Image.open(dataset[1]['''file'''] ) return image, map def a ( ) -> str: '''simple docstring''' SCREAMING_SNAKE_CASE__ : str = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' ) SCREAMING_SNAKE_CASE__ : Any = Image.open(ds[0]['''file'''] ) SCREAMING_SNAKE_CASE__ : Dict = Image.open(ds[1]['''file'''] ) SCREAMING_SNAKE_CASE__ : List[str] = Image.open(ds[2]['''file'''] ) SCREAMING_SNAKE_CASE__ : List[str] = Image.open(ds[3]['''file'''] ) return [imagea, imagea], [mapa, mapa] @require_torch @require_vision class lowercase ( _UpperCAmelCase , unittest.TestCase ): lowerCamelCase : Tuple = BeitImageProcessor if is_vision_available() else None def lowercase__ ( self : List[Any] ): SCREAMING_SNAKE_CASE__ : Tuple = BeitImageProcessingTester(self ) @property def lowercase__ ( self : Optional[Any] ): return self.image_processor_tester.prepare_image_processor_dict() def lowercase__ ( self : Tuple ): SCREAMING_SNAKE_CASE__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_lowercase , '''do_resize''' ) ) self.assertTrue(hasattr(_lowercase , '''size''' ) ) self.assertTrue(hasattr(_lowercase , '''do_center_crop''' ) ) 
        self.assertTrue(hasattr(_lowercase , '''center_crop''' ) )
        self.assertTrue(hasattr(_lowercase , '''do_normalize''' ) )
        self.assertTrue(hasattr(_lowercase , '''image_mean''' ) )
        self.assertTrue(hasattr(_lowercase , '''image_std''' ) )

    def lowercase__ ( self : Optional[Any] ):
        SCREAMING_SNAKE_CASE__ : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'''height''': 20, '''width''': 20} )
        self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
        self.assertEqual(image_processor.do_reduce_labels , _lowercase )

        SCREAMING_SNAKE_CASE__ : Tuple = self.image_processing_class.from_dict(
            self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=_lowercase
        )
        self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
        self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
        self.assertEqual(image_processor.do_reduce_labels , _lowercase )

    def lowercase__ ( self : str ):
        pass

    def lowercase__ ( self : int ):
        # Initialize image_processing
        SCREAMING_SNAKE_CASE__ : Any = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        SCREAMING_SNAKE_CASE__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase )
        for image in image_inputs:
            self.assertIsInstance(_lowercase , Image.Image )

        # Test not batched input
        SCREAMING_SNAKE_CASE__ : Optional[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,
            (1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width''']) ,
        )

        # Test batched
        SCREAMING_SNAKE_CASE__ : List[Any] = image_processing(_lowercase , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,
            (self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width''']) ,
        )

    def lowercase__ ( self : Dict ):
        # Initialize image_processing
        SCREAMING_SNAKE_CASE__ : Dict = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        SCREAMING_SNAKE_CASE__ : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , numpify=_lowercase )
        for image in image_inputs:
            self.assertIsInstance(_lowercase , np.ndarray )

        # Test not batched input
        SCREAMING_SNAKE_CASE__ : Tuple = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,
            (1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width''']) ,
        )

        # Test batched
        SCREAMING_SNAKE_CASE__ : int = image_processing(_lowercase , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,
            (self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width''']) ,
        )

    def lowercase__ ( self : Optional[Any] ):
        # Initialize image_processing
        SCREAMING_SNAKE_CASE__ : List[Any] = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        SCREAMING_SNAKE_CASE__ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , torchify=_lowercase )
        for image in image_inputs:
            self.assertIsInstance(_lowercase , torch.Tensor )

        # Test not batched input
        SCREAMING_SNAKE_CASE__ : Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,
            (1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width''']) ,
        )

        # Test batched
        SCREAMING_SNAKE_CASE__ : Any = image_processing(_lowercase , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,
            (self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width''']) ,
        )

    def lowercase__ ( self : Optional[int] ):
        # Initialize image_processing
        SCREAMING_SNAKE_CASE__ : Any = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        SCREAMING_SNAKE_CASE__ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , torchify=_lowercase )
        SCREAMING_SNAKE_CASE__ : Any = []
        for image in image_inputs:
            self.assertIsInstance(_lowercase , torch.Tensor )
            maps.append(torch.zeros(image.shape[-2:] ).long() )

        # Test not batched input
        SCREAMING_SNAKE_CASE__ : Optional[Any] = image_processing(image_inputs[0] , maps[0] , return_tensors='''pt''' )
        self.assertEqual(
            encoding['''pixel_values'''].shape ,
            (1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width''']) ,
        )
        self.assertEqual(
            encoding['''labels'''].shape ,
            (1, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width''']) ,
        )
        self.assertEqual(encoding['''labels'''].dtype , torch.long )
        self.assertTrue(encoding['''labels'''].min().item() >= 0 )
        self.assertTrue(encoding['''labels'''].max().item() <= 255 )

        # Test batched
        SCREAMING_SNAKE_CASE__ : str = image_processing(_lowercase , _lowercase , return_tensors='''pt''' )
        self.assertEqual(
            encoding['''pixel_values'''].shape ,
            (self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width''']) ,
        )
        self.assertEqual(
            encoding['''labels'''].shape ,
            (self.image_processor_tester.batch_size, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width''']) ,
        )
        self.assertEqual(encoding['''labels'''].dtype , torch.long )
        self.assertTrue(encoding['''labels'''].min().item() >= 0 )
        self.assertTrue(encoding['''labels'''].max().item() <= 255 )

        # Test not batched input (PIL images)
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple = prepare_semantic_single_inputs()
        SCREAMING_SNAKE_CASE__ : int = image_processing(_lowercase , _lowercase , return_tensors='''pt''' )
        self.assertEqual(
            encoding['''pixel_values'''].shape ,
            (1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width''']) ,
        )
        self.assertEqual(
            encoding['''labels'''].shape ,
            (1, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width''']) ,
        )
        self.assertEqual(encoding['''labels'''].dtype , torch.long )
        self.assertTrue(encoding['''labels'''].min().item() >= 0 )
        self.assertTrue(encoding['''labels'''].max().item() <= 255 )

        # Test batched input (PIL images)
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = prepare_semantic_batch_inputs()
        SCREAMING_SNAKE_CASE__ : Any = image_processing(_lowercase , _lowercase , return_tensors='''pt''' )
        self.assertEqual(
            encoding['''pixel_values'''].shape ,
            (2, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width''']) ,
        )
        self.assertEqual(
            encoding['''labels'''].shape ,
            (2, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width''']) ,
        )
        self.assertEqual(encoding['''labels'''].dtype , torch.long )
        self.assertTrue(encoding['''labels'''].min().item() >= 0 )
        self.assertTrue(encoding['''labels'''].max().item() <= 255 )

    def lowercase__ ( self : int ):
        # Initialize image_processing
        SCREAMING_SNAKE_CASE__ : str = self.image_processing_class(**self.image_processor_dict )
        # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = prepare_semantic_single_inputs()
        SCREAMING_SNAKE_CASE__ : List[str] = image_processing(_lowercase , _lowercase , return_tensors='''pt''' )
        self.assertTrue(encoding['''labels'''].min().item() >= 0 )
        self.assertTrue(encoding['''labels'''].max().item() <= 150 )

        SCREAMING_SNAKE_CASE__ : List[Any] = True
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = image_processing(_lowercase , _lowercase , return_tensors='''pt''' )
        self.assertTrue(encoding['''labels'''].min().item() >= 0 )
        self.assertTrue(encoding['''labels'''].max().item() <= 255 )
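The reduce-labels assertions above follow the ADE20k convention used by these image processors. Below is a minimal sketch of that remapping, assuming the background class 0 becomes the ignore index 255 and every remaining label shifts down by one; `reduce_labels_map` is an illustrative helper, not the processor's actual method.

import numpy as np


def reduce_labels_map(segmentation_map):
    # background (0) -> ignore index (255); everything else shifts down by one
    label = segmentation_map.astype(np.int64)
    label[label == 0] = 255
    label = label - 1
    label[label == 254] = 255  # keep the ignore index pinned at 255
    return label


print(reduce_labels_map(np.array([[0, 1], [150, 2]])))  # [[255, 0], [149, 1]]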
import os
import unittest

from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
    VOCAB_FILES_NAMES,
    BasicTokenizer,
    WordpieceTokenizer,
    _is_control,
    _is_punctuation,
    _is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english


@require_tokenizers
class lowerCamelCase_ ( lowerCamelCase , unittest.TestCase ):
    a__ = MobileBertTokenizer
    a__ = MobileBertTokenizerFast
    a__ = True
    a__ = True
    a__ = filter_non_english
    a__ = '''google/mobilebert-uncased'''

    def A ( self ):
        """simple docstring"""
        super().setUp()

        __magic_name__ :Tuple = [
            '''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''',
            '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''',
        ]
        __magic_name__ :Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )

        __magic_name__ :List[str] = [
            (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2])  # else the 'google/' prefix is stripped
            for tokenizer_def in self.tokenizers_list
        ]

    def A ( self , __lowerCAmelCase ):
        """simple docstring"""
        __magic_name__ :Union[str, Any] = '''UNwant\u00E9d,running'''
        __magic_name__ :int = '''unwanted, running'''
        return input_text, output_text

    def A ( self ):
        """simple docstring"""
        __magic_name__ :Optional[int] = self.tokenizer_class(self.vocab_file )
        __magic_name__ :List[Any] = tokenizer.tokenize('''UNwant\u00E9d,running''' )
        self.assertListEqual(__lowerCAmelCase , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) , [9, 6, 7, 12, 10, 11] )

    def A ( self ):
        """simple docstring"""
        if not self.test_rust_tokenizer:
            return

        __magic_name__ :int = self.get_tokenizer()
        __magic_name__ :Tuple = self.get_rust_tokenizer()

        __magic_name__ :List[str] = '''UNwant\u00E9d,running'''

        __magic_name__ :Optional[Any] = tokenizer.tokenize(__lowerCAmelCase )
        __magic_name__ :List[Any] = rust_tokenizer.tokenize(__lowerCAmelCase )
        self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )

        __magic_name__ :int = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
        __magic_name__ :str = rust_tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
        self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )

        __magic_name__ :List[Any] = self.get_rust_tokenizer()
        __magic_name__ :Any = tokenizer.encode(__lowerCAmelCase )
        __magic_name__ :Any = rust_tokenizer.encode(__lowerCAmelCase )
        self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )

        # With lower casing
        __magic_name__ :Any = self.get_tokenizer(do_lower_case=__lowerCAmelCase )
        __magic_name__ :List[Any] = self.get_rust_tokenizer(do_lower_case=__lowerCAmelCase )

        __magic_name__ :Dict = '''UNwant\u00E9d,running'''

        __magic_name__ :Tuple = tokenizer.tokenize(__lowerCAmelCase )
        __magic_name__ :Union[str, Any] = rust_tokenizer.tokenize(__lowerCAmelCase )
        self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )

        __magic_name__ :Optional[Any] = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
        __magic_name__ :Dict = rust_tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
        self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )

        __magic_name__ :Tuple = self.get_rust_tokenizer()
        __magic_name__ :Dict = tokenizer.encode(__lowerCAmelCase )
        __magic_name__ :List[Any] = rust_tokenizer.encode(__lowerCAmelCase )
        self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )

    def A ( self ):
        """simple docstring"""
        __magic_name__ :Optional[int] = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )

    def A ( self ):
        """simple docstring"""
        __magic_name__ :List[Any] = BasicTokenizer(do_lower_case=__lowerCAmelCase )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?''']
        )
        self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )

    def A ( self ):
        """simple docstring"""
        __magic_name__ :Union[str, Any] = BasicTokenizer(do_lower_case=__lowerCAmelCase , strip_accents=__lowerCAmelCase )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?''']
        )
        self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] )

    def A ( self ):
        """simple docstring"""
        __magic_name__ :Dict = BasicTokenizer(do_lower_case=__lowerCAmelCase , strip_accents=__lowerCAmelCase )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?''']
        )
        self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )

    def A ( self ):
        """simple docstring"""
        __magic_name__ :Optional[int] = BasicTokenizer(do_lower_case=__lowerCAmelCase )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?''']
        )
        self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )

    def A ( self ):
        """simple docstring"""
        __magic_name__ :List[str] = BasicTokenizer(do_lower_case=__lowerCAmelCase )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''']
        )

    def A ( self ):
        """simple docstring"""
        __magic_name__ :int = BasicTokenizer(do_lower_case=__lowerCAmelCase , strip_accents=__lowerCAmelCase )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''']
        )

    def A ( self ):
        """simple docstring"""
        __magic_name__ :Optional[int] = BasicTokenizer(do_lower_case=__lowerCAmelCase , strip_accents=__lowerCAmelCase )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''']
        )

    def A ( self ):
        """simple docstring"""
        __magic_name__ :Optional[Any] = BasicTokenizer(do_lower_case=__lowerCAmelCase , never_split=['''[UNK]'''] )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]''']
        )

    def A ( self ):
        """simple docstring"""
        __magic_name__ :int = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
        __magic_name__ :Union[str, Any] = {}
        for i, token in enumerate(__lowerCAmelCase ):
            __magic_name__ :Tuple = i
        __magic_name__ :List[Any] = WordpieceTokenizer(vocab=__lowerCAmelCase , unk_token='''[UNK]''' )

        self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
        self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
        self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] )

    def A ( self ):
        """simple docstring"""
        self.assertTrue(_is_whitespace(''' ''' ) )
        self.assertTrue(_is_whitespace('''\t''' ) )
        self.assertTrue(_is_whitespace('''\r''' ) )
        self.assertTrue(_is_whitespace('''\n''' ) )
        self.assertTrue(_is_whitespace('''\u00A0''' ) )

        self.assertFalse(_is_whitespace('''A''' ) )
        self.assertFalse(_is_whitespace('''-''' ) )

    def A ( self ):
        """simple docstring"""
        self.assertTrue(_is_control('''\u0005''' ) )

        self.assertFalse(_is_control('''A''' ) )
        self.assertFalse(_is_control(''' ''' ) )
        self.assertFalse(_is_control('''\t''' ) )
        self.assertFalse(_is_control('''\r''' ) )

    def A ( self ):
        """simple docstring"""
        self.assertTrue(_is_punctuation('''-''' ) )
        self.assertTrue(_is_punctuation('''$''' ) )
        self.assertTrue(_is_punctuation('''`''' ) )
        self.assertTrue(_is_punctuation('''.''' ) )

        self.assertFalse(_is_punctuation('''A''' ) )
        self.assertFalse(_is_punctuation(''' ''' ) )

    def A ( self ):
        """simple docstring"""
        __magic_name__ :Any = self.get_tokenizer()
        __magic_name__ :Any = self.get_rust_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(__lowerCAmelCase ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
        self.assertListEqual(
            [rust_tokenizer.tokenize(__lowerCAmelCase ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']]
        )

    @slow
    def A ( self ):
        """simple docstring"""
        __magic_name__ :Optional[int] = self.tokenizer_class.from_pretrained('''google/mobilebert-uncased''' )

        __magic_name__ :Optional[int] = tokenizer.encode('''sequence builders''' , add_special_tokens=__lowerCAmelCase )
        __magic_name__ :List[Any] = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__lowerCAmelCase )

        __magic_name__ :Union[str, Any] = tokenizer.build_inputs_with_special_tokens(__lowerCAmelCase )
        __magic_name__ :List[Any] = tokenizer.build_inputs_with_special_tokens(__lowerCAmelCase , __lowerCAmelCase )

        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_a + [102]

    def A ( self ):
        """simple docstring"""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                __magic_name__ :Optional[Any] = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase )

                __magic_name__ :Optional[int] = F'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
                __magic_name__ :Optional[Any] = tokenizer_r.encode_plus(
                    __lowerCAmelCase ,
                    return_attention_mask=__lowerCAmelCase ,
                    return_token_type_ids=__lowerCAmelCase ,
                    return_offsets_mapping=__lowerCAmelCase ,
                    add_special_tokens=__lowerCAmelCase ,
                )

                __magic_name__ :Any = tokenizer_r.do_lower_case if hasattr(__lowerCAmelCase , '''do_lower_case''' ) else False
                __magic_name__ :Optional[int] = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), '''A'''),
                        ((1, 2), ''','''),
                        ((3, 5), '''na'''),
                        ((5, 6), '''##ï'''),
                        ((6, 8), '''##ve'''),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), '''Allen'''),
                        ((21, 23), '''##NL'''),
                        ((23, 24), '''##P'''),
                        ((25, 33), '''sentence'''),
                        ((33, 34), '''.'''),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), '''a'''),
                        ((1, 2), ''','''),
                        ((3, 8), '''naive'''),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), '''allen'''),
                        ((21, 23), '''##nl'''),
                        ((23, 24), '''##p'''),
                        ((25, 33), '''sentence'''),
                        ((33, 34), '''.'''),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )

                self.assertEqual(
                    [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] )
                )
                self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] )

    def A ( self ):
        """simple docstring"""
        __magic_name__ :Dict = ['''的''', '''人''', '''有''']
        __magic_name__ :Any = ''''''.join(__lowerCAmelCase )
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                __magic_name__ :Optional[Any] = True
                __magic_name__ :Optional[int] = self.tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
                __magic_name__ :Tuple = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
                __magic_name__ :Dict = tokenizer_p.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
                __magic_name__ :List[str] = tokenizer_r.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
                __magic_name__ :Dict = tokenizer_r.convert_ids_to_tokens(__lowerCAmelCase )
                __magic_name__ :Union[str, Any] = tokenizer_p.convert_ids_to_tokens(__lowerCAmelCase )

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
                self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )

                __magic_name__ :List[str] = False
                __magic_name__ :Tuple = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
                __magic_name__ :List[str] = self.tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
                __magic_name__ :Optional[Any] = tokenizer_r.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
                __magic_name__ :Union[str, Any] = tokenizer_p.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
                __magic_name__ :List[str] = tokenizer_r.convert_ids_to_tokens(__lowerCAmelCase )
                __magic_name__ :Optional[int] = tokenizer_p.convert_ids_to_tokens(__lowerCAmelCase )

                # it is expected that only the first Chinese character is not preceded by "##".
                __magic_name__ :Dict = [
                    F'''##{token}''' if idx != 0 else token for idx, token in enumerate(__lowerCAmelCase )
                ]
                self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
                self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
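The WordPiece expectations exercised above follow a greedy longest-match-first lookup. A compact sketch of that algorithm using the toy vocabulary from the test (the helper name `wordpiece` is ours, not the library's):

def wordpiece(word, vocab, unk="[UNK]"):
    tokens, start = [], 0
    while start < len(word):
        end, piece = len(word), None
        while start < end:
            # continuation pieces are looked up with a "##" prefix
            candidate = word[start:end] if start == 0 else "##" + word[start:end]
            if candidate in vocab:
                piece = candidate
                break
            end -= 1
        if piece is None:
            return [unk]  # no prefix matched: the whole word becomes [UNK]
        tokens.append(piece)
        start = end
    return tokens


print(wordpiece("unwanted", {"un", "##want", "##ed", "runn", "##ing"}))  # ['un', '##want', '##ed']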
def solution(n: int = 100) -> int:
    """Count the distinct terms generated by a**b for 2 <= a <= n and 2 <= b <= n."""
    collect_powers = set()
    current_pow = 0
    n = n + 1  # maximum limit (exclusive bound for range)
    for a in range(2, n):
        for b in range(2, n):
            current_pow = a**b  # calculates the current power
            collect_powers.add(current_pow)  # adds the result to the set
    return len(collect_powers)


if __name__ == "__main__":
    print("Number of terms ", solution(int(str(input()).strip())))
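A sanity check on the counting logic (assuming the restored names above): for a limit of 5 the powers can be enumerated by hand, and 16 is the only duplicate.

# 2**b: 4, 8, 16, 32    3**b: 9, 27, 81, 243
# 4**b: 16, 64, 256, 1024    5**b: 25, 125, 625, 3125
# 16 appears twice (2**4 == 4**2), so there are 15 distinct terms.
assert solution(5) == 15
assert solution(100) == 9183  # the published Project Euler 29 answer for the default limit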
import logging
import os

import quant_trainer
import torch
from torch.utils.data import DataLoader

from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput


SCREAMING_SNAKE_CASE__ : List[str] = logging.getLogger(__name__)

if is_torch_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm
    import torch_xla.debug.metrics as met


class lowerCamelCase_ ( lowerCamelCase ):
    def __init__( self , *__lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , **__lowerCAmelCase ):
        """simple docstring"""
        super().__init__(*__lowerCAmelCase , **__lowerCAmelCase )
        __magic_name__ :Any = eval_examples
        __magic_name__ :str = post_process_function
        __magic_name__ :int = quant_trainer_args
        __magic_name__ :List[str] = 128  # default number of calibration samples

    def A ( self , __lowerCAmelCase=None ):
        """simple docstring"""
        if calib_dataset is None and self.calib_dataset is None:
            raise ValueError('''Trainer: calibration requires an calib_dataset.''' )
        __magic_name__ :Optional[Any] = calib_dataset if calib_dataset is not None else self.calib_dataset

        __magic_name__ :Optional[int] = self._remove_unused_columns(__lowerCAmelCase , description='''Calibration''' )
        return DataLoader(
            __lowerCAmelCase ,
            batch_size=self.args.eval_batch_size ,
            collate_fn=self.data_collator ,
            drop_last=self.args.dataloader_drop_last ,
            num_workers=self.args.dataloader_num_workers ,
            pin_memory=self.args.dataloader_pin_memory ,
            shuffle=__lowerCAmelCase ,
        )

    def A ( self , __lowerCAmelCase=None ):
        """simple docstring"""
        __magic_name__ :Dict = self.train_dataset if calib_dataset is None else calib_dataset
        __magic_name__ :Any = self.get_calib_dataloader(__lowerCAmelCase )
        __magic_name__ :List[str] = self.model
        quant_trainer.configure_model(__lowerCAmelCase , self.quant_trainer_args , calib=__lowerCAmelCase )
        model.eval()
        quant_trainer.enable_calibration(__lowerCAmelCase )

        logger.info('''***** Running calibration *****''' )
        logger.info(F'''  Num examples = {self.calib_num}''' )
        logger.info(F'''  Batch size = {calib_dataloader.batch_size}''' )
        for step, inputs in enumerate(__lowerCAmelCase ):
            # Prediction step
            __magic_name__ , __magic_name__ , __magic_name__ :str = self.prediction_step(__lowerCAmelCase , __lowerCAmelCase , prediction_loss_only=__lowerCAmelCase )
            if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
                break

        quant_trainer.finish_calibration(__lowerCAmelCase , self.quant_trainer_args )
        __magic_name__ :Any = model

    def A ( self , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase = "eval" ):
        """simple docstring"""
        __magic_name__ :Tuple = self.eval_dataset if eval_dataset is None else eval_dataset
        __magic_name__ :Optional[Any] = self.get_eval_dataloader(__lowerCAmelCase )
        __magic_name__ :str = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        __magic_name__ :Any = self.compute_metrics
        __magic_name__ :List[Any] = None
        __magic_name__ :List[str] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            __magic_name__ :Optional[Any] = eval_loop(
                __lowerCAmelCase ,
                description='''Evaluation''' ,
                prediction_loss_only=True if compute_metrics is None else None ,
                ignore_keys=__lowerCAmelCase ,
            )
        finally:
            __magic_name__ :Union[str, Any] = compute_metrics

        if self.post_process_function is not None and self.compute_metrics is not None:
            __magic_name__ :Union[str, Any] = self.post_process_function(__lowerCAmelCase , __lowerCAmelCase , output.predictions )
            __magic_name__ :int = self.compute_metrics(__lowerCAmelCase )

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys() ):
                if not key.startswith(F'''{metric_key_prefix}_''' ):
                    __magic_name__ :Dict = metrics.pop(__lowerCAmelCase )

            self.log(__lowerCAmelCase )
        else:
            __magic_name__ :List[str] = {}

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report() )

        __magic_name__ :Optional[Any] = self.callback_handler.on_evaluate(self.args , self.state , self.control , __lowerCAmelCase )
        return metrics

    def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase = "test" ):
        """simple docstring"""
        __magic_name__ :int = self.get_test_dataloader(__lowerCAmelCase )

        # Temporarily disable metric computation, we will do it in the loop here.
        __magic_name__ :Dict = self.compute_metrics
        __magic_name__ :str = None
        __magic_name__ :Optional[int] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            __magic_name__ :int = eval_loop(
                __lowerCAmelCase ,
                description='''Prediction''' ,
                prediction_loss_only=True if compute_metrics is None else None ,
                ignore_keys=__lowerCAmelCase ,
            )
        finally:
            __magic_name__ :List[Any] = compute_metrics

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        __magic_name__ :Optional[Any] = self.post_process_function(__lowerCAmelCase , __lowerCAmelCase , output.predictions , '''predict''' )
        __magic_name__ :Dict = self.compute_metrics(__lowerCAmelCase )

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys() ):
            if not key.startswith(F'''{metric_key_prefix}_''' ):
                __magic_name__ :List[str] = metrics.pop(__lowerCAmelCase )

        return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=__lowerCAmelCase )

    def A ( self , __lowerCAmelCase="./" ):
        """simple docstring"""
        __magic_name__ :List[Any] = self.eval_dataset
        __magic_name__ :Any = self.get_eval_dataloader(__lowerCAmelCase )
        __magic_name__ :int = next(iter(__lowerCAmelCase ) )

        # saving device - to make it consistent
        __magic_name__ :str = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )

        # convert to tuple
        __magic_name__ :int = tuple(v.to(__lowerCAmelCase ) for k, v in batch.items() )

        logger.info('''Converting model to be onnx compatible''' )
        from pytorch_quantization.nn import TensorQuantizer

        __magic_name__ :Any = True

        __magic_name__ :Optional[int] = self.model.to(__lowerCAmelCase )
        model.eval()
        model.float()

        __magic_name__ :Any = model.module if hasattr(__lowerCAmelCase , '''module''' ) else model
        quant_trainer.configure_model(__lowerCAmelCase , self.quant_trainer_args )

        __magic_name__ :int = os.path.join(__lowerCAmelCase , '''model.onnx''' )
        logger.info(F'''exporting model to {output_model_file}''' )

        __magic_name__ :Dict = {0: '''batch_size''', 1: '''seq_len'''}
        torch.onnx.export(
            __lowerCAmelCase ,
            __lowerCAmelCase ,
            __lowerCAmelCase ,
            export_params=__lowerCAmelCase ,
            opset_version=13 ,
            do_constant_folding=__lowerCAmelCase ,
            input_names=['''input_ids''', '''attention_mask''', '''token_type_ids'''] ,
            output_names=['''output_start_logits''', '''output_end_logits'''] ,
            dynamic_axes={
                '''input_ids''': axes,
                '''attention_mask''': axes,
                '''token_type_ids''': axes,
                '''output_start_logits''': axes,
                '''output_end_logits''': axes,
            } ,
            verbose=__lowerCAmelCase ,
        )
        logger.info('''onnx export finished''' )
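A minimal sketch of consuming the exported model with onnxruntime (assumed installed); the input and output names come from the torch.onnx.export call above, while the model path is hypothetical.

import numpy as np
import onnxruntime as ort

session = ort.InferenceSession("./model.onnx")  # hypothetical export location
dummy = {
    "input_ids": np.ones((1, 128), dtype=np.int64),
    "attention_mask": np.ones((1, 128), dtype=np.int64),
    "token_type_ids": np.zeros((1, 128), dtype=np.int64),
}
start_logits, end_logits = session.run(["output_start_logits", "output_end_logits"], dummy)
print(start_logits.shape, end_logits.shape)  # one score per token for start and end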
import argparse
import json
from typing import List

from ltp import LTP

from transformers import BertTokenizer


def UpperCamelCase_ ( __a ) -> Optional[int]:
    # This defines a "chinese character" as anything in the CJK Unicode block:
    #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
    #
    # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
    # despite its name. The modern Korean Hangul alphabet is a different block,
    # as is Japanese Hiragana and Katakana. Those alphabets are used to write
    # space-separated words, so they are not treated specially and handled
    # like the all of the other languages.
    if (
        (cp >= 0X4e00 and cp <= 0X9fff)
        or (cp >= 0X3400 and cp <= 0X4dbf)  #
        or (cp >= 0X20000 and cp <= 0X2a6df)  #
        or (cp >= 0X2a700 and cp <= 0X2b73f)  #
        or (cp >= 0X2b740 and cp <= 0X2b81f)  #
        or (cp >= 0X2b820 and cp <= 0X2ceaf)  #
        or (cp >= 0Xf900 and cp <= 0Xfaff)
        or (cp >= 0X2f800 and cp <= 0X2fa1f)  #
    ):  #
        return True
    return False


def UpperCamelCase_ ( __a ) -> Optional[int]:
    # word like '180' or '身高' or '神'
    for char in word:
        a__ : Optional[Any] = ord(__a )
        if not _is_chinese_char(__a ):
            return 0
    return 1


def UpperCamelCase_ ( __a ) -> Optional[int]:
    a__ : Optional[Any] = set()
    for token in tokens:
        a__ : Optional[Any] = len(__a ) > 1 and is_chinese(__a )
        if chinese_word:
            word_set.add(__a )
    a__ : Optional[Any] = list(__a )
    return word_list


def UpperCamelCase_ ( __a , __a ) -> Optional[int]:
    if not chinese_word_set:
        return bert_tokens
    a__ : int = max([len(__a ) for w in chinese_word_set] )

    a__ : Tuple = bert_tokens
    a__, a__ : Union[str, Any] = 0, len(__a )
    while start < end:
        a__ : Tuple = True
        if is_chinese(bert_word[start] ):
            a__ : Optional[int] = min(end - start , __a )
            for i in range(__a , 1 , -1 ):
                a__ : Tuple = "".join(bert_word[start : start + i] )
                if whole_word in chinese_word_set:
                    for j in range(start + 1 , start + i ):
                        a__ : Union[str, Any] = "##" + bert_word[j]
                    a__ : str = start + i
                    a__ : Optional[Any] = False
                    break
        if single_word:
            start += 1
    return bert_word


def UpperCamelCase_ ( __a , __a , __a ) -> Optional[Any]:
    a__ : Any = []
    for i in range(0 , len(__a ) , 100 ):
        a__ : List[str] = ltp_tokenizer.seg(lines[i : i + 100] )[0]
        a__ : Optional[Any] = [get_chinese_word(__a ) for r in res]
        ltp_res.extend(__a )
    assert len(__a ) == len(__a )

    a__ : List[Any] = []
    for i in range(0 , len(__a ) , 100 ):
        a__ : List[Any] = bert_tokenizer(lines[i : i + 100] , add_special_tokens=__a , truncation=__a , max_length=512 )
        bert_res.extend(res["input_ids"] )
    assert len(__a ) == len(__a )

    a__ : List[Any] = []
    for input_ids, chinese_word in zip(__a , __a ):
        a__ : List[Any] = []
        for id in input_ids:
            a__ : Tuple = bert_tokenizer._convert_id_to_token(__a )
            input_tokens.append(__a )
        a__ : List[str] = add_sub_symbol(__a , __a )
        a__ : Tuple = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(__a ):
            if token[:2] == "##":
                a__ : Optional[Any] = token[2:]
                # save chinese tokens' pos
                if len(__a ) == 1 and _is_chinese_char(ord(__a ) ):
                    ref_id.append(__a )
        ref_ids.append(__a )

    assert len(__a ) == len(__a )
    return ref_ids


def UpperCamelCase_ ( __a ) -> Dict:
    # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
    with open(args.file_name , "r" , encoding="utf-8" ) as f:
        a__ : int = f.readlines()
    a__ : Dict = [line.strip() for line in data if len(__a ) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    a__ : List[Any] = LTP(args.ltp )  # faster in GPU device
    a__ : List[str] = BertTokenizer.from_pretrained(args.bert )

    a__ : Optional[int] = prepare_ref(__a , __a , __a )

    with open(args.save_path , "w" , encoding="utf-8" ) as f:
        a__ : List[str] = [json.dumps(__a ) + "\n" for ref in ref_ids]
        f.writelines(__a )


if __name__ == "__main__":
    UpperCamelCase : List[str] = argparse.ArgumentParser(description="""prepare_chinese_ref""")
    parser.add_argument(
        """--file_name""",
        type=str,
        default="""./resources/chinese-demo.txt""",
        help="""file need process, same as training data in lm""",
    )
    parser.add_argument(
        """--ltp""", type=str, default="""./resources/ltp""", help="""resources for LTP tokenizer, usually a path"""
    )
    parser.add_argument("""--bert""", type=str, default="""./resources/robert""", help="""resources for Bert tokenizer""")
    parser.add_argument("""--save_path""", type=str, default="""./resources/ref.txt""", help="""path to save res""")

    UpperCamelCase : Dict = parser.parse_args()
    main(args)
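A toy illustration of what the sub-symbol marking step produces (a sketch with a made-up word segmentation, not output from the real LTP tokenizer): characters that continue an LTP-segmented word get a "##" prefix, and the positions of those continuation characters are what end up in the whole-word-mask reference ids.

bert_tokens = ["[CLS]", "你", "好", "世", "界", "[SEP]"]
ltp_words = {"你好", "世界"}  # hypothetical whole-word segmentation
# after the sub-symbol marking, continuation characters carry "##":
marked = ["[CLS]", "你", "##好", "世", "##界", "[SEP]"]
ref_ids = [i for i, tok in enumerate(marked) if tok.startswith("##")]
print(ref_ids)  # [2, 4] -> positions eligible for whole word masking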
def base16_encode(data: bytes) -> str:
    """Encode raw bytes as an uppercase base16 (hex) string."""
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    """Decode an uppercase base16 (hex) string back into bytes."""
    if (len(data) % 2) != 0:
        raise ValueError(
            '''Base16 encoded data is invalid: Data does not have an even number of hex digits.'''
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set('''0123456789ABCDEF''' ):
        raise ValueError(
            '''Base16 encoded data is invalid: Data is not uppercase hex or it contains invalid characters.'''
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16 ) for i in range(0, len(data), 2 ) )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
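Round-trip usage of the two helpers (values checked by hand: "H" is 0x48, "e" is 0x65, and so on):

assert base16_encode(b"Hello World!") == "48656C6C6F20576F726C6421"
assert base16_decode("48656C6C6F20576F726C6421") == b"Hello World!"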
"""Binarize a greyscale image around its mean pixel intensity."""
from PIL import Image


def mean_threshold(image: Image.Image) -> Image.Image:
    """Set every pixel above the image's mean intensity to 255 and the rest to 0."""
    height, width = image.size
    mean = 0
    pixels = image.load()
    for i in range(width):
        for j in range(height):
            pixel = pixels[j, i]
            mean += pixel
    mean //= width * height

    for j in range(height):
        for i in range(width):
            pixels[i, j] = 255 if pixels[i, j] > mean else 0
    return image


if __name__ == "__main__":
    image = mean_threshold(Image.open("path_to_image").convert("L"))
    image.save("output_image_path")
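For larger images the per-pixel loop above is slow; here is a vectorized NumPy equivalent, a sketch assuming a greyscale ("L" mode) input and not part of the original script.

import numpy as np
from PIL import Image


def mean_threshold_np(image: Image.Image) -> Image.Image:
    arr = np.asarray(image)
    mean = int(arr.mean())  # integer mean, mirroring the floor division above
    return Image.fromarray(np.where(arr > mean, 255, 0).astype(np.uint8))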
import pytest
import requests

from datasets.utils.file_utils import http_head

from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline


@pytest.mark.integration
def test_offline_with_timeout():
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request('''GET''', '''https://huggingface.co''' )
        with pytest.raises(requests.exceptions.ConnectTimeout ):
            requests.request('''GET''', '''https://huggingface.co''', timeout=1.0 )


@pytest.mark.integration
def test_offline_with_connection_error():
    with offline(OfflineSimulationMode.CONNECTION_FAILS ):
        with pytest.raises(requests.exceptions.ConnectionError ):
            requests.request('''GET''', '''https://huggingface.co''' )


def test_offline_with_datasets_offline_mode_enabled():
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
        with pytest.raises(ConnectionError ):
            http_head('''https://huggingface.co''' )
import itertools
import json
import os
import unittest

from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class snake_case_ ( __A , unittest.TestCase ):
    '''simple docstring'''

    SCREAMING_SNAKE_CASE : List[Any] = LongformerTokenizer
    SCREAMING_SNAKE_CASE : List[Any] = True
    SCREAMING_SNAKE_CASE : Any = LongformerTokenizerFast
    SCREAMING_SNAKE_CASE : str = True

    def snake_case__( self : List[str] ) ->Optional[int]:
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        snake_case_ = [
            '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''',
            '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''',
            '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''<unk>''',
        ]
        snake_case_ = dict(zip(_UpperCamelCase , range(len(_UpperCamelCase ) ) ) )
        snake_case_ = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
        snake_case_ = {'''unk_token''': '''<unk>'''}

        snake_case_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        snake_case_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(_UpperCamelCase ) + '''\n''' )
        with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(_UpperCamelCase ) )

    def snake_case__( self : Union[str, Any] , **_UpperCamelCase : int ) ->Union[str, Any]:
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **_UpperCamelCase )

    def snake_case__( self : int , **_UpperCamelCase : Dict ) ->List[Any]:
        kwargs.update(self.special_tokens_map )
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **_UpperCamelCase )

    def snake_case__( self : str , _UpperCamelCase : Optional[Any] ) ->str:
        snake_case_ = '''lower newer'''
        snake_case_ = '''lower newer'''
        return input_text, output_text

    def snake_case__( self : List[Any] ) ->int:
        snake_case_ = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
        snake_case_ = '''lower newer'''
        snake_case_ = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
        snake_case_ = tokenizer.tokenize(_UpperCamelCase )  # , add_prefix_space=True)
        self.assertListEqual(_UpperCamelCase , _UpperCamelCase )

        snake_case_ = tokens + [tokenizer.unk_token]
        snake_case_ = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCamelCase ) , _UpperCamelCase )

    def snake_case__( self : Optional[int] ) ->Any:
        snake_case_ = self.get_tokenizer()

        self.assertListEqual(tokenizer.encode('''Hello world!''' , add_special_tokens=_UpperCamelCase ) , [0, 31414, 232, 328, 2] )
        self.assertListEqual(
            tokenizer.encode('''Hello world! cécé herlolip 418''' , add_special_tokens=_UpperCamelCase ) ,
            [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2] ,
        )

    @slow
    def snake_case__( self : Optional[int] ) ->int:
        snake_case_ = self.tokenizer_class.from_pretrained('''allenai/longformer-base-4096''' )

        snake_case_ = tokenizer.encode('''sequence builders''' , add_special_tokens=_UpperCamelCase )
        snake_case_ = tokenizer.encode('''multi-sequence build''' , add_special_tokens=_UpperCamelCase )

        snake_case_ = tokenizer.encode(
            '''sequence builders''' , add_special_tokens=_UpperCamelCase , add_prefix_space=_UpperCamelCase
        )
        snake_case_ = tokenizer.encode(
            '''sequence builders''' , '''multi-sequence build''' , add_special_tokens=_UpperCamelCase , add_prefix_space=_UpperCamelCase
        )

        snake_case_ = tokenizer.build_inputs_with_special_tokens(_UpperCamelCase )
        snake_case_ = tokenizer.build_inputs_with_special_tokens(_UpperCamelCase , _UpperCamelCase )

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode

    def snake_case__( self : Any ) ->Tuple:
        snake_case_ = self.get_tokenizer()

        snake_case_ = '''Encode this sequence.'''
        snake_case_ = tokenizer.byte_encoder[''' '''.encode('''utf-8''' )[0]]

        # Testing encoder arguments
        snake_case_ = tokenizer.encode(_UpperCamelCase , add_special_tokens=_UpperCamelCase , add_prefix_space=_UpperCamelCase )
        snake_case_ = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
        self.assertNotEqual(_UpperCamelCase , _UpperCamelCase )

        snake_case_ = tokenizer.encode(_UpperCamelCase , add_special_tokens=_UpperCamelCase , add_prefix_space=_UpperCamelCase )
        snake_case_ = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
        self.assertEqual(_UpperCamelCase , _UpperCamelCase )

        tokenizer.add_special_tokens({'''bos_token''': '''<s>'''} )
        snake_case_ = tokenizer.encode(_UpperCamelCase , add_special_tokens=_UpperCamelCase )
        snake_case_ = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
        self.assertNotEqual(_UpperCamelCase , _UpperCamelCase )

        # Testing spaces after special tokens
        snake_case_ = '''<mask>'''
        tokenizer.add_special_tokens(
            {'''mask_token''': AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase )}
        )  # mask token has a left space
        snake_case_ = tokenizer.convert_tokens_to_ids(_UpperCamelCase )

        snake_case_ = '''Encode <mask> sequence'''
        snake_case_ = '''Encode <mask>sequence'''

        snake_case_ = tokenizer.encode(_UpperCamelCase )
        snake_case_ = encoded.index(_UpperCamelCase )
        snake_case_ = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
        self.assertEqual(_UpperCamelCase , _UpperCamelCase )

        snake_case_ = tokenizer.encode(_UpperCamelCase )
        snake_case_ = encoded.index(_UpperCamelCase )
        snake_case_ = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
        self.assertNotEqual(_UpperCamelCase , _UpperCamelCase )

    def snake_case__( self : Optional[int] ) ->Optional[int]:
        pass

    def snake_case__( self : List[Any] ) ->str:
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                snake_case_ = self.rust_tokenizer_class.from_pretrained(_UpperCamelCase , **_UpperCamelCase )
                snake_case_ = self.tokenizer_class.from_pretrained(_UpperCamelCase , **_UpperCamelCase )
                snake_case_ = '''A, <mask> AllenNLP sentence.'''
                snake_case_ = tokenizer_r.encode_plus(_UpperCamelCase , add_special_tokens=_UpperCamelCase , return_token_type_ids=_UpperCamelCase )
                snake_case_ = tokenizer_p.encode_plus(_UpperCamelCase , add_special_tokens=_UpperCamelCase , return_token_type_ids=_UpperCamelCase )

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) ,
                    sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) ,
                )

                snake_case_ = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
                snake_case_ = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )

                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
                self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )

                self.assertSequenceEqual(
                    _UpperCamelCase , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>''']
                )
                self.assertSequenceEqual(
                    _UpperCamelCase , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>''']
                )

    def snake_case__( self : str ) ->int:
        for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
            snake_case_ = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname , use_fast=_UpperCamelCase , add_prefix_space=_UpperCamelCase , trim_offsets=_UpperCamelCase
            )
            snake_case_ = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
            snake_case_ = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
            self.assertEqual(pre_tokenizer_state['''add_prefix_space'''] , _UpperCamelCase )
            self.assertEqual(post_processor_state['''add_prefix_space'''] , _UpperCamelCase )
            self.assertEqual(post_processor_state['''trim_offsets'''] , _UpperCamelCase )

    def snake_case__( self : str ) ->Union[str, Any]:
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
        # `trim_offsets`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                snake_case_ = '''hello'''  # `hello` is a token in the vocabulary of `pretrained_name`
                snake_case_ = f'''{text_of_1_token} {text_of_1_token}'''

                snake_case_ = self.rust_tokenizer_class.from_pretrained(
                    _UpperCamelCase , use_fast=_UpperCamelCase , add_prefix_space=_UpperCamelCase , trim_offsets=_UpperCamelCase
                )
                snake_case_ = tokenizer_r(_UpperCamelCase , return_offsets_mapping=_UpperCamelCase , add_special_tokens=_UpperCamelCase )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(_UpperCamelCase )) )
                self.assertEqual(
                    encoding.offset_mapping[1] ,
                    (len(_UpperCamelCase ) + 1, len(_UpperCamelCase ) + 1 + len(_UpperCamelCase )) ,
                )

                snake_case_ = self.rust_tokenizer_class.from_pretrained(
                    _UpperCamelCase , use_fast=_UpperCamelCase , add_prefix_space=_UpperCamelCase , trim_offsets=_UpperCamelCase
                )
                snake_case_ = tokenizer_r(_UpperCamelCase , return_offsets_mapping=_UpperCamelCase , add_special_tokens=_UpperCamelCase )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(_UpperCamelCase )) )
                self.assertEqual(
                    encoding.offset_mapping[1] ,
                    (len(_UpperCamelCase ) + 1, len(_UpperCamelCase ) + 1 + len(_UpperCamelCase )) ,
                )

                snake_case_ = self.rust_tokenizer_class.from_pretrained(
                    _UpperCamelCase , use_fast=_UpperCamelCase , add_prefix_space=_UpperCamelCase , trim_offsets=_UpperCamelCase
                )
                snake_case_ = tokenizer_r(_UpperCamelCase , return_offsets_mapping=_UpperCamelCase , add_special_tokens=_UpperCamelCase )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(_UpperCamelCase )) )
                self.assertEqual(
                    encoding.offset_mapping[1] ,
                    (len(_UpperCamelCase ), len(_UpperCamelCase ) + 1 + len(_UpperCamelCase )) ,
                )

                snake_case_ = self.rust_tokenizer_class.from_pretrained(
                    _UpperCamelCase , use_fast=_UpperCamelCase , add_prefix_space=_UpperCamelCase , trim_offsets=_UpperCamelCase
                )
                snake_case_ = tokenizer_r(_UpperCamelCase , return_offsets_mapping=_UpperCamelCase , add_special_tokens=_UpperCamelCase )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(_UpperCamelCase )) )
                self.assertEqual(
                    encoding.offset_mapping[1] ,
                    (len(_UpperCamelCase ), len(_UpperCamelCase ) + 1 + len(_UpperCamelCase )) ,
                )

                snake_case_ = f''' {text}'''

                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )

                snake_case_ = self.rust_tokenizer_class.from_pretrained(
                    _UpperCamelCase , use_fast=_UpperCamelCase , add_prefix_space=_UpperCamelCase , trim_offsets=_UpperCamelCase
                )
                snake_case_ = tokenizer_r(_UpperCamelCase , return_offsets_mapping=_UpperCamelCase , add_special_tokens=_UpperCamelCase )
                self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(_UpperCamelCase )) )
                self.assertEqual(
                    encoding.offset_mapping[1] ,
                    (1 + len(_UpperCamelCase ) + 1, 1 + len(_UpperCamelCase ) + 1 + len(_UpperCamelCase )) ,
                )

                snake_case_ = self.rust_tokenizer_class.from_pretrained(
                    _UpperCamelCase , use_fast=_UpperCamelCase , add_prefix_space=_UpperCamelCase , trim_offsets=_UpperCamelCase
                )
                snake_case_ = tokenizer_r(_UpperCamelCase , return_offsets_mapping=_UpperCamelCase , add_special_tokens=_UpperCamelCase )
                self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(_UpperCamelCase )) )
                self.assertEqual(
                    encoding.offset_mapping[1] ,
                    (1 + len(_UpperCamelCase ), 1 + len(_UpperCamelCase ) + 1 + len(_UpperCamelCase )) ,
                )

                snake_case_ = self.rust_tokenizer_class.from_pretrained(
                    _UpperCamelCase , use_fast=_UpperCamelCase , add_prefix_space=_UpperCamelCase , trim_offsets=_UpperCamelCase
                )
                snake_case_ = tokenizer_r(_UpperCamelCase , return_offsets_mapping=_UpperCamelCase , add_special_tokens=_UpperCamelCase )
                self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(_UpperCamelCase )) )
                self.assertEqual(
                    encoding.offset_mapping[1] ,
                    (1 + len(_UpperCamelCase ), 1 + len(_UpperCamelCase ) + 1 + len(_UpperCamelCase )) ,
                )
import math
from collections.abc import Iterator
from itertools import takewhile


def is_prime(number: int) -> bool:
    """Return True if ``number`` is prime, using 6k +/- 1 trial division."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator() -> Iterator[int]:
    """Yield the primes in increasing order."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(n: int = 2_000_000) -> int:
    """Return the sum of all primes below ``n`` (Project Euler problem 10)."""
    return sum(takewhile(lambda x: x < n, prime_generator()))


if __name__ == "__main__":
    print(f"{solution() = }")
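Quick spot checks (assuming the restored names above): the primes below 10 are 2, 3, 5 and 7, which sum to 17.

assert solution(10) == 17
# solution() with the default bound of 2_000_000 returns 142913828922 (the published
# Project Euler 10 answer), though trial division takes a while at that size.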
import bza
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union

from .. import config
from .filelock import FileLock
from .logging import get_logger


__UpperCAmelCase = get_logger(__name__)


class lowerCAmelCase_ :
    def __init__( self, SCREAMING_SNAKE_CASE_ = None ) -> Dict:
        UpperCamelCase : List[str] = (
            os.path.join(SCREAMING_SNAKE_CASE_, config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH
        )
        UpperCamelCase : Any = Extractor

    def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> str:
        from .file_utils import hash_url_to_filename

        # Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path"
        UpperCamelCase : int = os.path.abspath(SCREAMING_SNAKE_CASE_ )
        return os.path.join(self.extract_dir, hash_url_to_filename(SCREAMING_SNAKE_CASE_ ) )

    def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> bool:
        return force_extract or (
            not os.path.isfile(SCREAMING_SNAKE_CASE_ ) and not (os.path.isdir(SCREAMING_SNAKE_CASE_ ) and os.listdir(SCREAMING_SNAKE_CASE_ ))
        )

    def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = False ) -> str:
        UpperCamelCase : Tuple = self.extractor.infer_extractor_format(SCREAMING_SNAKE_CASE_ )
        if not extractor_format:
            return input_path
        UpperCamelCase : List[Any] = self._get_output_path(SCREAMING_SNAKE_CASE_ )
        if self._do_extract(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ):
            self.extractor.extract(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
        return output_path


class lowerCAmelCase_ ( a__ ):
    @classmethod
    @abstractmethod
    def snake_case_ ( cls, SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) -> bool:
        ...

    @staticmethod
    @abstractmethod
    def snake_case_ ( SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> None:
        ...


class lowerCAmelCase_ ( a__ , a__ ):
    UpperCAmelCase__ : List[bytes] = []

    @staticmethod
    def snake_case_ ( SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Any:
        with open(SCREAMING_SNAKE_CASE_, 'rb' ) as f:
            return f.read(SCREAMING_SNAKE_CASE_ )

    @classmethod
    def snake_case_ ( cls, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = b"" ) -> bool:
        if not magic_number:
            UpperCamelCase : str = max(len(SCREAMING_SNAKE_CASE_ ) for cls_magic_number in cls.magic_numbers )
            try:
                UpperCamelCase : List[str] = cls.read_magic_number(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
            except OSError:
                return False
        return any(magic_number.startswith(SCREAMING_SNAKE_CASE_ ) for cls_magic_number in cls.magic_numbers )


class lowerCAmelCase_ ( a__ ):
    @classmethod
    def snake_case_ ( cls, SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) -> bool:
        return tarfile.is_tarfile(SCREAMING_SNAKE_CASE_ )

    @staticmethod
    def snake_case_ ( SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
        def resolved(SCREAMING_SNAKE_CASE_ ) -> str:
            return os.path.realpath(os.path.abspath(SCREAMING_SNAKE_CASE_ ) )

        def badpath(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> bool:
            # joinpath will ignore base if path is absolute
            return not resolved(os.path.join(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) ).startswith(SCREAMING_SNAKE_CASE_ )

        def badlink(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> bool:
            # Links are interpreted relative to the directory containing the link
            UpperCamelCase : Any = resolved(os.path.join(SCREAMING_SNAKE_CASE_, os.path.dirname(info.name ) ) )
            return badpath(info.linkname, base=SCREAMING_SNAKE_CASE_ )

        UpperCamelCase : Union[str, Any] = resolved(SCREAMING_SNAKE_CASE_ )
        for finfo in members:
            if badpath(finfo.name, SCREAMING_SNAKE_CASE_ ):
                logger.error(F"""Extraction of {finfo.name} is blocked (illegal path)""" )
            elif finfo.issym() and badlink(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ):
                logger.error(F"""Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}""" )
            elif finfo.islnk() and badlink(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ):
                logger.error(F"""Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}""" )
            else:
                yield finfo

    @staticmethod
    def snake_case_ ( SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> None:
        os.makedirs(SCREAMING_SNAKE_CASE_, exist_ok=SCREAMING_SNAKE_CASE_ )
        UpperCamelCase : List[Any] = tarfile.open(SCREAMING_SNAKE_CASE_ )
        tar_file.extractall(SCREAMING_SNAKE_CASE_, members=TarExtractor.safemembers(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) )
        tar_file.close()


class lowerCAmelCase_ ( a__ ):
    UpperCAmelCase__ : Dict = [b"\x1F\x8B"]

    @staticmethod
    def snake_case_ ( SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> None:
        with gzip.open(SCREAMING_SNAKE_CASE_, 'rb' ) as gzip_file:
            with open(SCREAMING_SNAKE_CASE_, 'wb' ) as extracted_file:
                shutil.copyfileobj(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )


class lowerCAmelCase_ ( a__ ):
    UpperCAmelCase__ : Optional[Any] = [
        b"PK\x03\x04",
        b"PK\x05\x06",  # empty archive
        b"PK\x07\x08",  # spanned archive
    ]

    @classmethod
    def snake_case_ ( cls, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = b"" ) -> bool:
        if super().is_extractable(SCREAMING_SNAKE_CASE_, magic_number=SCREAMING_SNAKE_CASE_ ):
            return True
        try:
            # Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
            # From: https://github.com/python/cpython/pull/5053
            from zipfile import (
                _CD_SIGNATURE,
                _ECD_DISK_NUMBER,
                _ECD_DISK_START,
                _ECD_ENTRIES_TOTAL,
                _ECD_OFFSET,
                _ECD_SIZE,
                _EndRecData,
                sizeCentralDir,
                stringCentralDir,
                structCentralDir,
            )

            with open(SCREAMING_SNAKE_CASE_, 'rb' ) as fp:
                UpperCamelCase : Dict = _EndRecData(SCREAMING_SNAKE_CASE_ )
                if endrec:
                    if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
                        return True  # Empty zipfiles are still zipfiles
                    elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
                        fp.seek(endrec[_ECD_OFFSET] )  # Central directory is on the same disk
                        if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
                            UpperCamelCase : str = fp.read(SCREAMING_SNAKE_CASE_ )  # CD is where we expect it to be
                            if len(SCREAMING_SNAKE_CASE_ ) == sizeCentralDir:
                                UpperCamelCase : int = struct.unpack(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )  # CD is the right size
                                if centdir[_CD_SIGNATURE] == stringCentralDir:
                                    return True  # First central directory entry has correct magic number
            return False
        except Exception:  # catch all errors in case future python versions change the zipfile internals
            return False

    @staticmethod
    def snake_case_ ( SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> None:
        os.makedirs(SCREAMING_SNAKE_CASE_, exist_ok=SCREAMING_SNAKE_CASE_ )
        with zipfile.ZipFile(SCREAMING_SNAKE_CASE_, 'r' ) as zip_file:
            zip_file.extractall(SCREAMING_SNAKE_CASE_ )
            zip_file.close()


class lowerCAmelCase_ ( a__ ):
    UpperCAmelCase__ : List[str] = [b"\xFD\x37\x7A\x58\x5A\x00"]

    @staticmethod
    def snake_case_ ( SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> None:
        with lzma.open(SCREAMING_SNAKE_CASE_ ) as compressed_file:
            with open(SCREAMING_SNAKE_CASE_, 'wb' ) as extracted_file:
                shutil.copyfileobj(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )


class lowerCAmelCase_ ( a__ ):
    UpperCAmelCase__ : Optional[Any] = [b"Rar!\x1a\x07\x00", b"Rar!\x1a\x07\x01\x00"]  # RAR_ID # RAR5_ID

    @staticmethod
    def snake_case_ ( SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> None:
        if not config.RARFILE_AVAILABLE:
            raise ImportError('Please pip install rarfile' )
        import rarfile

        os.makedirs(SCREAMING_SNAKE_CASE_, exist_ok=SCREAMING_SNAKE_CASE_ )
        UpperCamelCase : Any = rarfile.RarFile(SCREAMING_SNAKE_CASE_ )
        rf.extractall(SCREAMING_SNAKE_CASE_ )
        rf.close()


class lowerCAmelCase_ ( a__ ):
    UpperCAmelCase__ : Any = [b"\x28\xb5\x2F\xFD"]

    @staticmethod
    def snake_case_ ( SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> None:
        if not config.ZSTANDARD_AVAILABLE:
            raise ImportError('Please pip install zstandard' )
        import zstandard as zstd

        UpperCamelCase : Any = zstd.ZstdDecompressor()
        with open(SCREAMING_SNAKE_CASE_, 'rb' ) as ifh, open(SCREAMING_SNAKE_CASE_, 'wb' ) as ofh:
            dctx.copy_stream(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )


class lowerCAmelCase_ ( a__ ):
    UpperCAmelCase__ : int = [b"\x42\x5A\x68"]

    @staticmethod
    def snake_case_ ( SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> None:
        with bza.open(SCREAMING_SNAKE_CASE_, 'rb' ) as compressed_file:
            with open(SCREAMING_SNAKE_CASE_, 'wb' ) as extracted_file:
                shutil.copyfileobj(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )


class lowerCAmelCase_ ( a__ ):
    UpperCAmelCase__ : List[str] = [b"\x37\x7A\xBC\xAF\x27\x1C"]

    @staticmethod
    def snake_case_ ( SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> None:
        if not config.PY7ZR_AVAILABLE:
            raise ImportError('Please pip install py7zr' )
        import pyazr

        os.makedirs(SCREAMING_SNAKE_CASE_, exist_ok=SCREAMING_SNAKE_CASE_ )
        with pyazr.SevenZipFile(SCREAMING_SNAKE_CASE_, 'r' ) as archive:
            archive.extractall(SCREAMING_SNAKE_CASE_ )


class lowerCAmelCase_ ( a__ ):
    UpperCAmelCase__ : Optional[Any] = [b"\x04\x22\x4D\x18"]

    @staticmethod
    def snake_case_ ( SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> None:
        if not config.LZ4_AVAILABLE:
            raise ImportError('Please pip install lz4' )
        import lza.frame

        with lza.frame.open(SCREAMING_SNAKE_CASE_, 'rb' ) as compressed_file:
            with open(SCREAMING_SNAKE_CASE_, 'wb' ) as extracted_file:
                shutil.copyfileobj(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )


class lowerCAmelCase_ :
    #  Put zip file to the last, b/c it is possible wrongly detected as zip (I guess it means: as tar or gzip)
    UpperCAmelCase__ : Dict[str, Type[BaseExtractor]] = {
        "tar": TarExtractor,
        "gzip": GzipExtractor,
        "zip": ZipExtractor,
        "xz": XzExtractor,
        "rar": RarExtractor,
        "zstd": ZstdExtractor,
        "bz2": BzipaExtractor,
        "7z": SevenZipExtractor,  # <Added version="2.4.0"/>
        "lz4": LzaExtractor,  # <Added version="2.4.0"/>
    }

    @classmethod
    def snake_case_ ( cls ) -> Dict:
        return max(
            len(SCREAMING_SNAKE_CASE_ )
            for extractor in cls.extractors.values()
            if issubclass(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
            for extractor_magic_number in extractor.magic_numbers
        )

    @staticmethod
    def snake_case_ ( SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Tuple:
        try:
            return MagicNumberBaseExtractor.read_magic_number(SCREAMING_SNAKE_CASE_, magic_number_length=SCREAMING_SNAKE_CASE_ )
        except OSError:
            return b""

    @classmethod
    def snake_case_ ( cls, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = False ) -> bool:
        warnings.warn(
            'Method \'is_extractable\' was deprecated in version 2.4.0 and will be removed in 3.0.0. '
            'Use \'infer_extractor_format\' instead.',
            category=SCREAMING_SNAKE_CASE_,
        )
        UpperCamelCase : List[Any] = cls.infer_extractor_format(SCREAMING_SNAKE_CASE_ )
        if extractor_format:
            return True if not return_extractor else (True, cls.extractors[extractor_format])
        return False if not return_extractor else (False, None)

    @classmethod
    def snake_case_ ( cls, SCREAMING_SNAKE_CASE_ ) -> str:  # <Added version="2.4.0"/>
        UpperCamelCase : Union[str, Any] = cls._get_magic_number_max_length()
        UpperCamelCase : List[Any] = cls._read_magic_number(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
        for extractor_format, extractor in cls.extractors.items():
            if extractor.is_extractable(SCREAMING_SNAKE_CASE_, magic_number=SCREAMING_SNAKE_CASE_ ):
                return extractor_format

    @classmethod
    def snake_case_ (
        cls, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = "deprecated",
    ) -> None:
        os.makedirs(os.path.dirname(SCREAMING_SNAKE_CASE_ ), exist_ok=SCREAMING_SNAKE_CASE_ )
        # Prevent parallel extractions
        UpperCamelCase : List[Any] = str(Path(SCREAMING_SNAKE_CASE_ ).with_suffix('.lock' ) )
        with FileLock(SCREAMING_SNAKE_CASE_ ):
            shutil.rmtree(SCREAMING_SNAKE_CASE_, ignore_errors=SCREAMING_SNAKE_CASE_ )
            if extractor_format or extractor != "deprecated":
                if extractor != "deprecated" or not isinstance(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ):  # passed as positional arg
                    warnings.warn(
                        'Parameter \'extractor\' was deprecated in version 2.4.0 and will be removed in 3.0.0. '
                        'Use \'extractor_format\' instead.',
                        category=SCREAMING_SNAKE_CASE_,
                    )
                    UpperCamelCase : Union[str, Any] = extractor if extractor != 'deprecated' else extractor_format
                else:
                    UpperCamelCase : List[Any] = cls.extractors[extractor_format]
                return extractor.extract(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
            else:
                warnings.warn(
                    'Parameter \'extractor_format\' was made required in version 2.4.0 and not passing it will raise an '
                    'exception in 3.0.0.',
                    category=SCREAMING_SNAKE_CASE_,
                )
                for extractor in cls.extractors.values():
                    if extractor.is_extractable(SCREAMING_SNAKE_CASE_ ):
                        return extractor.extract(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
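A standalone sketch of the magic-number sniffing these extractors rely on; the byte signatures are the standard ones for each format, and `sniff_format` is an illustrative helper, not part of the module above.

from typing import Optional

MAGIC_NUMBERS = {
    "gzip": b"\x1f\x8b",
    "zip": b"PK\x03\x04",
    "xz": b"\xfd7zXZ\x00",
    "bz2": b"BZh",
    "zstd": b"\x28\xb5\x2f\xfd",
}


def sniff_format(path: str) -> Optional[str]:
    with open(path, "rb") as f:
        head = f.read(8)  # long enough for every signature above
    for name, magic in MAGIC_NUMBERS.items():
        if head.startswith(magic):
            return name
    return None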
import unittest import numpy as np from transformers.testing_utils import require_flax, require_tf, require_torch from transformers.utils import ( expand_dims, flatten_dict, is_flax_available, is_tf_available, is_torch_available, reshape, squeeze, transpose, ) if is_flax_available(): import jax.numpy as jnp if is_tf_available(): import tensorflow as tf if is_torch_available(): import torch class lowerCamelCase_ ( unittest.TestCase ): def A ( self ): """simple docstring""" __magic_name__ :List[Any] = { '''task_specific_params''': { '''summarization''': {'''length_penalty''': 1.0, '''max_length''': 1_2_8, '''min_length''': 1_2, '''num_beams''': 4}, '''summarization_cnn''': {'''length_penalty''': 2.0, '''max_length''': 1_4_2, '''min_length''': 5_6, '''num_beams''': 4}, '''summarization_xsum''': {'''length_penalty''': 1.0, '''max_length''': 6_2, '''min_length''': 1_1, '''num_beams''': 6}, } } __magic_name__ :List[str] = { '''task_specific_params.summarization.length_penalty''': 1.0, '''task_specific_params.summarization.max_length''': 1_2_8, '''task_specific_params.summarization.min_length''': 1_2, '''task_specific_params.summarization.num_beams''': 4, '''task_specific_params.summarization_cnn.length_penalty''': 2.0, '''task_specific_params.summarization_cnn.max_length''': 1_4_2, '''task_specific_params.summarization_cnn.min_length''': 5_6, '''task_specific_params.summarization_cnn.num_beams''': 4, '''task_specific_params.summarization_xsum.length_penalty''': 1.0, '''task_specific_params.summarization_xsum.max_length''': 6_2, '''task_specific_params.summarization_xsum.min_length''': 1_1, '''task_specific_params.summarization_xsum.num_beams''': 6, } self.assertEqual(flatten_dict(__lowerCAmelCase ) , __lowerCAmelCase ) def A ( self ): """simple docstring""" __magic_name__ :Optional[Any] = np.random.randn(3 , 4 ) self.assertTrue(np.allclose(transpose(__lowerCAmelCase ) , x.transpose() ) ) __magic_name__ :List[Any] = np.random.randn(3 , 4 , 5 ) self.assertTrue(np.allclose(transpose(__lowerCAmelCase , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) ) @require_torch def A ( self ): """simple docstring""" __magic_name__ :Optional[Any] = np.random.randn(3 , 4 ) __magic_name__ :Tuple = torch.tensor(__lowerCAmelCase ) self.assertTrue(np.allclose(transpose(__lowerCAmelCase ) , transpose(__lowerCAmelCase ).numpy() ) ) __magic_name__ :int = np.random.randn(3 , 4 , 5 ) __magic_name__ :Union[str, Any] = torch.tensor(__lowerCAmelCase ) self.assertTrue(np.allclose(transpose(__lowerCAmelCase , axes=(1, 2, 0) ) , transpose(__lowerCAmelCase , axes=(1, 2, 0) ).numpy() ) ) @require_tf def A ( self ): """simple docstring""" __magic_name__ :int = np.random.randn(3 , 4 ) __magic_name__ :Optional[Any] = tf.constant(__lowerCAmelCase ) self.assertTrue(np.allclose(transpose(__lowerCAmelCase ) , transpose(__lowerCAmelCase ).numpy() ) ) __magic_name__ :List[str] = np.random.randn(3 , 4 , 5 ) __magic_name__ :Optional[Any] = tf.constant(__lowerCAmelCase ) self.assertTrue(np.allclose(transpose(__lowerCAmelCase , axes=(1, 2, 0) ) , transpose(__lowerCAmelCase , axes=(1, 2, 0) ).numpy() ) ) @require_flax def A ( self ): """simple docstring""" __magic_name__ :int = np.random.randn(3 , 4 ) __magic_name__ :Dict = jnp.array(__lowerCAmelCase ) self.assertTrue(np.allclose(transpose(__lowerCAmelCase ) , np.asarray(transpose(__lowerCAmelCase ) ) ) ) __magic_name__ :Dict = np.random.randn(3 , 4 , 5 ) __magic_name__ :Dict = jnp.array(__lowerCAmelCase ) self.assertTrue(np.allclose(transpose(__lowerCAmelCase , axes=(1, 2, 0) ) , 
np.asarray(transpose(__lowerCAmelCase , axes=(1, 2, 0) ) ) ) ) def A ( self ): """simple docstring""" __magic_name__ :Any = np.random.randn(3 , 4 ) self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (4, 3) ) , np.reshape(__lowerCAmelCase , (4, 3) ) ) ) __magic_name__ :Union[str, Any] = np.random.randn(3 , 4 , 5 ) self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (1_2, 5) ) , np.reshape(__lowerCAmelCase , (1_2, 5) ) ) ) @require_torch def A ( self ): """simple docstring""" __magic_name__ :Dict = np.random.randn(3 , 4 ) __magic_name__ :Tuple = torch.tensor(__lowerCAmelCase ) self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (4, 3) ) , reshape(__lowerCAmelCase , (4, 3) ).numpy() ) ) __magic_name__ :Union[str, Any] = np.random.randn(3 , 4 , 5 ) __magic_name__ :List[str] = torch.tensor(__lowerCAmelCase ) self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (1_2, 5) ) , reshape(__lowerCAmelCase , (1_2, 5) ).numpy() ) ) @require_tf def A ( self ): """simple docstring""" __magic_name__ :Dict = np.random.randn(3 , 4 ) __magic_name__ :Union[str, Any] = tf.constant(__lowerCAmelCase ) self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (4, 3) ) , reshape(__lowerCAmelCase , (4, 3) ).numpy() ) ) __magic_name__ :List[Any] = np.random.randn(3 , 4 , 5 ) __magic_name__ :Optional[int] = tf.constant(__lowerCAmelCase ) self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (1_2, 5) ) , reshape(__lowerCAmelCase , (1_2, 5) ).numpy() ) ) @require_flax def A ( self ): """simple docstring""" __magic_name__ :List[str] = np.random.randn(3 , 4 ) __magic_name__ :Any = jnp.array(__lowerCAmelCase ) self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (4, 3) ) , np.asarray(reshape(__lowerCAmelCase , (4, 3) ) ) ) ) __magic_name__ :List[Any] = np.random.randn(3 , 4 , 5 ) __magic_name__ :List[str] = jnp.array(__lowerCAmelCase ) self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (1_2, 5) ) , np.asarray(reshape(__lowerCAmelCase , (1_2, 5) ) ) ) ) def A ( self ): """simple docstring""" __magic_name__ :List[Any] = np.random.randn(1 , 3 , 4 ) self.assertTrue(np.allclose(squeeze(__lowerCAmelCase ) , np.squeeze(__lowerCAmelCase ) ) ) __magic_name__ :Optional[Any] = np.random.randn(1 , 4 , 1 , 5 ) self.assertTrue(np.allclose(squeeze(__lowerCAmelCase , axis=2 ) , np.squeeze(__lowerCAmelCase , axis=2 ) ) ) @require_torch def A ( self ): """simple docstring""" __magic_name__ :Dict = np.random.randn(1 , 3 , 4 ) __magic_name__ :List[Any] = torch.tensor(__lowerCAmelCase ) self.assertTrue(np.allclose(squeeze(__lowerCAmelCase ) , squeeze(__lowerCAmelCase ).numpy() ) ) __magic_name__ :List[str] = np.random.randn(1 , 4 , 1 , 5 ) __magic_name__ :str = torch.tensor(__lowerCAmelCase ) self.assertTrue(np.allclose(squeeze(__lowerCAmelCase , axis=2 ) , squeeze(__lowerCAmelCase , axis=2 ).numpy() ) ) @require_tf def A ( self ): """simple docstring""" __magic_name__ :int = np.random.randn(1 , 3 , 4 ) __magic_name__ :Tuple = tf.constant(__lowerCAmelCase ) self.assertTrue(np.allclose(squeeze(__lowerCAmelCase ) , squeeze(__lowerCAmelCase ).numpy() ) ) __magic_name__ :Tuple = np.random.randn(1 , 4 , 1 , 5 ) __magic_name__ :Optional[int] = tf.constant(__lowerCAmelCase ) self.assertTrue(np.allclose(squeeze(__lowerCAmelCase , axis=2 ) , squeeze(__lowerCAmelCase , axis=2 ).numpy() ) ) @require_flax def A ( self ): """simple docstring""" __magic_name__ :Tuple = np.random.randn(1 , 3 , 4 ) __magic_name__ :Optional[Any] = jnp.array(__lowerCAmelCase ) self.assertTrue(np.allclose(squeeze(__lowerCAmelCase ) , 
np.asarray(squeeze(__lowerCAmelCase ) ) ) ) __magic_name__ :List[Any] = np.random.randn(1 , 4 , 1 , 5 ) __magic_name__ :Optional[Any] = jnp.array(__lowerCAmelCase ) self.assertTrue(np.allclose(squeeze(__lowerCAmelCase , axis=2 ) , np.asarray(squeeze(__lowerCAmelCase , axis=2 ) ) ) ) def A ( self ): """simple docstring""" __magic_name__ :Any = np.random.randn(3 , 4 ) self.assertTrue(np.allclose(expand_dims(__lowerCAmelCase , axis=1 ) , np.expand_dims(__lowerCAmelCase , axis=1 ) ) ) @require_torch def A ( self ): """simple docstring""" __magic_name__ :List[Any] = np.random.randn(3 , 4 ) __magic_name__ :Any = torch.tensor(__lowerCAmelCase ) self.assertTrue(np.allclose(expand_dims(__lowerCAmelCase , axis=1 ) , expand_dims(__lowerCAmelCase , axis=1 ).numpy() ) ) @require_tf def A ( self ): """simple docstring""" __magic_name__ :Union[str, Any] = np.random.randn(3 , 4 ) __magic_name__ :Union[str, Any] = tf.constant(__lowerCAmelCase ) self.assertTrue(np.allclose(expand_dims(__lowerCAmelCase , axis=1 ) , expand_dims(__lowerCAmelCase , axis=1 ).numpy() ) ) @require_flax def A ( self ): """simple docstring""" __magic_name__ :List[str] = np.random.randn(3 , 4 ) __magic_name__ :Tuple = jnp.array(__lowerCAmelCase ) self.assertTrue(np.allclose(expand_dims(__lowerCAmelCase , axis=1 ) , np.asarray(expand_dims(__lowerCAmelCase , axis=1 ) ) ) )
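A hedged sketch of what the framework-agnostic tensor helpers exercised above do on the plain NumPy path; the import path mirrors the test's own imports:

import numpy as np
from transformers.utils import expand_dims, reshape, squeeze, transpose

x = np.random.randn(1, 3, 4)
assert squeeze(x).shape == (3, 4)             # drops all size-1 axes
assert expand_dims(x, axis=0).shape == (1, 1, 3, 4)
assert reshape(x, (4, 3)).shape == (4, 3)     # 12 elements either way
assert transpose(x).shape == (4, 3, 1)        # default reverses the axes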
0
0
'''simple docstring''' def _A ( numa , numb ): """simple docstring""" return numa ^ numb < 0 if __name__ == "__main__": import doctest doctest.testmod()
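A quick sanity check of the XOR sign test above: two ints have different signs exactly when their XOR is negative, because the sign bits differ.

# assumes the helper above with its arguments made distinct as `numa` / `numb`
assert _A(1, -1) is True    # opposite signs -> sign bit of XOR is set
assert _A(-5, -7) is False  # same signs -> XOR is non-negative
assert _A(3, 4) is False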
41
from pathlib import PurePosixPath from typing import Optional import fsspec from fsspec import AbstractFileSystem from huggingface_hub.hf_api import DatasetInfo from ..utils.file_utils import get_authentication_headers_for_url from ..utils.hub import hf_hub_url class lowerCamelCase_ ( lowerCamelCase ): a__ = '''''' a__ = '''hf-legacy''' # "hf://"" is reserved for hffs def __init__( self , __lowerCAmelCase = None , __lowerCAmelCase = None , **__lowerCAmelCase , ): """simple docstring""" super().__init__(self , **__lowerCAmelCase ) __magic_name__ :List[Any] = repo_info __magic_name__ :Dict = token __magic_name__ :Optional[Any] = None def A ( self ): """simple docstring""" if self.dir_cache is None: __magic_name__ :Any = {} for hf_file in self.repo_info.siblings: # TODO(QL): add sizes __magic_name__ :Optional[int] = { '''name''': hf_file.rfilename, '''size''': None, '''type''': '''file''', } self.dir_cache.update( { str(__lowerCAmelCase ): {'''name''': str(__lowerCAmelCase ), '''size''': None, '''type''': '''directory'''} for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1] } ) def A ( self , __lowerCAmelCase , __lowerCAmelCase = "rb" , **__lowerCAmelCase , ): """simple docstring""" if not isinstance(self.repo_info , __lowerCAmelCase ): raise NotImplementedError(F'''Open is only implemented for dataset repositories, but got {self.repo_info}''' ) __magic_name__ :Union[str, Any] = hf_hub_url(self.repo_info.id , __lowerCAmelCase , revision=self.repo_info.sha ) return fsspec.open( __lowerCAmelCase , mode=__lowerCAmelCase , headers=get_authentication_headers_for_url(__lowerCAmelCase , use_auth_token=self.token ) , client_kwargs={'''trust_env''': True} , ).open() def A ( self , __lowerCAmelCase , **__lowerCAmelCase ): """simple docstring""" self._get_dirs() __magic_name__ :str = self._strip_protocol(__lowerCAmelCase ) if path in self.dir_cache: return self.dir_cache[path] else: raise FileNotFoundError(__lowerCAmelCase ) def A ( self , __lowerCAmelCase , __lowerCAmelCase=False , **__lowerCAmelCase ): """simple docstring""" self._get_dirs() __magic_name__ :Union[str, Any] = PurePosixPath(path.strip('''/''' ) ) __magic_name__ :Dict = {} for p, f in self.dir_cache.items(): __magic_name__ :int = PurePosixPath(p.strip('''/''' ) ) __magic_name__ :Tuple = p.parent if root == path: __magic_name__ :Optional[Any] = f __magic_name__ :List[Any] = list(paths.values() ) if detail: return out else: return sorted(f['''name'''] for f in out )
0
0
'''simple docstring''' import argparse import pytorch_lightning as pl import torch from torch import nn from transformers import LongformerForQuestionAnswering, LongformerModel class UpperCAmelCase ( pl.LightningModule ): '''simple docstring''' def __init__( self , SCREAMING_SNAKE_CASE_ ) -> int: '''simple docstring''' super().__init__() lowerCamelCase_ = model lowerCamelCase_ = 2 lowerCamelCase_ = nn.Linear(self.model.config.hidden_size , self.num_labels ) def UpperCamelCase( self ) -> Dict: '''simple docstring''' pass def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> int: # load longformer model from model identifier lowerCamelCase_ = LongformerModel.from_pretrained(__UpperCamelCase ) lowerCamelCase_ = LightningModel(__UpperCamelCase ) lowerCamelCase_ = torch.load(__UpperCamelCase ,map_location=torch.device('cpu' ) ) lightning_model.load_state_dict(ckpt['state_dict'] ) # init longformer question answering model lowerCamelCase_ = LongformerForQuestionAnswering.from_pretrained(__UpperCamelCase ) # transfer weights longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() ) longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() ) longformer_for_qa.eval() # save model longformer_for_qa.save_pretrained(__UpperCamelCase ) print(f'''Conversion successful. Model saved under {pytorch_dump_folder_path}''' ) if __name__ == "__main__": A_ = argparse.ArgumentParser() # Required parameters parser.add_argument( "--longformer_model", default=None, type=str, required=True, help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.", ) parser.add_argument( "--longformer_question_answering_ckpt_path", default=None, type=str, required=True, help="Path the official PyTorch Lightning Checkpoint.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) A_ = parser.parse_args() convert_longformer_qa_checkpoint_to_pytorch( args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path )
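A hedged launch sketch for the Lightning-to-Transformers converter above; the checkpoint path is a placeholder and the script file name is an assumption:

# python convert_longformer_qa_checkpoint_to_pytorch.py \
#     --longformer_model longformer-base-4096 \
#     --longformer_question_answering_ckpt_path ./lightning.ckpt \
#     --pytorch_dump_folder_path ./longformer-qa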
42
import pyarrow.parquet as pq import pytest from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config from datasets.features.image import Image from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def __lowercase ( snake_case, snake_case ): """simple docstring""" assert isinstance(snake_case, snake_case ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('''keep_in_memory''', [False, True] ) def __lowercase ( snake_case, snake_case, snake_case ): """simple docstring""" __magic_name__ :Tuple = tmp_path / '''cache''' __magic_name__ :int = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): __magic_name__ :Optional[Any] = ParquetDatasetReader(snake_case, cache_dir=snake_case, keep_in_memory=snake_case ).read() _check_parquet_dataset(snake_case, snake_case ) @pytest.mark.parametrize( '''features''', [ None, {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}, {'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''}, {'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''}, {'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''}, ], ) def __lowercase ( snake_case, snake_case, snake_case ): """simple docstring""" __magic_name__ :List[str] = tmp_path / '''cache''' __magic_name__ :int = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} __magic_name__ :Tuple = features.copy() if features else default_expected_features __magic_name__ :Union[str, Any] = ( Features({feature: Value(snake_case ) for feature, dtype in features.items()} ) if features is not None else None ) __magic_name__ :int = ParquetDatasetReader(snake_case, features=snake_case, cache_dir=snake_case ).read() _check_parquet_dataset(snake_case, snake_case ) @pytest.mark.parametrize('''split''', [None, NamedSplit('''train''' ), '''train''', '''test'''] ) def __lowercase ( snake_case, snake_case, snake_case ): """simple docstring""" __magic_name__ :str = tmp_path / '''cache''' __magic_name__ :List[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} __magic_name__ :int = ParquetDatasetReader(snake_case, cache_dir=snake_case, split=snake_case ).read() _check_parquet_dataset(snake_case, snake_case ) assert dataset.split == split if split else "train" @pytest.mark.parametrize('''path_type''', [str, list] ) def __lowercase ( snake_case, snake_case, snake_case ): """simple docstring""" if issubclass(snake_case, snake_case ): __magic_name__ :Union[str, Any] = parquet_path elif issubclass(snake_case, snake_case ): __magic_name__ :Union[str, Any] = [parquet_path] __magic_name__ :Optional[int] = tmp_path / '''cache''' __magic_name__ :Optional[int] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} __magic_name__ :str = ParquetDatasetReader(snake_case, cache_dir=snake_case ).read() _check_parquet_dataset(snake_case, snake_case ) def __lowercase ( snake_case, snake_case, snake_case=("train",) ): """simple docstring""" assert isinstance(snake_case, 
snake_case ) for split in splits: __magic_name__ :Optional[Any] = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('''keep_in_memory''', [False, True] ) def __lowercase ( snake_case, snake_case, snake_case ): """simple docstring""" __magic_name__ :Any = tmp_path / '''cache''' __magic_name__ :Optional[int] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): __magic_name__ :Tuple = ParquetDatasetReader( {'''train''': parquet_path}, cache_dir=snake_case, keep_in_memory=snake_case ).read() _check_parquet_datasetdict(snake_case, snake_case ) @pytest.mark.parametrize( '''features''', [ None, {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}, {'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''}, {'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''}, {'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''}, ], ) def __lowercase ( snake_case, snake_case, snake_case ): """simple docstring""" __magic_name__ :Optional[Any] = tmp_path / '''cache''' __magic_name__ :Dict = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} __magic_name__ :int = features.copy() if features else default_expected_features __magic_name__ :List[Any] = ( Features({feature: Value(snake_case ) for feature, dtype in features.items()} ) if features is not None else None ) __magic_name__ :Optional[int] = ParquetDatasetReader({'''train''': parquet_path}, features=snake_case, cache_dir=snake_case ).read() _check_parquet_datasetdict(snake_case, snake_case ) @pytest.mark.parametrize('''split''', [None, NamedSplit('''train''' ), '''train''', '''test'''] ) def __lowercase ( snake_case, snake_case, snake_case ): """simple docstring""" if split: __magic_name__ :Dict = {split: parquet_path} else: __magic_name__ :Optional[int] = '''train''' __magic_name__ :Dict = {'''train''': parquet_path, '''test''': parquet_path} __magic_name__ :List[Any] = tmp_path / '''cache''' __magic_name__ :Optional[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} __magic_name__ :Optional[Any] = ParquetDatasetReader(snake_case, cache_dir=snake_case ).read() _check_parquet_datasetdict(snake_case, snake_case, splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() ) def __lowercase ( snake_case, snake_case ): """simple docstring""" __magic_name__ :str = ParquetDatasetWriter(snake_case, tmp_path / '''foo.parquet''' ) assert writer.write() > 0 __magic_name__ :List[Any] = pq.ParquetFile(tmp_path / '''foo.parquet''' ) __magic_name__ :List[Any] = pf.read() assert dataset.data.table == output_table def __lowercase ( snake_case, snake_case ): """simple docstring""" __magic_name__ :List[str] = str(shared_datadir / '''test_image_rgb.jpg''' ) __magic_name__ :Tuple = {'''image''': [image_path]} __magic_name__ :List[Any] = Features({'''image''': Image()} ) __magic_name__ :Tuple = Dataset.from_dict(snake_case, features=snake_case ) __magic_name__ :Union[str, Any] = ParquetDatasetWriter(snake_case, tmp_path / '''foo.parquet''' ) assert writer.write() > 0 __magic_name__ :List[str] = 
Dataset.from_parquet(str(tmp_path / '''foo.parquet''' ) ) assert dataset.features == reloaded_dataset.features __magic_name__ :List[str] = ParquetDatasetReader(str(tmp_path / '''foo.parquet''' ), streaming=snake_case ).read() assert dataset.features == reloaded_iterable_dataset.features @pytest.mark.parametrize( '''feature, expected''', [ (Features({'''foo''': Value('''int32''' )} ), None), (Features({'''image''': Image(), '''foo''': Value('''int32''' )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS), (Features({'''nested''': Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS), ], ) def __lowercase ( snake_case, snake_case ): """simple docstring""" assert get_writer_batch_size(snake_case ) == expected
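A hedged round-trip sketch mirroring the writer test above; the output file name is illustrative:

from datasets import Dataset
from datasets.io.parquet import ParquetDatasetWriter

ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2]})
assert ParquetDatasetWriter(ds, "foo.parquet").write() > 0  # returns bytes written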
0
0
import math def _a ( SCREAMING_SNAKE_CASE ): """simple docstring""" return math.sqrt(SCREAMING_SNAKE_CASE ) * math.sqrt(SCREAMING_SNAKE_CASE ) == num def _a ( SCREAMING_SNAKE_CASE ): """simple docstring""" lowercase__ = 0 lowercase__ = n while left <= right: lowercase__ = (left + right) // 2 if mid**2 == n: return True elif mid**2 > n: lowercase__ = mid - 1 else: lowercase__ = mid + 1 return False if __name__ == "__main__": import doctest doctest.testmod()
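A quick check of the perfect-square test above; because the dump gives both helpers the same obfuscated name `_a`, the later binary-search definition is the one left bound at module level:

assert _a(16) is True   # 4 * 4
assert _a(1) is True
assert _a(26) is False  # between 5**2 and 6**2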
43
def __lowercase ( snake_case ): """simple docstring""" if not isinstance(snake_case, snake_case ): raise ValueError('''multiplicative_persistence() only accepts integral values''' ) if num < 0: raise ValueError('''multiplicative_persistence() does not accept negative values''' ) __magic_name__ :str = 0 __magic_name__ :Dict = str(snake_case ) while len(snake_case ) != 1: __magic_name__ :Optional[Any] = [int(snake_case ) for i in num_string] __magic_name__ :Dict = 1 for i in range(0, len(snake_case ) ): total *= numbers[i] __magic_name__ :int = str(snake_case ) steps += 1 return steps def __lowercase ( snake_case ): """simple docstring""" if not isinstance(snake_case, snake_case ): raise ValueError('''additive_persistence() only accepts integral values''' ) if num < 0: raise ValueError('''additive_persistence() does not accept negative values''' ) __magic_name__ :str = 0 __magic_name__ :Union[str, Any] = str(snake_case ) while len(snake_case ) != 1: __magic_name__ :str = [int(snake_case ) for i in num_string] __magic_name__ :Optional[int] = 0 for i in range(0, len(snake_case ) ): total += numbers[i] __magic_name__ :int = str(snake_case ) steps += 1 return steps if __name__ == "__main__": import doctest doctest.testmod()
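A self-contained sketch of the digit-persistence idea the pair above computes, written independently because the dump reuses one obfuscated name for both functions (`math.prod` needs Python 3.8+):

from math import prod

def persistence(num: int, combine) -> int:
    # Count how many digit reductions it takes to reach a single digit.
    steps = 0
    while num >= 10:
        num = combine(int(d) for d in str(num))
        steps += 1
    return steps

assert persistence(39, prod) == 3   # 39 -> 27 -> 14 -> 4
assert persistence(199, sum) == 3   # 199 -> 19 -> 10 -> 1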
0
0
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase_ : Optional[Any] = logging.get_logger(__name__) UpperCAmelCase_ : List[str] = { 'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json', # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox } class UpperCAmelCase__ ( A ): lowerCAmelCase_ = 'gpt_neox' def __init__( self : Optional[Any],__A : Any=5_0_4_3_2,__A : Optional[Any]=6_1_4_4,__A : Optional[int]=4_4,__A : int=6_4,__A : List[str]=2_4_5_7_6,__A : Union[str, Any]="gelu",__A : Optional[int]=0.25,__A : List[str]=1_0_0_0_0,__A : Optional[Any]=0.0,__A : Optional[Any]=0.0,__A : Tuple=0.1,__A : List[Any]=2_0_4_8,__A : int=0.02,__A : Tuple=1e-5,__A : Dict=True,__A : int=0,__A : Optional[int]=2,__A : int=False,__A : List[Any]=True,__A : List[Any]=None,**__A : Dict,): super().__init__(bos_token_id=__A,eos_token_id=__A,**__A ) _lowerCamelCase : int = vocab_size _lowerCamelCase : Dict = max_position_embeddings _lowerCamelCase : List[Any] = hidden_size _lowerCamelCase : Optional[int] = num_hidden_layers _lowerCamelCase : str = num_attention_heads _lowerCamelCase : Optional[Any] = intermediate_size _lowerCamelCase : Optional[int] = hidden_act _lowerCamelCase : List[str] = rotary_pct _lowerCamelCase : Tuple = rotary_emb_base _lowerCamelCase : Optional[int] = attention_dropout _lowerCamelCase : List[Any] = hidden_dropout _lowerCamelCase : str = classifier_dropout _lowerCamelCase : List[str] = initializer_range _lowerCamelCase : Any = layer_norm_eps _lowerCamelCase : Dict = use_cache _lowerCamelCase : Dict = tie_word_embeddings _lowerCamelCase : List[str] = use_parallel_residual _lowerCamelCase : Tuple = rope_scaling self._rope_scaling_validation() if self.hidden_size % self.num_attention_heads != 0: raise ValueError( "The hidden size is not divisble by the number of attention heads! Make sure to update them!" ) def lowerCamelCase_ ( self : Optional[Any] ): if self.rope_scaling is None: return if not isinstance(self.rope_scaling,__A ) or len(self.rope_scaling ) != 2: raise ValueError( "`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, " f'got {self.rope_scaling}' ) _lowerCamelCase : Union[str, Any] = self.rope_scaling.get("type",__A ) _lowerCamelCase : Tuple = self.rope_scaling.get("factor",__A ) if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: raise ValueError( f'`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}' ) if rope_scaling_factor is None or not isinstance(__A,__A ) or rope_scaling_factor <= 1.0: raise ValueError(f'`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}' )
44
import math import os import re import sys import unittest from pathlib import Path from typing import Tuple from unittest.mock import patch from parameterized import parameterized from transformers.testing_utils import ( CaptureStderr, ExtendSysPath, TestCasePlus, execute_subprocess_async, get_gpu_count, get_torch_dist_unique_port, require_apex, require_bitsandbytes, require_fairscale, require_torch, require_torch_gpu, require_torch_multi_gpu, require_torch_non_multi_gpu, slow, ) from transformers.trainer_callback import TrainerState from transformers.trainer_utils import set_seed SCREAMING_SNAKE_CASE__ : List[Any] = os.path.abspath(os.path.dirname(__file__)) with ExtendSysPath(f"{bindir}/../../examples/pytorch/translation"): from run_translation import main # noqa set_seed(42) SCREAMING_SNAKE_CASE__ : Optional[Any] = """sshleifer/student_marian_en_ro_6_1""" SCREAMING_SNAKE_CASE__ : List[Any] = """sshleifer/tiny-mbart""" @require_torch class lowerCamelCase_ ( lowerCamelCase ): def A ( self , __lowerCAmelCase=False , __lowerCAmelCase=None , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , ): """simple docstring""" __magic_name__ :List[Any] = self.run_trainer( eval_steps=1 , max_len=1_2 , model_name=__lowerCAmelCase , num_train_epochs=1 , distributed=__lowerCAmelCase , extra_args_str=__lowerCAmelCase , predict_with_generate=__lowerCAmelCase , do_train=__lowerCAmelCase , do_eval=__lowerCAmelCase , do_predict=__lowerCAmelCase , ) __magic_name__ :Any = TrainerState.load_from_json(os.path.join(__lowerCAmelCase , '''trainer_state.json''' ) ).log_history if not do_eval: return __magic_name__ :Any = [log for log in logs if '''eval_loss''' in log.keys()] __magic_name__ :str = eval_metrics[0] if predict_with_generate: assert "eval_bleu" in first_step_stats __magic_name__ :Tuple = eval_metrics[-1] assert isinstance(last_step_stats['''eval_bleu'''] , __lowerCAmelCase ) assert not math.isnan(float(last_step_stats['''eval_loss'''] ) ), "eval_loss must not be `nan`" @require_torch_non_multi_gpu def A ( self ): """simple docstring""" self.run_seqaseq_quick() @require_torch_multi_gpu def A ( self ): """simple docstring""" self.run_seqaseq_quick(distributed=__lowerCAmelCase ) @require_torch_multi_gpu def A ( self ): """simple docstring""" self.run_seqaseq_quick(distributed=__lowerCAmelCase ) @unittest.skip('''Requires an update of the env running those tests''' ) @require_torch_multi_gpu @require_fairscale def A ( self ): """simple docstring""" self.run_seqaseq_quick(distributed=__lowerCAmelCase , extra_args_str='''--sharded_ddp simple''' ) @unittest.skip('''Requires an update of the env running those tests''' ) @require_torch_multi_gpu @require_fairscale def A ( self ): """simple docstring""" self.run_seqaseq_quick(distributed=__lowerCAmelCase , extra_args_str='''--sharded_ddp simple --fp16''' ) @unittest.skip('''Requires an update of the env running those tests''' ) @require_torch_multi_gpu @require_fairscale def A ( self ): """simple docstring""" self.run_seqaseq_quick(distributed=__lowerCAmelCase , extra_args_str='''--sharded_ddp zero_dp_2''' , predict_with_generate=__lowerCAmelCase ) @unittest.skip('''Requires an update of the env running those tests''' ) @require_torch_multi_gpu @require_fairscale def A ( self ): """simple docstring""" self.run_seqaseq_quick( distributed=__lowerCAmelCase , extra_args_str='''--sharded_ddp zero_dp_2 --fp16''' , predict_with_generate=__lowerCAmelCase ) @require_apex @require_torch_gpu def A ( self ): """simple docstring""" # 
XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same # program and it breaks other tests that run from the same pytest worker, therefore until this is # sorted out it must be run only in an external program, that is distributed=True in this # test and only under one or more gpus - if we want cpu will need to make a special test # # specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via # 2nd main() call it botches the future eval. # self.run_seqaseq_quick(distributed=__lowerCAmelCase , extra_args_str='''--fp16 --fp16_backend=apex''' ) # test 2nd time - was getting eval_loss': nan' # to reproduce the problem set distributed=False self.run_seqaseq_quick(distributed=__lowerCAmelCase , extra_args_str='''--fp16 --fp16_backend=apex''' ) @parameterized.expand(['''base''', '''low''', '''high''', '''mixed'''] ) @require_torch_multi_gpu def A ( self , __lowerCAmelCase ): """simple docstring""" # as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout __magic_name__ :Any = { # test with the default log_level - should be info and thus log info once '''base''': {'''extra_args_str''': '''''', '''n_matches''': 1}, # test with low log_level and log_level_replica - should be noisy on all processes # now the info string should appear twice on 2 processes '''low''': {'''extra_args_str''': '''--log_level debug --log_level_replica debug''', '''n_matches''': 2}, # test with high log_level and low log_level_replica # now the info string should appear once only on the replica '''high''': {'''extra_args_str''': '''--log_level error --log_level_replica debug''', '''n_matches''': 1}, # test with high log_level and log_level_replica - should be quiet on all processes '''mixed''': {'''extra_args_str''': '''--log_level error --log_level_replica error''', '''n_matches''': 0}, } __magic_name__ :Optional[Any] = experiments[experiment_id] __magic_name__ :List[Any] = {'''distributed''': True, '''predict_with_generate''': False, '''do_eval''': False, '''do_predict''': False} __magic_name__ :Optional[int] = '''Running training''' with CaptureStderr() as cl: self.run_seqaseq_quick(**__lowerCAmelCase , extra_args_str=data['''extra_args_str'''] ) __magic_name__ :int = len(re.findall(__lowerCAmelCase , cl.err ) ) self.assertEqual(__lowerCAmelCase , data['''n_matches'''] ) @slow def A ( self ): """simple docstring""" __magic_name__ :List[str] = self.run_trainer( eval_steps=2 , max_len=1_2_8 , model_name=__lowerCAmelCase , learning_rate=3E-4 , num_train_epochs=1_0 , distributed=__lowerCAmelCase , ) # Check metrics __magic_name__ :Optional[int] = TrainerState.load_from_json(os.path.join(__lowerCAmelCase , '''trainer_state.json''' ) ).log_history __magic_name__ :List[str] = [log for log in logs if '''eval_loss''' in log.keys()] __magic_name__ :Any = eval_metrics[0] __magic_name__ :int = eval_metrics[-1] assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing" assert isinstance(last_step_stats['''eval_bleu'''] , __lowerCAmelCase ) # test if do_predict saves generations and metrics __magic_name__ :List[Any] = os.listdir(__lowerCAmelCase ) __magic_name__ :List[str] = {os.path.basename(__lowerCAmelCase ) for p in contents} assert "generated_predictions.txt" in contents assert "predict_results.json" in contents @slow @require_bitsandbytes def A ( self ): """simple docstring""" from transformers.training_args import OptimizerNames def train_and_return_metrics(__lowerCAmelCase ) -> Tuple[int, float]: __magic_name__ 
:str = '''--skip_memory_metrics 0''' __magic_name__ :Dict = self.run_trainer( max_len=1_2_8 , model_name=__lowerCAmelCase , learning_rate=3E-4 , num_train_epochs=1 , optim=__lowerCAmelCase , distributed=__lowerCAmelCase , extra_args_str=__lowerCAmelCase , do_eval=__lowerCAmelCase , do_predict=__lowerCAmelCase , n_gpus_to_use=1 , ) # Check metrics __magic_name__ :Optional[Any] = TrainerState.load_from_json(Path(__lowerCAmelCase , '''trainer_state.json''' ) ).log_history __magic_name__ :int = int(logs[0]['''train_mem_gpu_peaked_delta'''] / 2**2_0 ) __magic_name__ :Optional[Any] = int(logs[0]['''train_mem_gpu_alloc_delta'''] / 2**2_0 ) __magic_name__ :Any = logs[0]['''train_loss'''] return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss __magic_name__ , __magic_name__ , __magic_name__ :int = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value ) __magic_name__ , __magic_name__ , __magic_name__ :Tuple = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value ) __magic_name__ :Tuple = gpu_alloc_mem_orig - gpu_alloc_mem_bnb __magic_name__ :Tuple = gpu_peak_mem_orig + gpu_alloc_mem_orig __magic_name__ :List[Any] = gpu_peak_mem_bnb + gpu_alloc_mem_bnb __magic_name__ :Optional[int] = gpu_total_mem_orig - gpu_total_mem_bnb # sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which # doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized # in 2 bytes and the diff in optim memory usage is derived as so: # # - normal 25*8=~200MB (8 bytes per param) # - bnb 25*2= ~50MB (2 bytes per param) # # Thus we should expect ~150MB total memory saved. # # Peak memory should be the same - the total should be different by about that same margin # # After leaving a small margin to accommodate for differences between gpus let's check # that we have at least 120MB in savings __magic_name__ :Optional[Any] = 1_2_0 # uncomment the following if this test starts failing - requires py38 for a new print feature # gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb # print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB") # print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB") # print(f"{gpu_alloc_mem_diff=}MB") # print(f"{gpu_peak_mem_diff=}MB") # print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB") # print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB") self.assertGreater( __lowerCAmelCase , __lowerCAmelCase , '''should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got''' F''' a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and''' F''' gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB''' , ) self.assertGreater( __lowerCAmelCase , __lowerCAmelCase , '''should use ~150MB less total gpu memory with BNB, compared to without it for this model but got''' F''' a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and''' F''' gpu_total_mem_bnb={gpu_total_mem_bnb}MB''' , ) self.assertEqual( __lowerCAmelCase , __lowerCAmelCase , F'''loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}''' ) def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = 3E-3 , __lowerCAmelCase = "adafactor" , __lowerCAmelCase = False , __lowerCAmelCase = None , __lowerCAmelCase = 0 , __lowerCAmelCase = True , __lowerCAmelCase = True , __lowerCAmelCase = True , __lowerCAmelCase = True , __lowerCAmelCase = None , ): """simple docstring""" 
__magic_name__ :int = self.test_file_dir / '''../fixtures/tests_samples/wmt_en_ro''' __magic_name__ :Dict = self.get_auto_remove_tmp_dir() __magic_name__ :Tuple = F''' --model_name_or_path {model_name} --train_file {data_dir}/train.json --validation_file {data_dir}/val.json --test_file {data_dir}/test.json --output_dir {output_dir} --overwrite_output_dir --max_train_samples 8 --max_source_length {max_len} --max_target_length {max_len} --do_train --num_train_epochs {str(__lowerCAmelCase )} --per_device_train_batch_size 4 --learning_rate {learning_rate} --warmup_steps 8 --logging_steps 0 --logging_strategy no --save_steps {str(__lowerCAmelCase )} --group_by_length --label_smoothing_factor 0.1 --target_lang ro_RO --source_lang en_XX '''.split() __magic_name__ :str = F''' --do_eval --per_device_eval_batch_size 4 --max_eval_samples 8 --val_max_target_length {max_len} --evaluation_strategy steps --eval_steps {str(__lowerCAmelCase )} '''.split() __magic_name__ :Dict = ''' --do_predict '''.split() __magic_name__ :Optional[int] = [] if do_train: args += args_train if do_eval: args += args_eval if do_predict: args += args_predict if predict_with_generate: args += "--predict_with_generate".split() if do_train: if optim == "adafactor": args += "--adafactor".split() else: args += F'''--optim {optim}'''.split() if extra_args_str is not None: args += extra_args_str.split() if distributed: if n_gpus_to_use is None: __magic_name__ :List[Any] = get_gpu_count() __magic_name__ :Tuple = get_torch_dist_unique_port() __magic_name__ :Union[str, Any] = F''' -m torch.distributed.run --nproc_per_node={n_gpus_to_use} --master_port={master_port} {self.examples_dir_str}/pytorch/translation/run_translation.py '''.split() __magic_name__ :Any = [sys.executable] + distributed_args + args # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die execute_subprocess_async(__lowerCAmelCase , env=self.get_env() ) else: __magic_name__ :List[Any] = ['''run_translation.py'''] + args with patch.object(__lowerCAmelCase , '''argv''' , __lowerCAmelCase ): main() return output_dir
0
0
import logging import os import sys from dataclasses import dataclass, field from typing import Optional import torch from datasets import load_dataset from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor from torchvision.transforms.functional import InterpolationMode import transformers from transformers import ( HfArgumentParser, Trainer, TrainingArguments, ViTImageProcessor, ViTMAEConfig, ViTMAEForPreTraining, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version UpperCamelCase = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.31.0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt") @dataclass class lowerCAmelCase_ : """simple docstring""" _snake_case : Optional[str] = field( default="""cifar10""" , metadata={"""help""": """Name of a dataset from the datasets package"""} ) _snake_case : Optional[str] = field( default=lowercase , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} ) _snake_case : Optional[str] = field( default=lowercase , metadata={"""help""": """The column name of the images in the files."""} ) _snake_case : Optional[str] = field(default=lowercase , metadata={"""help""": """A folder containing the training data."""} ) _snake_case : Optional[str] = field(default=lowercase , metadata={"""help""": """A folder containing the validation data."""} ) _snake_case : Optional[float] = field( default=0.15 , metadata={"""help""": """Percent to split off of train for validation."""} ) _snake_case : Optional[int] = field( default=lowercase , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of training examples to this """ """value if set.""" ) } , ) _snake_case : Optional[int] = field( default=lowercase , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of evaluation examples to this """ """value if set.""" ) } , ) def __a ( self :List[str] ): UpperCamelCase__ :Optional[Any] = {} if self.train_dir is not None: UpperCamelCase__ :int = self.train_dir if self.validation_dir is not None: UpperCamelCase__ :List[str] = self.validation_dir UpperCamelCase__ :Optional[int] = data_files if data_files else None @dataclass class lowerCAmelCase_ : """simple docstring""" _snake_case : str = field( default=lowercase , metadata={ """help""": ( """The model checkpoint for weights initialization.Don't set if you want to train a model from scratch.""" ) } , ) _snake_case : Optional[str] = field( default=lowercase , metadata={"""help""": """Pretrained config name or path if not the same as model_name_or_path"""} ) _snake_case : Optional[str] = field( default=lowercase , metadata={ """help""": ( """Override some existing default config settings when a model is trained from scratch. 
Example: """ """n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index""" ) } , ) _snake_case : Optional[str] = field( default=lowercase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from s3"""} ) _snake_case : str = field( default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , ) _snake_case : str = field(default=lowercase , metadata={"""help""": """Name or path of preprocessor config."""} ) _snake_case : bool = field( default=lowercase , metadata={ """help""": ( """Will use the token generated when running `huggingface-cli login` (necessary to use this script """ """with private models).""" ) } , ) _snake_case : float = field( default=0.75 , metadata={"""help""": """The ratio of the number of masked tokens in the input sequence."""} ) _snake_case : bool = field( default=lowercase , metadata={"""help""": """Whether or not to train with normalized pixel values as target."""} ) @dataclass class lowerCAmelCase_ ( lowercase ): """simple docstring""" _snake_case : float = field( default=1e-3 , metadata={"""help""": """Base learning rate: absolute_lr = base_lr * total_batch_size / 256."""} ) def A ( lowercase__ : Union[str, Any] ) -> Dict: UpperCamelCase__ :Union[str, Any] = torch.stack([example["""pixel_values"""] for example in examples] ) return {"pixel_values": pixel_values} def A ( ) -> Optional[int]: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. UpperCamelCase__ :Optional[int] = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :str = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Union[str, Any] = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("""run_mae""" , lowercase__ , lowercase__ ) # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() UpperCamelCase__ :List[str] = training_args.get_process_log_level() logger.setLevel(lowercase__ ) transformers.utils.logging.set_verbosity(lowercase__ ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}""" + f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" ) logger.info(f"""Training/evaluation parameters {training_args}""" ) # Detecting last checkpoint. 
UpperCamelCase__ :Union[str, Any] = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: UpperCamelCase__ :List[str] = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( f"""Output directory ({training_args.output_dir}) already exists and is not empty. """ """Use --overwrite_output_dir to overcome.""" ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """ """the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" ) # Initialize our dataset. UpperCamelCase__ :Tuple = load_dataset( data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # If we don't have a validation split, split off a percentage of train as validation. UpperCamelCase__ :int = None if """validation""" in ds.keys() else data_args.train_val_split if isinstance(data_args.train_val_split , lowercase__ ) and data_args.train_val_split > 0.0: UpperCamelCase__ :Optional[Any] = ds["""train"""].train_test_split(data_args.train_val_split ) UpperCamelCase__ :Union[str, Any] = split["""train"""] UpperCamelCase__ :Any = split["""test"""] # Load pretrained model and image processor # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. UpperCamelCase__ :Optional[int] = { """cache_dir""": model_args.cache_dir, """revision""": model_args.model_revision, """use_auth_token""": True if model_args.use_auth_token else None, } if model_args.config_name: UpperCamelCase__ :Any = ViTMAEConfig.from_pretrained(model_args.config_name , **lowercase__ ) elif model_args.model_name_or_path: UpperCamelCase__ :Union[str, Any] = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **lowercase__ ) else: UpperCamelCase__ :Optional[Any] = ViTMAEConfig() logger.warning("""You are instantiating a new config instance from scratch.""" ) if model_args.config_overrides is not None: logger.info(f"""Overriding config: {model_args.config_overrides}""" ) config.update_from_string(model_args.config_overrides ) logger.info(f"""New config: {config}""" ) # adapt config config.update( { """mask_ratio""": model_args.mask_ratio, """norm_pix_loss""": model_args.norm_pix_loss, } ) # create image processor if model_args.image_processor_name: UpperCamelCase__ :str = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **lowercase__ ) elif model_args.model_name_or_path: UpperCamelCase__ :Dict = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **lowercase__ ) else: UpperCamelCase__ :Tuple = ViTImageProcessor() # create model if model_args.model_name_or_path: UpperCamelCase__ :Any = ViTMAEForPreTraining.from_pretrained( model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowercase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) else: logger.info("""Training new model from scratch""" ) UpperCamelCase__ :Optional[int] = ViTMAEForPreTraining(lowercase__ ) if training_args.do_train: UpperCamelCase__ :Optional[Any] = ds["""train"""].column_names else: UpperCamelCase__ :Union[str, Any] = 
ds["""validation"""].column_names if data_args.image_column_name is not None: UpperCamelCase__ :Union[str, Any] = data_args.image_column_name elif "image" in column_names: UpperCamelCase__ :Optional[Any] = """image""" elif "img" in column_names: UpperCamelCase__ :List[str] = """img""" else: UpperCamelCase__ :List[Any] = column_names[0] # transformations as done in original MAE paper # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py if "shortest_edge" in image_processor.size: UpperCamelCase__ :List[str] = image_processor.size["""shortest_edge"""] else: UpperCamelCase__ :int = (image_processor.size["""height"""], image_processor.size["""width"""]) UpperCamelCase__ :Any = Compose( [ Lambda(lambda lowercase__ : img.convert("""RGB""" ) if img.mode != "RGB" else img ), RandomResizedCrop(lowercase__ , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ), RandomHorizontalFlip(), ToTensor(), Normalize(mean=image_processor.image_mean , std=image_processor.image_std ), ] ) def preprocess_images(lowercase__ : Tuple ): UpperCamelCase__ :List[Any] = [transforms(lowercase__ ) for image in examples[image_column_name]] return examples if training_args.do_train: if "train" not in ds: raise ValueError("""--do_train requires a train dataset""" ) if data_args.max_train_samples is not None: UpperCamelCase__ :Optional[int] = ds["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) # Set the training transforms ds["train"].set_transform(lowercase__ ) if training_args.do_eval: if "validation" not in ds: raise ValueError("""--do_eval requires a validation dataset""" ) if data_args.max_eval_samples is not None: UpperCamelCase__ :Optional[Any] = ( ds["""validation"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms ds["validation"].set_transform(lowercase__ ) # Compute absolute learning rate UpperCamelCase__ :Tuple = ( training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size ) if training_args.base_learning_rate is not None: UpperCamelCase__ :Any = training_args.base_learning_rate * total_train_batch_size / 256 # Initialize our trainer UpperCamelCase__ :Union[str, Any] = Trainer( model=lowercase__ , args=lowercase__ , train_dataset=ds["""train"""] if training_args.do_train else None , eval_dataset=ds["""validation"""] if training_args.do_eval else None , tokenizer=lowercase__ , data_collator=lowercase__ , ) # Training if training_args.do_train: UpperCamelCase__ :Any = None if training_args.resume_from_checkpoint is not None: UpperCamelCase__ :int = training_args.resume_from_checkpoint elif last_checkpoint is not None: UpperCamelCase__ :Dict = last_checkpoint UpperCamelCase__ :Union[str, Any] = trainer.train(resume_from_checkpoint=lowercase__ ) trainer.save_model() trainer.log_metrics("""train""" , train_result.metrics ) trainer.save_metrics("""train""" , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: UpperCamelCase__ :int = trainer.evaluate() trainer.log_metrics("""eval""" , lowercase__ ) trainer.save_metrics("""eval""" , lowercase__ ) # Write model card and (optionally) push to hub UpperCamelCase__ :Optional[int] = { """tasks""": """masked-auto-encoding""", """dataset""": data_args.dataset_name, """tags""": ["""masked-auto-encoding"""], } if training_args.push_to_hub: trainer.push_to_hub(**lowercase__ ) else: trainer.create_model_card(**lowercase__ ) def A ( lowercase__ : Union[str, Any] ) -> Dict: # 
For xla_spawn (TPUs) main() if __name__ == "__main__": main()
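A hedged launch sketch for the MAE pretraining script above; the flags mirror the dataclass fields it parses, and the output path is a placeholder:

# python run_mae.py \
#     --dataset_name cifar10 \
#     --output_dir ./vit-mae-demo \
#     --do_train --do_eval \
#     --base_learning_rate 1.5e-4 \
#     --mask_ratio 0.75 \
#     --norm_pix_loss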
45
import sys SCREAMING_SNAKE_CASE__ : Optional[Any] = ( """73167176531330624919225119674426574742355349194934""" """96983520312774506326239578318016984801869478851843""" """85861560789112949495459501737958331952853208805511""" """12540698747158523863050715693290963295227443043557""" """66896648950445244523161731856403098711121722383113""" """62229893423380308135336276614282806444486645238749""" """30358907296290491560440772390713810515859307960866""" """70172427121883998797908792274921901699720888093776""" """65727333001053367881220235421809751254540594752243""" """52584907711670556013604839586446706324415722155397""" """53697817977846174064955149290862569321978468622482""" """83972241375657056057490261407972968652414535100474""" """82166370484403199890008895243450658541227588666881""" """16427171479924442928230863465674813919123162824586""" """17866458359124566529476545682848912883142607690042""" """24219022671055626321111109370544217506941658960408""" """07198403850962455444362981230987879927244284909188""" """84580156166097919133875499200524063689912560717606""" """05886116467109405077541002256983155200055935729725""" """71636269561882670428252483600823257530420752963450""" ) def __lowercase ( snake_case = N ): """simple docstring""" __magic_name__ :Optional[int] = -sys.maxsize - 1 for i in range(len(snake_case ) - 1_2 ): __magic_name__ :List[Any] = 1 for j in range(1_3 ): product *= int(n[i + j] ) if product > largest_product: __magic_name__ :str = product return largest_product if __name__ == "__main__": print(f"{solution() = }")
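The scan above recomputes each 13-digit window product from scratch; a hedged equivalent sketch makes the windowing explicit (`math.prod` needs Python 3.8+):

from math import prod

def window_products(digits: str, width: int = 13):
    # Yield the product of every contiguous run of `width` digits.
    for i in range(len(digits) - width + 1):
        yield prod(int(d) for d in digits[i : i + width])

# e.g. max(window_products(...)) over the 1000-digit constant above
# reproduces the solution value.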
0
0
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCAmelCase : List[str] = logging.get_logger(__name__) _lowerCAmelCase : List[str] = { '''google/vivit-b-16x2-kinetics400''': ( '''https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json''' ), # See all Vivit models at https://huggingface.co/models?filter=vivit } class A_ ( _a ): lowerCAmelCase__ = 'vivit' def __init__( self: List[Any] ,__lowerCAmelCase: int=224 ,__lowerCAmelCase: Any=32 ,__lowerCAmelCase: str=[2, 16, 16] ,__lowerCAmelCase: Optional[Any]=3 ,__lowerCAmelCase: List[str]=768 ,__lowerCAmelCase: List[Any]=12 ,__lowerCAmelCase: Optional[int]=12 ,__lowerCAmelCase: Optional[Any]=3_072 ,__lowerCAmelCase: Any="gelu_fast" ,__lowerCAmelCase: Tuple=0.0 ,__lowerCAmelCase: Any=0.0 ,__lowerCAmelCase: Union[str, Any]=0.02 ,__lowerCAmelCase: List[str]=1e-06 ,__lowerCAmelCase: Optional[Any]=True ,**__lowerCAmelCase: Optional[int] ,): '''simple docstring''' _lowerCamelCase : Any = hidden_size _lowerCamelCase : List[str] = num_hidden_layers _lowerCamelCase : Union[str, Any] = num_attention_heads _lowerCamelCase : Any = intermediate_size _lowerCamelCase : Tuple = hidden_act _lowerCamelCase : Tuple = hidden_dropout_prob _lowerCamelCase : Optional[Any] = attention_probs_dropout_prob _lowerCamelCase : List[Any] = initializer_range _lowerCamelCase : int = layer_norm_eps _lowerCamelCase : Tuple = image_size _lowerCamelCase : Dict = num_frames _lowerCamelCase : Optional[int] = tubelet_size _lowerCamelCase : int = num_channels _lowerCamelCase : List[str] = qkv_bias super().__init__(**__lowerCAmelCase )
46
SCREAMING_SNAKE_CASE__ : Tuple = { """a""": """AAAAA""", """b""": """AAAAB""", """c""": """AAABA""", """d""": """AAABB""", """e""": """AABAA""", """f""": """AABAB""", """g""": """AABBA""", """h""": """AABBB""", """i""": """ABAAA""", """j""": """BBBAA""", """k""": """ABAAB""", """l""": """ABABA""", """m""": """ABABB""", """n""": """ABBAA""", """o""": """ABBAB""", """p""": """ABBBA""", """q""": """ABBBB""", """r""": """BAAAA""", """s""": """BAAAB""", """t""": """BAABA""", """u""": """BAABB""", """v""": """BBBAB""", """w""": """BABAA""", """x""": """BABAB""", """y""": """BABBA""", """z""": """BABBB""", """ """: """ """, } SCREAMING_SNAKE_CASE__ : Union[str, Any] = {value: key for key, value in encode_dict.items()} def __lowercase ( snake_case ): """simple docstring""" __magic_name__ :Tuple = '''''' for letter in word.lower(): if letter.isalpha() or letter == " ": encoded += encode_dict[letter] else: raise Exception('''encode() accepts only letters of the alphabet and spaces''' ) return encoded def __lowercase ( snake_case ): """simple docstring""" if set(snake_case ) - {"A", "B", " "} != set(): raise Exception('''decode() accepts only \'A\', \'B\' and spaces''' ) __magic_name__ :Dict = '''''' for word in coded.split(): while len(snake_case ) != 0: decoded += decode_dict[word[:5]] __magic_name__ :int = word[5:] decoded += " " return decoded.strip() if __name__ == "__main__": from doctest import testmod testmod()
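A self-contained sketch of the round trip the pair above implements, written independently because the dump reuses one obfuscated name for both functions; the three-letter table here is a truncated stand-in for the full alphabet above:

enc = {"a": "AAAAA", "b": "AAAAB", "c": "AAABA"}  # truncated table for illustration
dec = {v: k for k, v in enc.items()}

word = "cab"
coded = "".join(enc[ch] for ch in word)
assert "".join(dec[coded[i : i + 5]] for i in range(0, len(coded), 5)) == word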
0
0
from typing import List, Optional, Union from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ = { '''huggingface/informer-tourism-monthly''': ( '''https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json''' ), # See all Informer models at https://huggingface.co/models?filter=informer } class _UpperCamelCase( __lowerCamelCase ): __SCREAMING_SNAKE_CASE : List[Any] = '''informer''' __SCREAMING_SNAKE_CASE : List[Any] = { '''hidden_size''': '''d_model''', '''num_attention_heads''': '''encoder_attention_heads''', '''num_hidden_layers''': '''encoder_layers''', } def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : str = "student_t" , SCREAMING_SNAKE_CASE__ : str = "nll" , SCREAMING_SNAKE_CASE__ : int = 1 , SCREAMING_SNAKE_CASE__ : List[int] = None , SCREAMING_SNAKE_CASE__ : Optional[Union[str, bool]] = "mean" , SCREAMING_SNAKE_CASE__ : int = 0 , SCREAMING_SNAKE_CASE__ : int = 0 , SCREAMING_SNAKE_CASE__ : int = 0 , SCREAMING_SNAKE_CASE__ : int = 0 , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None , SCREAMING_SNAKE_CASE__ : int = 6_4 , SCREAMING_SNAKE_CASE__ : int = 3_2 , SCREAMING_SNAKE_CASE__ : int = 3_2 , SCREAMING_SNAKE_CASE__ : int = 2 , SCREAMING_SNAKE_CASE__ : int = 2 , SCREAMING_SNAKE_CASE__ : int = 2 , SCREAMING_SNAKE_CASE__ : int = 2 , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : str = "gelu" , SCREAMING_SNAKE_CASE__ : float = 0.05 , SCREAMING_SNAKE_CASE__ : float = 0.1 , SCREAMING_SNAKE_CASE__ : float = 0.1 , SCREAMING_SNAKE_CASE__ : float = 0.1 , SCREAMING_SNAKE_CASE__ : float = 0.1 , SCREAMING_SNAKE_CASE__ : int = 1_0_0 , SCREAMING_SNAKE_CASE__ : float = 0.02 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=True , SCREAMING_SNAKE_CASE__ : str = "prob" , SCREAMING_SNAKE_CASE__ : int = 5 , SCREAMING_SNAKE_CASE__ : bool = True , **SCREAMING_SNAKE_CASE__ : Tuple , ): '''simple docstring''' __a : Dict = prediction_length __a : Tuple = context_length or prediction_length __a : Tuple = distribution_output __a : Tuple = loss __a : str = input_size __a : Dict = num_time_features __a : Optional[int] = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7] __a : str = scaling __a : Tuple = num_dynamic_real_features __a : int = num_static_real_features __a : Dict = num_static_categorical_features # set cardinality if cardinality and num_static_categorical_features > 0: if len(SCREAMING_SNAKE_CASE__ ) != num_static_categorical_features: raise ValueError( 'The cardinality should be a list of the same length as `num_static_categorical_features`' ) __a : Optional[Any] = cardinality else: __a : Optional[int] = [0] # set embedding_dimension if embedding_dimension and num_static_categorical_features > 0: if len(SCREAMING_SNAKE_CASE__ ) != num_static_categorical_features: raise ValueError( 'The embedding dimension should be a list of the same length as `num_static_categorical_features`' ) __a : int = embedding_dimension else: __a : List[Any] = [min(5_0 , (cat + 1) // 2 ) for cat in self.cardinality] __a : int = num_parallel_samples # Transformer architecture configuration __a : str = input_size * len(self.lags_sequence ) + self._number_of_features __a : Optional[int] = d_model __a : Union[str, Any] = encoder_attention_heads __a : int = decoder_attention_heads __a : Any = encoder_ffn_dim 
__a : Union[str, Any] = decoder_ffn_dim __a : List[Any] = encoder_layers __a : Optional[int] = decoder_layers __a : int = dropout __a : Optional[Any] = attention_dropout __a : Dict = activation_dropout __a : Union[str, Any] = encoder_layerdrop __a : Optional[int] = decoder_layerdrop __a : List[str] = activation_function __a : str = init_std __a : Optional[int] = use_cache # Informer __a : Union[str, Any] = attention_type __a : str = sampling_factor __a : Dict = distil super().__init__(is_encoder_decoder=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) @property def __lowerCAmelCase ( self : Any ): '''simple docstring''' return ( sum(self.embedding_dimension ) + self.num_dynamic_real_features + self.num_time_features + self.num_static_real_features + self.input_size * 2 # the log1p(abs(loc)) and log(scale) features )
47
import argparse import torch from torch import nn from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration def __lowercase ( snake_case ): """simple docstring""" __magic_name__ :Optional[Any] = [ '''encoder.version''', '''decoder.version''', '''model.encoder.version''', '''model.decoder.version''', '''decoder.output_projection.weight''', '''_float_tensor''', '''encoder.embed_positions._float_tensor''', '''decoder.embed_positions._float_tensor''', ] for k in ignore_keys: state_dict.pop(snake_case, snake_case ) def __lowercase ( snake_case ): """simple docstring""" __magic_name__ , __magic_name__ :Tuple = emb.weight.shape __magic_name__ :int = nn.Linear(snake_case, snake_case, bias=snake_case ) __magic_name__ :str = emb.weight.data return lin_layer def __lowercase ( snake_case ): """simple docstring""" __magic_name__ :int = torch.load(snake_case, map_location='''cpu''' ) __magic_name__ :Optional[Any] = mam_aaa['''args'''] or mam_aaa['''cfg''']['''model'''] __magic_name__ :List[Any] = mam_aaa['''model'''] remove_ignore_keys_(snake_case ) __magic_name__ :Tuple = state_dict['''encoder.embed_tokens.weight'''].shape[0] __magic_name__ :List[str] = MaMaaaConfig( vocab_size=snake_case, max_position_embeddings=1_0_2_4, encoder_layers=args.encoder_layers, decoder_layers=args.decoder_layers, encoder_attention_heads=args.encoder_attention_heads, decoder_attention_heads=args.decoder_attention_heads, encoder_ffn_dim=args.encoder_ffn_embed_dim, decoder_ffn_dim=args.decoder_ffn_embed_dim, d_model=args.encoder_embed_dim, encoder_layerdrop=args.encoder_layerdrop, decoder_layerdrop=args.decoder_layerdrop, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function='''relu''', ) __magic_name__ :int = state_dict['''decoder.embed_tokens.weight'''] __magic_name__ :List[str] = MaMaaaForConditionalGeneration(snake_case ) model.model.load_state_dict(snake_case, strict=snake_case ) __magic_name__ :List[str] = make_linear_from_emb(model.model.shared ) return model if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument("""fairseq_path""", type=str, help="""path to a model.pt on local filesystem.""") parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") SCREAMING_SNAKE_CASE__ : int = parser.parse_args() SCREAMING_SNAKE_CASE__ : Any = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path) model.save_pretrained(args.pytorch_dump_folder_path)
0
0
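A note on the Informer configuration above: the encoder input width is `input_size * len(lags_sequence) + _number_of_features`, where the property sums embedding dimensions, dynamic/time/static features, and two scaling features. Below is a minimal sketch of that arithmetic in plain Python, using values assumed from the defaults visible in the config's `__init__`:

# Hedged sketch: reproduces the Informer feature-size arithmetic with plain Python.
# All values are assumed from the defaults shown in the config snippet above.
input_size = 1
lags_sequence = [1, 2, 3, 4, 5, 6, 7]
num_dynamic_real_features = 0
num_time_features = 0
num_static_real_features = 0
cardinality = [0]  # no static categorical features
embedding_dimension = [min(50, (cat + 1) // 2) for cat in cardinality]

number_of_features = (
    sum(embedding_dimension)
    + num_dynamic_real_features
    + num_time_features
    + num_static_real_features
    + input_size * 2  # the log1p(abs(loc)) and log(scale) features
)
feature_size = input_size * len(lags_sequence) + number_of_features
print(feature_size)  # 1 * 7 + (0 + 0 + 0 + 0 + 2) = 9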
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase__ : int = logging.get_logger(__name__) class A ( SCREAMING_SNAKE_CASE__ ): snake_case__ :Any = 'timm_backbone' def __init__( self : Tuple , __magic_name__ : Tuple=None , __magic_name__ : Optional[Any]=3 , __magic_name__ : Dict=True , __magic_name__ : str=True , __magic_name__ : List[Any]=None , **__magic_name__ : Tuple , ): """simple docstring""" super().__init__(**__magic_name__ ) lowerCAmelCase__ = backbone lowerCAmelCase__ = num_channels lowerCAmelCase__ = features_only lowerCAmelCase__ = use_pretrained_backbone lowerCAmelCase__ = True lowerCAmelCase__ = out_indices if out_indices is not None else (-1,)
48
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available SCREAMING_SNAKE_CASE__ : Dict = { """configuration_canine""": ["""CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CanineConfig"""], """tokenization_canine""": ["""CanineTokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ : str = [ """CANINE_PRETRAINED_MODEL_ARCHIVE_LIST""", """CanineForMultipleChoice""", """CanineForQuestionAnswering""", """CanineForSequenceClassification""", """CanineForTokenClassification""", """CanineLayer""", """CanineModel""", """CaninePreTrainedModel""", """load_tf_weights_in_canine""", ] if TYPE_CHECKING: from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig from .tokenization_canine import CanineTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_canine import ( CANINE_PRETRAINED_MODEL_ARCHIVE_LIST, CanineForMultipleChoice, CanineForQuestionAnswering, CanineForSequenceClassification, CanineForTokenClassification, CanineLayer, CanineModel, CaninePreTrainedModel, load_tf_weights_in_canine, ) else: import sys SCREAMING_SNAKE_CASE__ : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
0
0
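The Canine snippet above follows the library's `_LazyModule` pattern: an import structure maps submodule names to exported symbols, and the heavy imports are deferred until an attribute is first accessed. A minimal sketch of the same idea using a module-level `__getattr__` (PEP 562); the mapping is hypothetical and `json` merely stands in for a heavy submodule. This is meant to live in a package's `__init__.py`, not to run as a script:

import importlib

# Hypothetical import structure: submodule name -> exported symbols.
_import_structure = {"json": ["dumps", "loads"]}

def __getattr__(name):
    # PEP 562: invoked only when `name` is not found by normal lookup, so the
    # target module is imported on first access, not at package import time.
    for module_name, exported in _import_structure.items():
        if name in exported:
            module = importlib.import_module(module_name)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")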
"""simple docstring""" from math import pi, sqrt, tan def lowercase__ ( snake_case_ :float ): if side_length < 0: raise ValueError('''surface_area_cube() only accepts non-negative values''' ) return 6 * side_length**2 def lowercase__ ( snake_case_ :float , snake_case_ :float , snake_case_ :float ): if length < 0 or breadth < 0 or height < 0: raise ValueError('''surface_area_cuboid() only accepts non-negative values''' ) return 2 * ((length * breadth) + (breadth * height) + (length * height)) def lowercase__ ( snake_case_ :float ): if radius < 0: raise ValueError('''surface_area_sphere() only accepts non-negative values''' ) return 4 * pi * radius**2 def lowercase__ ( snake_case_ :float ): if radius < 0: raise ValueError('''surface_area_hemisphere() only accepts non-negative values''' ) return 3 * pi * radius**2 def lowercase__ ( snake_case_ :float , snake_case_ :float ): if radius < 0 or height < 0: raise ValueError('''surface_area_cone() only accepts non-negative values''' ) return pi * radius * (radius + (height**2 + radius**2) ** 0.5) def lowercase__ ( snake_case_ :float , snake_case_ :float , snake_case_ :float ): if radius_a < 0 or radius_a < 0 or height < 0: raise ValueError( '''surface_area_conical_frustum() only accepts non-negative values''' ) __UpperCAmelCase = (height**2 + (radius_a - radius_a) ** 2) ** 0.5 return pi * ((slant_height * (radius_a + radius_a)) + radius_a**2 + radius_a**2) def lowercase__ ( snake_case_ :float , snake_case_ :float ): if radius < 0 or height < 0: raise ValueError('''surface_area_cylinder() only accepts non-negative values''' ) return 2 * pi * radius * (height + radius) def lowercase__ ( snake_case_ :float , snake_case_ :float ): if torus_radius < 0 or tube_radius < 0: raise ValueError('''surface_area_torus() only accepts non-negative values''' ) if torus_radius < tube_radius: raise ValueError( '''surface_area_torus() does not support spindle or self intersecting tori''' ) return 4 * pow(snake_case_ , 2 ) * torus_radius * tube_radius def lowercase__ ( snake_case_ :float , snake_case_ :float ): if length < 0 or width < 0: raise ValueError('''area_rectangle() only accepts non-negative values''' ) return length * width def lowercase__ ( snake_case_ :float ): if side_length < 0: raise ValueError('''area_square() only accepts non-negative values''' ) return side_length**2 def lowercase__ ( snake_case_ :float , snake_case_ :float ): if base < 0 or height < 0: raise ValueError('''area_triangle() only accepts non-negative values''' ) return (base * height) / 2 def lowercase__ ( snake_case_ :float , snake_case_ :float , snake_case_ :float ): if sidea < 0 or sidea < 0 or sidea < 0: raise ValueError('''area_triangle_three_sides() only accepts non-negative values''' ) elif sidea + sidea < sidea or sidea + sidea < sidea or sidea + sidea < sidea: raise ValueError('''Given three sides do not form a triangle''' ) __UpperCAmelCase = (sidea + sidea + sidea) / 2 __UpperCAmelCase = sqrt( semi_perimeter * (semi_perimeter - sidea) * (semi_perimeter - sidea) * (semi_perimeter - sidea) ) return area def lowercase__ ( snake_case_ :float , snake_case_ :float ): if base < 0 or height < 0: raise ValueError('''area_parallelogram() only accepts non-negative values''' ) return base * height def lowercase__ ( snake_case_ :float , snake_case_ :float , snake_case_ :float ): if basea < 0 or basea < 0 or height < 0: raise ValueError('''area_trapezium() only accepts non-negative values''' ) return 1 / 2 * (basea + basea) * height def lowercase__ ( snake_case_ :float ): if radius < 0: 
raise ValueError('''area_circle() only accepts non-negative values''' ) return pi * radius**2 def lowercase__ ( snake_case_ :float , snake_case_ :float ): if radius_x < 0 or radius_y < 0: raise ValueError('''area_ellipse() only accepts non-negative values''' ) return pi * radius_x * radius_y def lowercase__ ( snake_case_ :float , snake_case_ :float ): if diagonal_a < 0 or diagonal_a < 0: raise ValueError('''area_rhombus() only accepts non-negative values''' ) return 1 / 2 * diagonal_a * diagonal_a def lowercase__ ( snake_case_ :int , snake_case_ :float ): if not isinstance(snake_case_ , snake_case_ ) or sides < 3: raise ValueError( '''area_reg_polygon() only accepts integers greater than or \ equal to three as number of sides''' ) elif length < 0: raise ValueError( '''area_reg_polygon() only accepts non-negative values as \ length of a side''' ) return (sides * length**2) / (4 * tan(pi / sides )) if __name__ == "__main__": import doctest doctest.testmod(verbose=True) # verbose so we can see methods missing tests print('[DEMO] Areas of various geometric shapes: \n') print(f"""Rectangle: {area_rectangle(10, 20) = }""") print(f"""Square: {area_square(10) = }""") print(f"""Triangle: {area_triangle(10, 10) = }""") print(f"""Triangle: {area_triangle_three_sides(5, 12, 13) = }""") print(f"""Parallelogram: {area_parallelogram(10, 20) = }""") print(f"""Rhombus: {area_rhombus(10, 20) = }""") print(f"""Trapezium: {area_trapezium(10, 20, 30) = }""") print(f"""Circle: {area_circle(20) = }""") print(f"""Ellipse: {area_ellipse(10, 20) = }""") print('\nSurface Areas of various geometric shapes: \n') print(f"""Cube: {surface_area_cube(20) = }""") print(f"""Cuboid: {surface_area_cuboid(10, 20, 30) = }""") print(f"""Sphere: {surface_area_sphere(20) = }""") print(f"""Hemisphere: {surface_area_hemisphere(20) = }""") print(f"""Cone: {surface_area_cone(10, 20) = }""") print(f"""Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }""") print(f"""Cylinder: {surface_area_cylinder(10, 20) = }""") print(f"""Torus: {surface_area_torus(20, 10) = }""") print(f"""Equilateral Triangle: {area_reg_polygon(3, 10) = }""") print(f"""Square: {area_reg_polygon(4, 10) = }""") print(f"""Regular Pentagon: {area_reg_polygon(5, 10) = }""")
49
import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class lowerCamelCase_ ( lowerCamelCase ): a__ = ['''image_processor''', '''tokenizer'''] a__ = '''ChineseCLIPImageProcessor''' a__ = ('''BertTokenizer''', '''BertTokenizerFast''') def __init__( self , __lowerCAmelCase=None , __lowerCAmelCase=None , **__lowerCAmelCase ): """simple docstring""" __magic_name__ :Tuple = None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''' , __lowerCAmelCase , ) __magic_name__ :Optional[Any] = kwargs.pop('''feature_extractor''' ) __magic_name__ :Tuple = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) super().__init__(__lowerCAmelCase , __lowerCAmelCase ) __magic_name__ :List[Any] = self.image_processor def __call__( self , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , **__lowerCAmelCase ): """simple docstring""" if text is None and images is None: raise ValueError('''You have to specify either text or images. Both cannot be none.''' ) if text is not None: __magic_name__ :int = self.tokenizer(__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase ) if images is not None: __magic_name__ :Dict = self.image_processor(__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase ) if text is not None and images is not None: __magic_name__ :Union[str, Any] = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**__lowerCAmelCase ) , tensor_type=__lowerCAmelCase ) def A ( self , *__lowerCAmelCase , **__lowerCAmelCase ): """simple docstring""" return self.tokenizer.batch_decode(*__lowerCAmelCase , **__lowerCAmelCase ) def A ( self , *__lowerCAmelCase , **__lowerCAmelCase ): """simple docstring""" return self.tokenizer.decode(*__lowerCAmelCase , **__lowerCAmelCase ) @property def A ( self ): """simple docstring""" __magic_name__ :List[Any] = self.tokenizer.model_input_names __magic_name__ :Any = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) @property def A ( self ): """simple docstring""" warnings.warn( '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , __lowerCAmelCase , ) return self.image_processor_class
0
0
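The cone formula in the geometry module above, `pi * radius * (radius + (height**2 + radius**2) ** 0.5)`, is lateral area plus base area: lateral area is `pi * r * slant_height` with `slant_height = sqrt(h**2 + r**2)`. A quick numeric cross-check in plain Python, with no assumptions beyond the formulas shown:

from math import pi, sqrt, isclose

radius, height = 3.0, 4.0
slant_height = sqrt(height**2 + radius**2)   # 5.0 for a 3-4-5 cone
lateral = pi * radius * slant_height         # curved surface
base = pi * radius**2                        # circular base
assert isclose(lateral + base, pi * radius * (radius + slant_height))
print(lateral + base)  # 24 * pi, roughly 75.398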
'''simple docstring''' from __future__ import annotations from scipy.special import comb # type: ignore class UpperCamelCase__ : '''simple docstring''' def __init__( self ,_lowerCAmelCase ): lowerCamelCase__ = list_of_points # Degree determines the flexibility of the curve. # Degree = 1 will produce a straight line. lowerCamelCase__ = len(_lowerCAmelCase ) - 1 def UpperCamelCase_ ( self ,_lowerCAmelCase ): assert 0 <= t <= 1, "Time t must be between 0 and 1." lowerCamelCase__ = [] for i in range(len(self.list_of_points ) ): # basis function for each i output_values.append( comb(self.degree ,_lowerCAmelCase ) * ((1 - t) ** (self.degree - i)) * (t**i) ) # the basis must sum up to 1 for it to produce a valid Bezier curve. assert round(sum(_lowerCAmelCase ) ,5 ) == 1 return output_values def UpperCamelCase_ ( self ,_lowerCAmelCase ): assert 0 <= t <= 1, "Time t must be between 0 and 1." lowerCamelCase__ = self.basis_function(_lowerCAmelCase ) lowerCamelCase__ = 0.0 lowerCamelCase__ = 0.0 for i in range(len(self.list_of_points ) ): # For all points, sum up the product of i-th basis function and i-th point. x += basis_function[i] * self.list_of_points[i][0] y += basis_function[i] * self.list_of_points[i][1] return (x, y) def UpperCamelCase_ ( self ,_lowerCAmelCase = 0.01 ): from matplotlib import pyplot as plt # type: ignore lowerCamelCase__ = [] # x coordinates of points to plot lowerCamelCase__ = [] # y coordinates of points to plot lowerCamelCase__ = 0.0 while t <= 1: lowerCamelCase__ = self.bezier_curve_function(_lowerCAmelCase ) to_plot_x.append(value[0] ) to_plot_y.append(value[1] ) t += step_size lowerCamelCase__ = [i[0] for i in self.list_of_points] lowerCamelCase__ = [i[1] for i in self.list_of_points] plt.plot( _lowerCAmelCase ,_lowerCAmelCase ,color="""blue""" ,label="""Curve of Degree """ + str(self.degree ) ,) plt.scatter(_lowerCAmelCase ,_lowerCAmelCase ,color="""red""" ,label="""Control Points""" ) plt.legend() plt.show() if __name__ == "__main__": import doctest doctest.testmod() BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1 BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2 BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
50
from sklearn.metrics import matthews_corrcoef import datasets SCREAMING_SNAKE_CASE__ : Optional[Any] = """ Compute the Matthews correlation coefficient (MCC) The Matthews correlation coefficient is used in machine learning as a measure of the quality of binary and multiclass classifications. It takes into account true and false positives and negatives and is generally regarded as a balanced measure which can be used even if the classes are of very different sizes. The MCC is in essence a correlation coefficient value between -1 and +1. A coefficient of +1 represents a perfect prediction, 0 an average random prediction and -1 an inverse prediction. The statistic is also known as the phi coefficient. [source: Wikipedia] """ SCREAMING_SNAKE_CASE__ : Union[str, Any] = """ Args: predictions (list of int): Predicted labels, as returned by a model. references (list of int): Ground truth labels. sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`. Returns: matthews_correlation (dict containing float): Matthews correlation. Examples: Example 1, a basic example with only predictions and references as inputs: >>> matthews_metric = datasets.load_metric(\"matthews_correlation\") >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2], ... predictions=[1, 2, 2, 0, 3, 3]) >>> print(round(results['matthews_correlation'], 2)) 0.54 Example 2, the same example as above, but also including sample weights: >>> matthews_metric = datasets.load_metric(\"matthews_correlation\") >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2], ... predictions=[1, 2, 2, 0, 3, 3], ... sample_weight=[0.5, 3, 1, 1, 1, 2]) >>> print(round(results['matthews_correlation'], 2)) 0.1 Example 3, the same example as above, but with sample weights that cause a negative correlation: >>> matthews_metric = datasets.load_metric(\"matthews_correlation\") >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2], ... predictions=[1, 2, 2, 0, 3, 3], ... sample_weight=[0.5, 1, 0, 0, 0, 1]) >>> print(round(results['matthews_correlation'], 2)) -0.25 """ SCREAMING_SNAKE_CASE__ : int = """\ @article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011} } """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCamelCase_ ( datasets.Metric ): def A ( self ): """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''int32''' ), '''references''': datasets.Value('''int32''' ), } ) , reference_urls=[ '''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html''' ] , ) def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None ): """simple docstring""" return { "matthews_correlation": float(matthews_corrcoef(__lowerCAmelCase , __lowerCAmelCase , sample_weight=__lowerCAmelCase ) ), }
0
0
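The `basis_function` in the Bezier class above evaluates the Bernstein polynomials `C(n, i) * (1 - t)**(n - i) * t**i`, which sum to 1 for every `t` in [0, 1]; the curve point is the weight-blended sum of the control points. A standalone sketch of the same evaluation for a degree-2 curve (the control points are arbitrary example values; `math.comb` replaces the scipy import):

from math import comb, isclose

points = [(0.0, 0.0), (5.0, 5.0), (5.0, 0.0)]  # example degree-2 control points
n = len(points) - 1

def bezier_point(t: float) -> tuple[float, float]:
    # Bernstein weights for parameter t; they always sum to 1.
    weights = [comb(n, i) * (1 - t) ** (n - i) * t**i for i in range(n + 1)]
    assert isclose(sum(weights), 1.0)
    x = sum(w * p[0] for w, p in zip(weights, points))
    y = sum(w * p[1] for w, p in zip(weights, points))
    return (x, y)

print(bezier_point(0.0))  # (0.0, 0.0) -> first control point
print(bezier_point(1.0))  # (5.0, 0.0) -> last control point
print(bezier_point(0.5))  # (3.75, 2.5) -> curve midpoint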
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging a__ : Dict = logging.get_logger(__name__) a__ : Tuple = { 'hustvl/yolos-small': 'https://huggingface.co/hustvl/yolos-small/resolve/main/config.json', # See all YOLOS models at https://huggingface.co/models?filter=yolos } class lowerCAmelCase__ ( UpperCAmelCase_ ): '''simple docstring''' _lowerCamelCase ="yolos" def __init__( self : Any , a__ : List[Any]=768 , a__ : str=12 , a__ : Tuple=12 , a__ : Union[str, Any]=3072 , a__ : int="gelu" , a__ : Tuple=0.0 , a__ : Dict=0.0 , a__ : Dict=0.02 , a__ : Tuple=1e-1_2 , a__ : str=[512, 864] , a__ : List[Any]=16 , a__ : Dict=3 , a__ : str=True , a__ : Union[str, Any]=100 , a__ : Tuple=True , a__ : List[str]=False , a__ : Optional[Any]=1 , a__ : Dict=5 , a__ : int=2 , a__ : str=5 , a__ : Tuple=2 , a__ : Tuple=0.1 , **a__ : Optional[int] , ): super().__init__(**a__ ) UpperCAmelCase = hidden_size UpperCAmelCase = num_hidden_layers UpperCAmelCase = num_attention_heads UpperCAmelCase = intermediate_size UpperCAmelCase = hidden_act UpperCAmelCase = hidden_dropout_prob UpperCAmelCase = attention_probs_dropout_prob UpperCAmelCase = initializer_range UpperCAmelCase = layer_norm_eps UpperCAmelCase = image_size UpperCAmelCase = patch_size UpperCAmelCase = num_channels UpperCAmelCase = qkv_bias UpperCAmelCase = num_detection_tokens UpperCAmelCase = use_mid_position_embeddings UpperCAmelCase = auxiliary_loss # Hungarian matcher UpperCAmelCase = class_cost UpperCAmelCase = bbox_cost UpperCAmelCase = giou_cost # Loss coefficients UpperCAmelCase = bbox_loss_coefficient UpperCAmelCase = giou_loss_coefficient UpperCAmelCase = eos_coefficient class lowerCAmelCase__ ( UpperCAmelCase_ ): '''simple docstring''' _lowerCamelCase =version.parse("1.11" ) @property def __snake_case ( self : str ): return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def __snake_case ( self : List[str] ): return 1e-4 @property def __snake_case ( self : Union[str, Any] ): return 12
51
from __future__ import annotations def __lowercase ( snake_case, snake_case ): """simple docstring""" print(f'''Vertex\tShortest Distance from vertex {src}''' ) for i, d in enumerate(snake_case ): print(f'''{i}\t\t{d}''' ) def __lowercase ( snake_case, snake_case, snake_case ): """simple docstring""" for j in range(snake_case ): __magic_name__ , __magic_name__ , __magic_name__ :Tuple = (graph[j][k] for k in ['''src''', '''dst''', '''weight''']) if distance[u] != float('''inf''' ) and distance[u] + w < distance[v]: return True return False def __lowercase ( snake_case, snake_case, snake_case, snake_case ): """simple docstring""" __magic_name__ :List[Any] = [float('''inf''' )] * vertex_count __magic_name__ :Tuple = 0.0 for _ in range(vertex_count - 1 ): for j in range(snake_case ): __magic_name__ , __magic_name__ , __magic_name__ :Dict = (graph[j][k] for k in ['''src''', '''dst''', '''weight''']) if distance[u] != float('''inf''' ) and distance[u] + w < distance[v]: __magic_name__ :Tuple = distance[u] + w __magic_name__ :Tuple = check_negative_cycle(snake_case, snake_case, snake_case ) if negative_cycle_exists: raise Exception('''Negative cycle found''' ) return distance if __name__ == "__main__": import doctest doctest.testmod() SCREAMING_SNAKE_CASE__ : Tuple = int(input("""Enter number of vertices: """).strip()) SCREAMING_SNAKE_CASE__ : Any = int(input("""Enter number of edges: """).strip()) SCREAMING_SNAKE_CASE__ : list[dict[str, int]] = [{} for _ in range(E)] for i in range(E): print("""Edge """, i + 1) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = ( int(x) for x in input("""Enter source, destination, weight: """).strip().split(""" """) ) SCREAMING_SNAKE_CASE__ : Dict = {"""src""": src, """dst""": dest, """weight""": weight} SCREAMING_SNAKE_CASE__ : List[Any] = int(input("""\nEnter shortest path source:""").strip()) SCREAMING_SNAKE_CASE__ : List[str] = bellman_ford(graph, V, E, source) print_distance(shortest_distance, 0)
0
0
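The Bellman-Ford routine above relaxes every edge `V - 1` times and then runs one extra pass: any further improvement proves a negative cycle. A self-contained sketch on a tiny made-up graph, with edge dicts mirroring the `{'src', 'dst', 'weight'}` layout used above:

# Hedged sketch of Bellman-Ford; the example graph is hypothetical.
INF = float("inf")

def bellman_ford(edges, vertex_count, src):
    distance = [INF] * vertex_count
    distance[src] = 0.0
    for _ in range(vertex_count - 1):        # relax all edges V-1 times
        for e in edges:
            u, v, w = e["src"], e["dst"], e["weight"]
            if distance[u] != INF and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w
    for e in edges:                          # extra pass: improvement => negative cycle
        u, v, w = e["src"], e["dst"], e["weight"]
        if distance[u] != INF and distance[u] + w < distance[v]:
            raise Exception("Negative cycle found")
    return distance

edges = [
    {"src": 0, "dst": 1, "weight": 4},
    {"src": 0, "dst": 2, "weight": 1},
    {"src": 2, "dst": 1, "weight": 2},
]
print(bellman_ford(edges, 3, 0))  # [0.0, 3.0, 1.0]: 0->2->1 beats the direct edge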
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging A = logging.get_logger(__name__) A = { '''microsoft/trocr-base-handwritten''': ( '''https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json''' ), # See all TrOCR models at https://huggingface.co/models?filter=trocr } class __lowercase ( _UpperCamelCase ): '''simple docstring''' __lowerCAmelCase = '''trocr''' __lowerCAmelCase = ['''past_key_values'''] __lowerCAmelCase = { '''num_attention_heads''': '''decoder_attention_heads''', '''hidden_size''': '''d_model''', '''num_hidden_layers''': '''decoder_layers''', } def __init__( self , _UpperCAmelCase=50265 , _UpperCAmelCase=1024 , _UpperCAmelCase=12 , _UpperCAmelCase=16 , _UpperCAmelCase=4096 , _UpperCAmelCase="gelu" , _UpperCAmelCase=512 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=2 , _UpperCAmelCase=0.0_2 , _UpperCAmelCase=0.0 , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=1 , _UpperCAmelCase=0 , _UpperCAmelCase=2 , **_UpperCAmelCase , ): __a : List[str] = vocab_size __a : Optional[Any] = d_model __a : Optional[Any] = decoder_layers __a : Union[str, Any] = decoder_attention_heads __a : int = decoder_ffn_dim __a : List[Any] = activation_function __a : Any = max_position_embeddings __a : Dict = dropout __a : List[Any] = attention_dropout __a : Optional[Any] = activation_dropout __a : str = init_std __a : List[str] = decoder_layerdrop __a : Union[str, Any] = use_cache __a : Optional[Any] = scale_embedding __a : List[Any] = use_learned_position_embeddings __a : Optional[int] = layernorm_embedding super().__init__( pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , decoder_start_token_id=_UpperCAmelCase , **_UpperCAmelCase , )
52
from __future__ import annotations import unittest from transformers import RoFormerConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForMultipleChoice, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerModel, ) from transformers.models.roformer.modeling_tf_roformer import ( TFRoFormerSelfAttention, TFRoFormerSinusoidalPositionalEmbedding, ) class lowerCamelCase_ : def __init__( self , __lowerCAmelCase , __lowerCAmelCase=1_3 , __lowerCAmelCase=7 , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=9_9 , __lowerCAmelCase=3_2 , __lowerCAmelCase=2 , __lowerCAmelCase=4 , __lowerCAmelCase=3_7 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=5_1_2 , __lowerCAmelCase=1_6 , __lowerCAmelCase=2 , __lowerCAmelCase=0.02 , __lowerCAmelCase=3 , __lowerCAmelCase=4 , __lowerCAmelCase=None , ): """simple docstring""" __magic_name__ :Optional[int] = parent __magic_name__ :List[Any] = 1_3 __magic_name__ :Union[str, Any] = 7 __magic_name__ :Optional[Any] = True __magic_name__ :Tuple = True __magic_name__ :List[str] = True __magic_name__ :List[Any] = True __magic_name__ :int = 9_9 __magic_name__ :Any = 3_2 __magic_name__ :Union[str, Any] = 2 __magic_name__ :List[str] = 4 __magic_name__ :List[Any] = 3_7 __magic_name__ :Tuple = '''gelu''' __magic_name__ :Any = 0.1 __magic_name__ :str = 0.1 __magic_name__ :List[str] = 5_1_2 __magic_name__ :int = 1_6 __magic_name__ :Any = 2 __magic_name__ :List[Any] = 0.02 __magic_name__ :Optional[Any] = 3 __magic_name__ :Tuple = 4 __magic_name__ :Optional[Any] = None def A ( self ): """simple docstring""" __magic_name__ :Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __magic_name__ :str = None if self.use_input_mask: __magic_name__ :Optional[int] = random_attention_mask([self.batch_size, self.seq_length] ) __magic_name__ :str = None if self.use_token_type_ids: __magic_name__ :List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __magic_name__ :Union[str, Any] = None __magic_name__ :Tuple = None __magic_name__ :str = None if self.use_labels: __magic_name__ :List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __magic_name__ :List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __magic_name__ :List[Any] = ids_tensor([self.batch_size] , self.num_choices ) __magic_name__ :str = RoFormerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=__lowerCAmelCase , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , 
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): """simple docstring""" __magic_name__ :int = TFRoFormerModel(config=__lowerCAmelCase ) __magic_name__ :Optional[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} __magic_name__ :List[str] = [input_ids, input_mask] __magic_name__ :Any = model(__lowerCAmelCase ) __magic_name__ :List[str] = model(__lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): """simple docstring""" __magic_name__ :Dict = True __magic_name__ :List[str] = TFRoFormerForCausalLM(config=__lowerCAmelCase ) __magic_name__ :str = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } __magic_name__ :Optional[Any] = model(__lowerCAmelCase )['''logits'''] self.parent.assertListEqual( list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] ) def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): """simple docstring""" __magic_name__ :Optional[Any] = TFRoFormerForMaskedLM(config=__lowerCAmelCase ) __magic_name__ :Any = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } __magic_name__ :Dict = model(__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): """simple docstring""" __magic_name__ :int = self.num_labels __magic_name__ :str = TFRoFormerForSequenceClassification(config=__lowerCAmelCase ) __magic_name__ :Optional[int] = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } __magic_name__ :str = model(__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): """simple docstring""" __magic_name__ :Union[str, Any] = self.num_choices __magic_name__ :Tuple = TFRoFormerForMultipleChoice(config=__lowerCAmelCase ) __magic_name__ :int = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) ) __magic_name__ :Optional[Any] = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) ) __magic_name__ :Union[str, Any] = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) ) __magic_name__ :str = { '''input_ids''': multiple_choice_inputs_ids, '''attention_mask''': multiple_choice_input_mask, '''token_type_ids''': multiple_choice_token_type_ids, } __magic_name__ :Tuple = model(__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): """simple docstring""" __magic_name__ :Optional[int] = self.num_labels __magic_name__ :Any = TFRoFormerForTokenClassification(config=__lowerCAmelCase ) __magic_name__ :str = { '''input_ids''': input_ids, '''attention_mask''': input_mask, 
'''token_type_ids''': token_type_ids, } __magic_name__ :Dict = model(__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): """simple docstring""" __magic_name__ :List[str] = TFRoFormerForQuestionAnswering(config=__lowerCAmelCase ) __magic_name__ :List[str] = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } __magic_name__ :Union[str, Any] = model(__lowerCAmelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def A ( self ): """simple docstring""" __magic_name__ :Union[str, Any] = self.prepare_config_and_inputs() ( ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ) :Union[str, Any] = config_and_inputs __magic_name__ :Optional[Any] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_tf class lowerCamelCase_ ( lowerCamelCase , lowerCamelCase , unittest.TestCase ): a__ = ( ( TFRoFormerModel, TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerForMultipleChoice, ) if is_tf_available() else () ) a__ = ( { '''feature-extraction''': TFRoFormerModel, '''fill-mask''': TFRoFormerForMaskedLM, '''question-answering''': TFRoFormerForQuestionAnswering, '''text-classification''': TFRoFormerForSequenceClassification, '''text-generation''': TFRoFormerForCausalLM, '''token-classification''': TFRoFormerForTokenClassification, '''zero-shot''': TFRoFormerForSequenceClassification, } if is_tf_available() else {} ) a__ = False a__ = False def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): """simple docstring""" if pipeline_test_casse_name == "TextGenerationPipelineTests": return True return False def A ( self ): """simple docstring""" __magic_name__ :List[str] = TFRoFormerModelTester(self ) __magic_name__ :List[str] = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=3_7 ) def A ( self ): """simple docstring""" self.config_tester.run_common_tests() def A ( self ): """simple docstring""" __magic_name__ :Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCAmelCase ) def A ( self ): """simple docstring""" __magic_name__ :Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__lowerCAmelCase ) def A ( self ): """simple docstring""" __magic_name__ :Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head(*__lowerCAmelCase ) def A ( self ): """simple docstring""" __magic_name__ :Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*__lowerCAmelCase ) def A ( self ): """simple docstring""" __magic_name__ :Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__lowerCAmelCase ) def A ( self ): """simple docstring""" __magic_name__ :Dict = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_for_sequence_classification(*__lowerCAmelCase ) def A ( self ): """simple docstring""" __magic_name__ :Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__lowerCAmelCase ) @slow def A ( self ): """simple docstring""" __magic_name__ :Optional[Any] = TFRoFormerModel.from_pretrained('''junnyu/roformer_chinese_base''' ) self.assertIsNotNone(__lowerCAmelCase ) @require_tf class lowerCamelCase_ ( unittest.TestCase ): @slow def A ( self ): """simple docstring""" __magic_name__ :int = TFRoFormerForMaskedLM.from_pretrained('''junnyu/roformer_chinese_base''' ) __magic_name__ :Dict = tf.constant([[0, 1, 2, 3, 4, 5]] ) __magic_name__ :Optional[Any] = model(__lowerCAmelCase )[0] # TODO Replace vocab size __magic_name__ :int = 5_0_0_0_0 __magic_name__ :Tuple = [1, 6, vocab_size] self.assertEqual(output.shape , __lowerCAmelCase ) print(output[:, :3, :3] ) # TODO Replace values below with what was printed above. __magic_name__ :Any = tf.constant( [ [ [-0.12053341, -1.0264901, 0.29221946], [-1.5133783, 0.197433, 0.15190607], [-5.0135403, -3.900256, -0.84038764], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , __lowerCAmelCase , atol=1E-4 ) @require_tf class lowerCamelCase_ ( unittest.TestCase ): a__ = 1e-4 def A ( self ): """simple docstring""" __magic_name__ :Optional[int] = tf.constant([[4, 1_0]] ) __magic_name__ :Optional[int] = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 ) __magic_name__ :Optional[Any] = emba(input_ids.shape ) __magic_name__ :List[str] = tf.constant( [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]] ) tf.debugging.assert_near(__lowerCAmelCase , __lowerCAmelCase , atol=self.tolerance ) def A ( self ): """simple docstring""" __magic_name__ :Tuple = tf.constant( [ [0.0000, 0.0000, 0.0000, 0.0000, 0.0000], [0.8415, 0.8219, 0.8020, 0.7819, 0.7617], [0.9093, 0.9364, 0.9581, 0.9749, 0.9870], ] ) __magic_name__ :Union[str, Any] = TFRoFormerSinusoidalPositionalEmbedding(num_positions=5_1_2 , embedding_dim=5_1_2 ) emba([2, 1_6, 5_1_2] ) __magic_name__ :Optional[int] = emba.weight[:3, :5] tf.debugging.assert_near(__lowerCAmelCase , __lowerCAmelCase , atol=self.tolerance ) @require_tf class lowerCamelCase_ ( unittest.TestCase ): a__ = 1e-4 def A ( self ): """simple docstring""" # 2,12,16,64 __magic_name__ :int = tf.reshape(tf.range(2 * 1_2 * 1_6 * 6_4 , dtype=tf.floataa ) , shape=(2, 1_2, 1_6, 6_4) ) / 1_0_0 __magic_name__ :str = -tf.reshape(tf.range(2 * 1_2 * 1_6 * 6_4 , dtype=tf.floataa ) , shape=(2, 1_2, 1_6, 6_4) ) / 1_0_0 __magic_name__ :int = TFRoFormerSinusoidalPositionalEmbedding(num_positions=3_2 , embedding_dim=6_4 ) __magic_name__ :List[str] = embed_positions([2, 1_6, 7_6_8] )[None, None, :, :] __magic_name__ , __magic_name__ :Union[str, Any] = TFRoFormerSelfAttention.apply_rotary_position_embeddings( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) __magic_name__ :Tuple = tf.constant( [ [0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700], [-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343], [-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985], [-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871], [0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980], [3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253], ] ) __magic_name__ :List[str] = tf.constant( [ [0.0000, -0.0100, -0.0200, -0.0300, -0.0400, 
-0.0500, -0.0600, -0.0700], [0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343], [1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985], [2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871], [-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980], [-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253], ] ) tf.debugging.assert_near(query_layer[0, 0, :6, :8] , __lowerCAmelCase , atol=self.tolerance ) tf.debugging.assert_near(key_layer[0, 0, :6, :8] , __lowerCAmelCase , atol=self.tolerance )
0
0
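The RoFormer test above exercises `apply_rotary_position_embeddings`, which rotates pairs of query/key dimensions by position-dependent angles so that attention scores depend on relative position. A minimal NumPy sketch of the common half-split rotary formulation `x' = x * cos + rotate_half(x) * sin`; rotary implementations differ in how they pair dimensions, so treat this as illustrative rather than the exact TF utility:

import numpy as np

def rotate_half(x: np.ndarray) -> np.ndarray:
    # Pair dimension i with i + dim/2 and rotate: (a, b) -> (-b, a).
    a, b = np.split(x, 2, axis=-1)
    return np.concatenate([-b, a], axis=-1)

def apply_rotary(x: np.ndarray, positions: np.ndarray) -> np.ndarray:
    dim = x.shape[-1]                                   # must be even
    inv_freq = 1.0 / (10000 ** (np.arange(dim // 2) / (dim // 2)))
    angles = positions[:, None] * inv_freq[None, :]     # (seq, dim/2)
    cos = np.concatenate([np.cos(angles), np.cos(angles)], axis=-1)
    sin = np.concatenate([np.sin(angles), np.sin(angles)], axis=-1)
    return x * cos + rotate_half(x) * sin

q = np.random.randn(16, 64)                 # (seq_len, head_dim)
pos = np.arange(16, dtype=float)
q_rot = apply_rotary(q, pos)
# A pure rotation preserves per-token norms:
print(np.allclose(np.linalg.norm(q_rot, axis=-1), np.linalg.norm(q, axis=-1)))  # True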
from dataclasses import dataclass, field from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union import pyarrow as pa if TYPE_CHECKING: from .features import FeatureType @dataclass class _UpperCAmelCase : """simple docstring""" a_ = 42 a_ = None # Automatically constructed a_ = "dict" a_ = None a_ = field(default="""Translation""" , init=_UpperCamelCase , repr=_UpperCamelCase ) def __call__( self : int ) -> Dict: return pa.struct({lang: pa.string() for lang in sorted(self.languages )} ) def lowercase ( self : List[str] ) -> Union["FeatureType", Dict[str, "FeatureType"]]: from .features import Value return {k: Value('string' ) for k in sorted(self.languages )} @dataclass class _UpperCAmelCase : """simple docstring""" a_ = None a_ = None a_ = None # Automatically constructed a_ = "dict" a_ = None a_ = field(default="""TranslationVariableLanguages""" , init=_UpperCamelCase , repr=_UpperCamelCase ) def lowercase ( self : str ) -> Union[str, Any]: __lowerCAmelCase = sorted(set(self.languages ) ) if self.languages else None __lowerCAmelCase = len(self.languages ) if self.languages else None def __call__( self : Union[str, Any] ) -> str: return pa.struct({'language': pa.list_(pa.string() ), 'translation': pa.list_(pa.string() )} ) def lowercase ( self : Optional[int] , lowerCAmelCase_ : List[Any] ) -> List[str]: __lowerCAmelCase = set(self.languages ) if self.languages and set(lowerCAmelCase_ ) - lang_set: raise ValueError( f"""Some languages in example ({", ".join(sorted(set(lowerCAmelCase_ ) - lang_set ) )}) are not in valid set ({", ".join(lowerCAmelCase_ )}).""" ) # Convert dictionary into tuples, splitting out cases where there are # multiple translations for a single language. __lowerCAmelCase = [] for lang, text in translation_dict.items(): if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): translation_tuples.append((lang, text) ) else: translation_tuples.extend([(lang, el) for el in text] ) # Ensure translations are in ascending order by language code. __lowerCAmelCase , __lowerCAmelCase = zip(*sorted(lowerCAmelCase_ ) ) return {"language": languages, "translation": translations} def lowercase ( self : int ) -> Union["FeatureType", Dict[str, "FeatureType"]]: from .features import Sequence, Value return { "language": Sequence(Value('string' ) ), "translation": Sequence(Value('string' ) ), }
53
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available SCREAMING_SNAKE_CASE__ : Optional[int] = {"""tokenization_herbert""": ["""HerbertTokenizer"""]} try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ : Optional[Any] = ["""HerbertTokenizerFast"""] if TYPE_CHECKING: from .tokenization_herbert import HerbertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_herbert_fast import HerbertTokenizerFast else: import sys SCREAMING_SNAKE_CASE__ : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
0
0
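The `TranslationVariableLanguages` encoder above flattens a language-to-text dict into two aligned, sorted lists, repeating the language code when one language carries several translations. A plain-Python sketch of that flattening (the example data is made up); note that sorting the tuples wholesale also orders a language's texts alphabetically:

translation_dict = {"fr": "bonjour", "de": ["hallo", "guten tag"]}

pairs = []
for lang, text in translation_dict.items():
    if isinstance(text, str):
        pairs.append((lang, text))
    else:
        pairs.extend((lang, t) for t in text)

languages, translations = zip(*sorted(pairs))  # ascending by language code
print(languages)     # ('de', 'de', 'fr')
print(translations)  # ('guten tag', 'hallo', 'bonjour')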
import random from .binary_exp_mod import bin_exp_mod def a__ ( lowercase__ , lowercase__=1_0_0_0 ): '''simple docstring''' if n < 2: return False if n % 2 == 0: return n == 2 # this means n is odd UpperCAmelCase_ =n - 1 UpperCAmelCase_ =0 while d % 2 == 0: d //= 2 exp += 1 # n - 1=d*(2**exp) UpperCAmelCase_ =0 while count < prec: UpperCAmelCase_ =random.randint(2 , n - 1 ) UpperCAmelCase_ =bin_exp_mod(lowercase__ , lowercase__ , lowercase__ ) if b != 1: UpperCAmelCase_ =True for _ in range(lowercase__ ): if b == n - 1: UpperCAmelCase_ =False break UpperCAmelCase_ =b * b b %= n if flag: return False count += 1 return True if __name__ == "__main__": __lowercase : str =abs(int(input("""Enter bound : """).strip())) print("""Here's the list of primes:""") print(""", """.join(str(i) for i in range(n + 1) if is_prime_big(i)))
54
import argparse import gdown import numpy as np import torch from huggingface_hub import hf_hub_download from transformers import ( CLIPTokenizer, CLIPTokenizerFast, VideoMAEImageProcessor, XCLIPConfig, XCLIPModel, XCLIPProcessor, XCLIPTextConfig, XCLIPVisionConfig, ) def __lowercase ( snake_case, snake_case ): """simple docstring""" __magic_name__ :str = XCLIPTextConfig() # derive patch size from model name __magic_name__ :Union[str, Any] = model_name.find('''patch''' ) __magic_name__ :Optional[Any] = int(model_name[start_idx + len('''patch''' ) : start_idx + len('''patch''' ) + 2] ) __magic_name__ :int = XCLIPVisionConfig(patch_size=snake_case, num_frames=snake_case ) if "large" in model_name: __magic_name__ :Dict = 7_6_8 __magic_name__ :int = 3_0_7_2 __magic_name__ :List[Any] = 1_2 __magic_name__ :str = 1_0_2_4 __magic_name__ :Any = 4_0_9_6 __magic_name__ :Optional[Any] = 1_6 __magic_name__ :Union[str, Any] = 2_4 __magic_name__ :Union[str, Any] = 7_6_8 __magic_name__ :Tuple = 3_0_7_2 if model_name == "xclip-large-patch14-16-frames": __magic_name__ :List[str] = 3_3_6 __magic_name__ :Any = XCLIPConfig.from_text_vision_configs(snake_case, snake_case ) if "large" in model_name: __magic_name__ :str = 7_6_8 return config def __lowercase ( snake_case ): """simple docstring""" if name == "token_embedding.weight": __magic_name__ :Any = name.replace('''token_embedding.weight''', '''text_model.embeddings.token_embedding.weight''' ) if name == "positional_embedding": __magic_name__ :Any = name.replace('''positional_embedding''', '''text_model.embeddings.position_embedding.weight''' ) if "ln_1" in name: __magic_name__ :List[str] = name.replace('''ln_1''', '''layer_norm1''' ) if "ln_2" in name: __magic_name__ :str = name.replace('''ln_2''', '''layer_norm2''' ) if "c_fc" in name: __magic_name__ :List[Any] = name.replace('''c_fc''', '''fc1''' ) if "c_proj" in name: __magic_name__ :Any = name.replace('''c_proj''', '''fc2''' ) if name.startswith('''transformer.resblocks''' ): __magic_name__ :Any = name.replace('''transformer.resblocks''', '''text_model.encoder.layers''' ) if "attn.out_proj" in name and "message" not in name: __magic_name__ :Union[str, Any] = name.replace('''attn.out_proj''', '''self_attn.out_proj''' ) if "ln_final" in name: __magic_name__ :Tuple = name.replace('''ln_final''', '''text_model.final_layer_norm''' ) # visual encoder if name == "visual.class_embedding": __magic_name__ :List[Any] = name.replace('''visual.class_embedding''', '''vision_model.embeddings.class_embedding''' ) if name == "visual.positional_embedding": __magic_name__ :Any = name.replace('''visual.positional_embedding''', '''vision_model.embeddings.position_embedding.weight''' ) if name.startswith('''visual.transformer.resblocks''' ): __magic_name__ :Union[str, Any] = name.replace('''visual.transformer.resblocks''', '''vision_model.encoder.layers''' ) if "visual.conv1" in name: __magic_name__ :Tuple = name.replace('''visual.conv1''', '''vision_model.embeddings.patch_embedding''' ) if "visual.ln_pre" in name: __magic_name__ :Tuple = name.replace('''visual.ln_pre''', '''vision_model.pre_layernorm''' ) if "visual.ln_post" in name: __magic_name__ :Optional[Any] = name.replace('''visual.ln_post''', '''vision_model.post_layernorm''' ) if "visual.proj" in name: __magic_name__ :Tuple = name.replace('''visual.proj''', '''visual_projection.weight''' ) if "text_projection" in name: __magic_name__ :int = name.replace('''text_projection''', '''text_projection.weight''' ) # things on top if "prompts_visual_proj" in name: 
__magic_name__ :int = name.replace('''prompts_visual_proj''', '''prompts_visual_projection''' ) if "prompts_visual_ln" in name: __magic_name__ :Dict = name.replace('''prompts_visual_ln''', '''prompts_visual_layernorm''' ) # mit if name == "mit.positional_embedding": __magic_name__ :List[Any] = name.replace('''positional''', '''position''' ) if name.startswith('''mit.resblocks''' ): __magic_name__ :Union[str, Any] = name.replace('''mit.resblocks''', '''mit.encoder.layers''' ) # prompts generator if name.startswith('''prompts_generator.norm''' ): __magic_name__ :str = name.replace('''prompts_generator.norm''', '''prompts_generator.layernorm''' ) return name def __lowercase ( snake_case, snake_case ): """simple docstring""" for key in orig_state_dict.copy().keys(): __magic_name__ :Any = orig_state_dict.pop(snake_case ) if "attn.in_proj" in key: __magic_name__ :str = key.split('''.''' ) if key.startswith('''visual''' ): __magic_name__ :List[Any] = key_split[3] __magic_name__ :List[Any] = config.vision_config.hidden_size if "message_attn" in key: if "weight" in key: __magic_name__ :List[Any] = val[ :dim, : ] __magic_name__ :List[str] = val[ dim : dim * 2, : ] __magic_name__ :List[str] = val[ -dim:, : ] else: __magic_name__ :str = val[ :dim ] __magic_name__ :Optional[int] = val[ dim : dim * 2 ] __magic_name__ :Any = val[ -dim: ] else: if "weight" in key: __magic_name__ :int = val[ :dim, : ] __magic_name__ :Union[str, Any] = val[ dim : dim * 2, : ] __magic_name__ :List[Any] = val[ -dim:, : ] else: __magic_name__ :Union[str, Any] = val[:dim] __magic_name__ :str = val[ dim : dim * 2 ] __magic_name__ :Dict = val[-dim:] elif key.startswith('''mit''' ): __magic_name__ :List[Any] = key_split[2] __magic_name__ :Any = config.vision_config.mit_hidden_size if "weight" in key: __magic_name__ :Union[str, Any] = val[:dim, :] __magic_name__ :Optional[int] = val[dim : dim * 2, :] __magic_name__ :int = val[-dim:, :] else: __magic_name__ :Tuple = val[:dim] __magic_name__ :Optional[int] = val[dim : dim * 2] __magic_name__ :Optional[int] = val[-dim:] else: __magic_name__ :Any = key_split[2] __magic_name__ :List[Any] = config.text_config.hidden_size if "weight" in key: __magic_name__ :Union[str, Any] = val[:dim, :] __magic_name__ :Tuple = val[ dim : dim * 2, : ] __magic_name__ :str = val[-dim:, :] else: __magic_name__ :int = val[:dim] __magic_name__ :Any = val[ dim : dim * 2 ] __magic_name__ :str = val[-dim:] else: __magic_name__ :Tuple = rename_key(snake_case ) if new_key_name in ["visual_projection.weight", "text_projection.weight"]: __magic_name__ :List[Any] = val.T __magic_name__ :Optional[Any] = val return orig_state_dict def __lowercase ( snake_case ): """simple docstring""" if num_frames == 8: __magic_name__ :Any = '''eating_spaghetti_8_frames.npy''' elif num_frames == 1_6: __magic_name__ :List[Any] = '''eating_spaghetti.npy''' elif num_frames == 3_2: __magic_name__ :Tuple = '''eating_spaghetti_32_frames.npy''' __magic_name__ :str = hf_hub_download( repo_id='''hf-internal-testing/spaghetti-video''', filename=snake_case, repo_type='''dataset''', ) __magic_name__ :List[Any] = np.load(snake_case ) return list(snake_case ) def __lowercase ( snake_case, snake_case=None, snake_case=False ): """simple docstring""" __magic_name__ :Union[str, Any] = { # fully supervised kinetics-400 checkpoints '''xclip-base-patch32''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth''', '''xclip-base-patch32-16-frames''': ( 
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth''' ), '''xclip-base-patch16''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth''', '''xclip-base-patch16-16-frames''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth''' ), '''xclip-large-patch14''': '''https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&amp;export=download&amp;confirm=t&amp;uuid=b26caedc-88e2-473e-830a-9d158b653cdb''', '''xclip-large-patch14-16-frames''': '''https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&amp;export=download&amp;confirm=t&amp;uuid=538fa810-e671-4050-b385-9a623f89804f''', # fully supervised kinetics-600 checkpoints '''xclip-base-patch16-kinetics-600''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth''' ), '''xclip-base-patch16-kinetics-600-16-frames''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth''' ), '''xclip-large-patch14-kinetics-600''': '''https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&amp;export=download&amp;confirm=t&amp;uuid=141d4977-4a65-44ae-864f-4b0c19f838be''', # few shot '''xclip-base-patch16-hmdb-2-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth''' ), '''xclip-base-patch16-hmdb-4-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth''' ), '''xclip-base-patch16-hmdb-8-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth''' ), '''xclip-base-patch16-hmdb-16-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth''' ), '''xclip-base-patch16-ucf-2-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth''' ), '''xclip-base-patch16-ucf-4-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth''' ), '''xclip-base-patch16-ucf-8-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth''' ), '''xclip-base-patch16-ucf-16-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth''' ), # zero shot '''xclip-base-patch16-zero-shot''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth''', } __magic_name__ :Optional[int] = model_to_url[model_name] __magic_name__ :List[str] = 8 if "16-frames" in model_name: __magic_name__ :List[Any] = 1_6 elif "shot" in model_name: __magic_name__ :Dict = 3_2 __magic_name__ :str = get_xclip_config(snake_case, snake_case ) __magic_name__ :List[Any] = XCLIPModel(snake_case ) model.eval() if "drive" in checkpoint_url: __magic_name__ :Any = '''pytorch_model.bin''' gdown.cached_download(snake_case, snake_case, quiet=snake_case ) __magic_name__ :Optional[Any] = torch.load(snake_case, map_location='''cpu''' )['''model'''] else: __magic_name__ :Optional[int] = torch.hub.load_state_dict_from_url(snake_case )['''model'''] __magic_name__ :List[str] = convert_state_dict(snake_case, snake_case ) __magic_name__ :List[Any] = XCLIPModel(snake_case ) __magic_name__ , __magic_name__ :Optional[Any] = model.load_state_dict(snake_case, strict=snake_case ) assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"] model.eval() __magic_name__ :str = 3_3_6 if model_name == '''xclip-large-patch14-16-frames''' else 2_2_4 __magic_name__ :Optional[int] = VideoMAEImageProcessor(size=snake_case ) 
__magic_name__ :Optional[int] = CLIPTokenizer.from_pretrained('''openai/clip-vit-base-patch32''' ) __magic_name__ :Tuple = CLIPTokenizerFast.from_pretrained('''openai/clip-vit-base-patch32''' ) __magic_name__ :Optional[int] = XCLIPProcessor(image_processor=snake_case, tokenizer=snake_case ) __magic_name__ :List[Any] = prepare_video(snake_case ) __magic_name__ :str = processor( text=['''playing sports''', '''eating spaghetti''', '''go shopping'''], videos=snake_case, return_tensors='''pt''', padding=snake_case ) print('''Shape of pixel values:''', inputs.pixel_values.shape ) with torch.no_grad(): __magic_name__ :Tuple = model(**snake_case ) # Verify outputs __magic_name__ :Any = outputs.logits_per_video __magic_name__ :str = logits_per_video.softmax(dim=1 ) print('''Probs:''', snake_case ) # kinetics-400 if model_name == "xclip-base-patch32": __magic_name__ :Dict = torch.tensor([[0.0019, 0.9951, 0.0030]] ) elif model_name == "xclip-base-patch32-16-frames": __magic_name__ :str = torch.tensor([[7.0_9_9_9E-0_4, 9.9_8_8_3E-0_1, 4.5_5_8_0E-0_4]] ) elif model_name == "xclip-base-patch16": __magic_name__ :Tuple = torch.tensor([[0.0083, 0.9681, 0.0236]] ) elif model_name == "xclip-base-patch16-16-frames": __magic_name__ :Tuple = torch.tensor([[7.6_9_3_7E-0_4, 9.9_7_2_8E-0_1, 1.9_4_7_3E-0_3]] ) elif model_name == "xclip-large-patch14": __magic_name__ :str = torch.tensor([[0.0062, 0.9864, 0.0075]] ) elif model_name == "xclip-large-patch14-16-frames": __magic_name__ :Optional[int] = torch.tensor([[3.3_8_7_7E-0_4, 9.9_9_3_7E-0_1, 2.8_8_8_8E-0_4]] ) # kinetics-600 elif model_name == "xclip-base-patch16-kinetics-600": __magic_name__ :Optional[int] = torch.tensor([[0.0555, 0.8914, 0.0531]] ) elif model_name == "xclip-base-patch16-kinetics-600-16-frames": __magic_name__ :List[str] = torch.tensor([[3.8_5_5_4E-0_4, 9.9_9_2_9E-0_1, 3.2_7_5_4E-0_4]] ) elif model_name == "xclip-large-patch14-kinetics-600": __magic_name__ :List[str] = torch.tensor([[0.0036, 0.9920, 0.0045]] ) # few shot elif model_name == "xclip-base-patch16-hmdb-2-shot": __magic_name__ :Tuple = torch.tensor([[7.1_8_9_0E-0_6, 9.9_9_9_4E-0_1, 5.6_5_5_9E-0_5]] ) elif model_name == "xclip-base-patch16-hmdb-4-shot": __magic_name__ :List[str] = torch.tensor([[1.0_3_2_0E-0_5, 9.9_9_9_3E-0_1, 6.2_4_3_5E-0_5]] ) elif model_name == "xclip-base-patch16-hmdb-8-shot": __magic_name__ :Optional[int] = torch.tensor([[4.1_3_7_7E-0_6, 9.9_9_9_0E-0_1, 9.8_3_8_6E-0_5]] ) elif model_name == "xclip-base-patch16-hmdb-16-shot": __magic_name__ :Optional[int] = torch.tensor([[4.1_3_4_7E-0_5, 9.9_9_6_2E-0_1, 3.3_4_1_1E-0_4]] ) elif model_name == "xclip-base-patch16-ucf-2-shot": __magic_name__ :Union[str, Any] = torch.tensor([[8.5_8_5_7E-0_5, 9.9_9_2_8E-0_1, 6.3_2_9_1E-0_4]] ) elif model_name == "xclip-base-patch16-ucf-4-shot": __magic_name__ :Union[str, Any] = torch.tensor([[8.5_8_5_7E-0_5, 9.9_9_2_8E-0_1, 6.3_2_9_1E-0_4]] ) elif model_name == "xclip-base-patch16-ucf-8-shot": __magic_name__ :Optional[int] = torch.tensor([[0.0027, 0.9904, 0.0070]] ) elif model_name == "xclip-base-patch16-ucf-16-shot": __magic_name__ :Any = torch.tensor([[9.8_2_1_9E-0_4, 9.9_5_9_3E-0_1, 3.0_8_6_3E-0_3]] ) # zero shot elif model_name == "xclip-base-patch16-zero-shot": __magic_name__ :Optional[int] = torch.tensor([[3.5_0_8_2E-0_4, 9.9_7_8_5E-0_1, 1.7_9_6_6E-0_3]] ) else: raise ValueError(f'''Model name {model_name} not supported''' ) assert torch.allclose(snake_case, snake_case, atol=1E-3 ) print('''Looks ok!''' ) if pytorch_dump_folder_path is not None: print(f'''Saving model {model_name} 
to {pytorch_dump_folder_path}''' ) model.save_pretrained(snake_case ) if push_to_hub: print('''Pushing model, processor and slow tokenizer files to the hub...''' ) model.push_to_hub(snake_case, organization='''nielsr''' ) processor.push_to_hub(snake_case, organization='''nielsr''' ) slow_tokenizer.push_to_hub(snake_case, organization='''nielsr''' ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""xclip-base-patch32""", type=str, help="""Name of the model.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) SCREAMING_SNAKE_CASE__ : List[Any] = parser.parse_args() convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
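# Usage sketch for the conversion entry point above. The script filename is
# an assumption (substitute the actual file in your checkout); the flags
# mirror the argparse definition directly above.
#
#   python convert_x_clip_original_pytorch_to_hf.py \
#       --model_name xclip-base-patch32 \
#       --pytorch_dump_folder_path ./xclip-base-patch32 \
#       --push_to_hub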
0
0
from .constants import ( MODEL_NAME, OPTIMIZER_NAME, RNG_STATE_NAME, SAFE_WEIGHTS_INDEX_NAME, SAFE_WEIGHTS_NAME, SCALER_NAME, SCHEDULER_NAME, TORCH_LAUNCH_PARAMS, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, ) from .dataclasses import ( BnbQuantizationConfig, ComputeEnvironment, CustomDtype, DeepSpeedPlugin, DistributedDataParallelKwargs, DistributedType, DynamoBackend, FPaRecipeKwargs, FullyShardedDataParallelPlugin, GradientAccumulationPlugin, GradScalerKwargs, InitProcessGroupKwargs, KwargsHandler, LoggerType, MegatronLMPlugin, PrecisionType, ProjectConfiguration, RNGType, SageMakerDistributedType, TensorInformation, TorchDynamoPlugin, ) from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env from .imports import ( get_ccl_version, is_abit_bnb_available, is_abit_bnb_available, is_aim_available, is_bfaa_available, is_bnb_available, is_botoa_available, is_ccl_available, is_comet_ml_available, is_datasets_available, is_deepspeed_available, is_fpa_available, is_ipex_available, is_megatron_lm_available, is_mlflow_available, is_mps_available, is_npu_available, is_rich_available, is_safetensors_available, is_sagemaker_available, is_tensorboard_available, is_tpu_available, is_transformers_available, is_wandb_available, is_xpu_available, ) from .modeling import ( check_device_map, check_tied_parameters_in_config, check_tied_parameters_on_same_device, compute_module_sizes, convert_file_size_to_int, dtype_byte_size, find_tied_parameters, get_balanced_memory, get_max_layer_size, get_max_memory, get_mixed_precision_context_manager, id_tensor_storage, infer_auto_device_map, load_checkpoint_in_model, load_offloaded_weights, load_state_dict, named_module_tensors, retie_parameters, set_module_tensor_to_device, shard_checkpoint, ) from .offload import ( OffloadedWeightsLoader, PrefixedDataset, extract_submodules_state_dict, load_offloaded_weight, offload_state_dict, offload_weight, save_offload_index, ) from .operations import ( broadcast, broadcast_object_list, concatenate, convert_outputs_to_fpaa, convert_to_fpaa, find_batch_size, find_device, gather, gather_object, get_data_structure, honor_type, initialize_tensors, is_namedtuple, is_tensor_information, is_torch_tensor, listify, pad_across_processes, recursively_apply, reduce, send_to_device, slice_tensors, ) from .versions import compare_versions, is_torch_version if is_deepspeed_available(): from .deepspeed import ( DeepSpeedEngineWrapper, DeepSpeedOptimizerWrapper, DeepSpeedSchedulerWrapper, DummyOptim, DummyScheduler, HfDeepSpeedConfig, ) from .bnb import has_abit_bnb_layers, load_and_quantize_model from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer from .launch import ( PrepareForLaunch, _filter_args, prepare_deepspeed_cmd_env, prepare_multi_gpu_env, prepare_sagemager_args_inputs, prepare_simple_launcher_cmd_env, prepare_tpu, ) from .megatron_lm import ( AbstractTrainStep, BertTrainStep, GPTTrainStep, MegatronEngine, MegatronLMDummyDataLoader, MegatronLMDummyScheduler, MegatronLMOptimizerWrapper, MegatronLMSchedulerWrapper, TaTrainStep, avg_losses_across_data_parallel_group, gather_across_data_parallel_groups, ) from .megatron_lm import initialize as megatron_lm_initialize from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader from .megatron_lm import prepare_model as megatron_lm_prepare_model from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler from .memory 
import find_executable_batch_size, release_memory from .other import ( extract_model_from_parallel, get_pretty_name, is_port_in_use, merge_dicts, patch_environment, save, wait_for_everyone, write_basic_config, ) from .random import set_seed, synchronize_rng_state, synchronize_rng_states from .torch_xla import install_xla from .tqdm import tqdm from .transformer_engine import convert_model, has_transformer_engine_layers
55
import numpy as np import torch from torch.utils.data import Dataset from utils import logger class lowerCamelCase_ ( lowerCamelCase ): def __init__( self , __lowerCAmelCase , __lowerCAmelCase ): """simple docstring""" __magic_name__ :Optional[int] = params __magic_name__ :Any = np.array(__lowerCAmelCase ) __magic_name__ :Optional[Any] = np.array([len(__lowerCAmelCase ) for t in data] ) self.check() self.remove_long_sequences() self.remove_empty_sequences() self.remove_unknown_sequences() self.check() self.print_statistics() def __getitem__( self , __lowerCAmelCase ): """simple docstring""" return (self.token_ids[index], self.lengths[index]) def __len__( self ): """simple docstring""" return len(self.lengths ) def A ( self ): """simple docstring""" assert len(self.token_ids ) == len(self.lengths ) assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) ) def A ( self ): """simple docstring""" __magic_name__ :Any = self.params.max_model_input_size __magic_name__ :int = self.lengths > max_len logger.info(F'''Splitting {sum(__lowerCAmelCase )} too long sequences.''' ) def divide_chunks(__lowerCAmelCase , __lowerCAmelCase ): return [l[i : i + n] for i in range(0 , len(__lowerCAmelCase ) , __lowerCAmelCase )] __magic_name__ :Optional[int] = [] __magic_name__ :List[Any] = [] if self.params.mlm: __magic_name__ , __magic_name__ :Optional[Any] = self.params.special_tok_ids['''cls_token'''], self.params.special_tok_ids['''sep_token'''] else: __magic_name__ , __magic_name__ :Tuple = self.params.special_tok_ids['''bos_token'''], self.params.special_tok_ids['''eos_token'''] for seq_, len_ in zip(self.token_ids , self.lengths ): assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_ if len_ <= max_len: new_tok_ids.append(seq_ ) new_lengths.append(len_ ) else: __magic_name__ :int = [] for sub_s in divide_chunks(seq_ , max_len - 2 ): if sub_s[0] != cls_id: __magic_name__ :List[Any] = np.insert(__lowerCAmelCase , 0 , __lowerCAmelCase ) if sub_s[-1] != sep_id: __magic_name__ :Union[str, Any] = np.insert(__lowerCAmelCase , len(__lowerCAmelCase ) , __lowerCAmelCase ) assert len(__lowerCAmelCase ) <= max_len assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s sub_seqs.append(__lowerCAmelCase ) new_tok_ids.extend(__lowerCAmelCase ) new_lengths.extend([len(__lowerCAmelCase ) for l in sub_seqs] ) __magic_name__ :Tuple = np.array(__lowerCAmelCase ) __magic_name__ :Optional[int] = np.array(__lowerCAmelCase ) def A ( self ): """simple docstring""" __magic_name__ :Optional[Any] = len(self ) __magic_name__ :int = self.lengths > 1_1 __magic_name__ :List[str] = self.token_ids[indices] __magic_name__ :Union[str, Any] = self.lengths[indices] __magic_name__ :List[str] = len(self ) logger.info(F'''Remove {init_size - new_size} too short (<=11 tokens) sequences.''' ) def A ( self ): """simple docstring""" if "unk_token" not in self.params.special_tok_ids: return else: __magic_name__ :Tuple = self.params.special_tok_ids['''unk_token'''] __magic_name__ :Dict = len(self ) __magic_name__ :Tuple = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] ) __magic_name__ :int = (unk_occs / self.lengths) < 0.5 __magic_name__ :str = self.token_ids[indices] __magic_name__ :str = self.lengths[indices] __magic_name__ :Any = len(self ) logger.info(F'''Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).''' ) def A ( self ): """simple docstring""" if not self.params.is_master: return logger.info(F'''{len(self )} sequences''' ) # data_len = 
sum(self.lengths) # nb_unique_tokens = len(Counter(list(chain(*self.token_ids)))) # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)') # unk_idx = self.params.special_tok_ids['unk_token'] # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids]) # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)') def A ( self , __lowerCAmelCase ): """simple docstring""" __magic_name__ :Optional[Any] = [t[0] for t in batch] __magic_name__ :List[Any] = [t[1] for t in batch] assert len(__lowerCAmelCase ) == len(__lowerCAmelCase ) # Max for paddings __magic_name__ :Tuple = max(__lowerCAmelCase ) # Pad token ids if self.params.mlm: __magic_name__ :Any = self.params.special_tok_ids['''pad_token'''] else: __magic_name__ :str = self.params.special_tok_ids['''unk_token'''] __magic_name__ :Any = [list(t.astype(__lowerCAmelCase ) ) + [pad_idx] * (max_seq_len_ - len(__lowerCAmelCase )) for t in token_ids] assert len(tk_ ) == len(__lowerCAmelCase ) assert all(len(__lowerCAmelCase ) == max_seq_len_ for t in tk_ ) __magic_name__ :Optional[int] = torch.tensor(tk_ ) # (bs, max_seq_len_) __magic_name__ :Optional[int] = torch.tensor(__lowerCAmelCase ) # (bs) return tk_t, lg_t
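# A minimal, self-contained sketch of the pad-and-collate pattern used by
# the batching method above, with descriptive names; pad_token_id stands in
# for params.special_tok_ids["pad_token"] and is an assumption here.
import torch

def collate_with_padding(batch, pad_token_id=0):
    token_ids = [ids for ids, _ in batch]
    lengths = [length for _, length in batch]
    max_len = max(lengths)
    # Right-pad every sequence to the longest one in the batch.
    padded = [list(ids) + [pad_token_id] * (max_len - len(ids)) for ids in token_ids]
    return torch.tensor(padded), torch.tensor(lengths)

# Two sequences of different lengths come back as one (bs, max_len) tensor.
batch = [([5, 6, 7], 3), ([8, 9], 2)]
tokens, lengths = collate_with_padding(batch)
assert tokens.shape == (2, 3) and lengths.tolist() == [3, 2]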
0
0
'''simple docstring''' import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DPMSolverMultistepScheduler, TextToVideoSDPipeline, UNetaDConditionModel, ) from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() @skip_mps class _lowercase ( __lowercase , unittest.TestCase ): _SCREAMING_SNAKE_CASE : str = TextToVideoSDPipeline _SCREAMING_SNAKE_CASE : Tuple = TEXT_TO_IMAGE_PARAMS _SCREAMING_SNAKE_CASE : List[str] = TEXT_TO_IMAGE_BATCH_PARAMS # No `output_type`. _SCREAMING_SNAKE_CASE : List[str] = frozenset( [ "num_inference_steps", "generator", "latents", "return_dict", "callback", "callback_steps", ] ) def a ( self : Union[str, Any] ) -> Dict: torch.manual_seed(0 ) __snake_case = UNetaDConditionModel( block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'DownBlock3D') , up_block_types=('UpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D') , cross_attention_dim=32 , attention_head_dim=4 , ) __snake_case = DDIMScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' , clip_sample=SCREAMING_SNAKE_CASE_ , set_alpha_to_one=SCREAMING_SNAKE_CASE_ , ) torch.manual_seed(0 ) __snake_case = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , ) torch.manual_seed(0 ) __snake_case = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='gelu' , projection_dim=512 , ) __snake_case = CLIPTextModel(SCREAMING_SNAKE_CASE_ ) __snake_case = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) __snake_case = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, } return components def a ( self : Any , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Union[str, Any]=0 ) -> Union[str, Any]: if str(SCREAMING_SNAKE_CASE_ ).startswith('mps' ): __snake_case = torch.manual_seed(SCREAMING_SNAKE_CASE_ ) else: __snake_case = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(SCREAMING_SNAKE_CASE_ ) __snake_case = { 'prompt': 'A painting of a squirrel eating a burger', 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 6.0, 'output_type': 'pt', } return inputs def a ( self : Any ) -> Union[str, Any]: __snake_case = 'cpu' # ensure determinism for the device-dependent torch.Generator __snake_case = self.get_dummy_components() __snake_case = TextToVideoSDPipeline(**SCREAMING_SNAKE_CASE_ ) __snake_case = sd_pipe.to(SCREAMING_SNAKE_CASE_ ) sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ ) __snake_case = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ ) __snake_case = 'np' __snake_case = sd_pipe(**SCREAMING_SNAKE_CASE_ ).frames __snake_case = frames[0][-3:, -3:, -1] assert frames[0].shape == (64, 64, 3) 
__snake_case = np.array([1_5_8.0, 1_6_0.0, 1_5_3.0, 1_2_5.0, 1_0_0.0, 1_2_1.0, 1_1_1.0, 9_3.0, 1_1_3.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def a ( self : Optional[int] ) -> str: self._test_attention_slicing_forward_pass(test_mean_pixel_difference=SCREAMING_SNAKE_CASE_ , expected_max_diff=3e-3 ) @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , ) def a ( self : Any ) -> int: self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=SCREAMING_SNAKE_CASE_ , expected_max_diff=1e-2 ) @unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' ) def a ( self : List[str] ) -> Optional[int]: pass @unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' ) def a ( self : Optional[int] ) -> Any: pass @unittest.skip(reason='`num_images_per_prompt` argument is not supported for this pipeline.' ) def a ( self : List[Any] ) -> List[str]: pass def a ( self : Dict ) -> Optional[int]: return super().test_progress_bar() @slow @skip_mps class _lowercase ( unittest.TestCase ): def a ( self : List[Any] ) -> str: __snake_case = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy' ) __snake_case = TextToVideoSDPipeline.from_pretrained('damo-vilab/text-to-video-ms-1.7b' ) __snake_case = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) __snake_case = pipe.to('cuda' ) __snake_case = 'Spiderman is surfing' __snake_case = torch.Generator(device='cpu' ).manual_seed(0 ) __snake_case = pipe(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , num_inference_steps=25 , output_type='pt' ).frames __snake_case = video_frames.cpu().numpy() assert np.abs(expected_video - video ).mean() < 5e-2 def a ( self : Dict ) -> Any: __snake_case = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy' ) __snake_case = TextToVideoSDPipeline.from_pretrained('damo-vilab/text-to-video-ms-1.7b' ) __snake_case = pipe.to('cuda' ) __snake_case = 'Spiderman is surfing' __snake_case = torch.Generator(device='cpu' ).manual_seed(0 ) __snake_case = pipe(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , num_inference_steps=2 , output_type='pt' ).frames __snake_case = video_frames.cpu().numpy() assert np.abs(expected_video - video ).mean() < 5e-2
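# The determinism idiom these tests rely on, in isolation: a CPU
# torch.Generator seeded identically yields identical draws, which is what
# makes the expected-slice assertions above reproducible.
import torch

g1 = torch.Generator(device="cpu").manual_seed(0)
g2 = torch.Generator(device="cpu").manual_seed(0)
assert torch.equal(torch.randn(3, generator=g1), torch.randn(3, generator=g2))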
56
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging SCREAMING_SNAKE_CASE__ : str = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : Tuple = """▁""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = {"""vocab_file""": """spiece.model"""} SCREAMING_SNAKE_CASE__ : List[Any] = { """vocab_file""": { """google/reformer-crime-and-punishment""": ( """https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model""" ) } } SCREAMING_SNAKE_CASE__ : Optional[int] = { """google/reformer-crime-and-punishment""": 52_42_88, } class lowerCamelCase_ ( lowerCamelCase ): a__ = VOCAB_FILES_NAMES a__ = PRETRAINED_VOCAB_FILES_MAP a__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a__ = ['''input_ids''', '''attention_mask'''] def __init__( self , __lowerCAmelCase , __lowerCAmelCase="</s>" , __lowerCAmelCase="<unk>" , __lowerCAmelCase=[] , __lowerCAmelCase = None , **__lowerCAmelCase , ): """simple docstring""" __magic_name__ :int = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( eos_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , additional_special_tokens=__lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCAmelCase , ) __magic_name__ :Optional[Any] = vocab_file __magic_name__ :int = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(__lowerCAmelCase ) @property def A ( self ): """simple docstring""" return self.sp_model.get_piece_size() def A ( self ): """simple docstring""" __magic_name__ :str = {self.convert_ids_to_tokens(__lowerCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ): """simple docstring""" __magic_name__ :Optional[Any] = self.__dict__.copy() __magic_name__ :Optional[Any] = None return state def __setstate__( self , __lowerCAmelCase ): """simple docstring""" __magic_name__ :Any = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): __magic_name__ :Optional[int] = {} __magic_name__ :Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def A ( self , __lowerCAmelCase ): """simple docstring""" return self.sp_model.encode(__lowerCAmelCase , out_type=__lowerCAmelCase ) def A ( self , __lowerCAmelCase ): """simple docstring""" return self.sp_model.piece_to_id(__lowerCAmelCase ) def A ( self , __lowerCAmelCase ): """simple docstring""" if index < self.sp_model.get_piece_size(): __magic_name__ :int = self.sp_model.IdToPiece(__lowerCAmelCase ) return token def A ( self , __lowerCAmelCase ): """simple docstring""" __magic_name__ :Optional[Any] = [] __magic_name__ :Tuple = '''''' for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(__lowerCAmelCase ) + token __magic_name__ :Optional[Any] = [] else: current_sub_tokens.append(__lowerCAmelCase ) out_string += self.sp_model.decode(__lowerCAmelCase ) return out_string.strip() def A ( self , __lowerCAmelCase , __lowerCAmelCase = None ): """simple docstring""" if not os.path.isdir(__lowerCAmelCase ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return __magic_name__ :Optional[int] = os.path.join( __lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file 
) != os.path.abspath(__lowerCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __lowerCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(__lowerCAmelCase , '''wb''' ) as fi: __magic_name__ :Dict = self.sp_model.serialized_model_proto() fi.write(__lowerCAmelCase ) return (out_vocab_file,)
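# A minimal sketch of the SentencePiece round trip this tokenizer wraps.
# "spiece.model" is an assumed path to a trained model file; encode,
# piece_to_id and decode are standard sentencepiece calls.
import sentencepiece as spm

sp = spm.SentencePieceProcessor()
sp.Load("spiece.model")
pieces = sp.encode("Crime and Punishment", out_type=str)  # e.g. ['▁Crime', ...]
ids = [sp.piece_to_id(p) for p in pieces]
text = sp.decode(pieces)  # back to (roughly) the input string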
0
0
from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available A_ : Dict = { 'configuration_autoformer': [ 'AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'AutoformerConfig', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ : Optional[Any] = [ 'AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'AutoformerForPrediction', 'AutoformerModel', 'AutoformerPreTrainedModel', ] if TYPE_CHECKING: from .configuration_autoformer import ( AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, AutoformerConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_autoformer import ( AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, AutoformerForPrediction, AutoformerModel, AutoformerPreTrainedModel, ) else: import sys A_ : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
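# The same lazy-import idea restated with plain PEP 562 module __getattr__
# instead of the _LazyModule helper; the mapping below is illustrative only.
import importlib

_LAZY = {"AutoformerConfig": ".configuration_autoformer"}

def __getattr__(name):
    if name in _LAZY:
        # Import the submodule on first attribute access, not at package load.
        module = importlib.import_module(_LAZY[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")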
57
import os import unittest from transformers import MobileBertTokenizer, MobileBertTokenizerFast from transformers.models.bert.tokenization_bert import ( VOCAB_FILES_NAMES, BasicTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english @require_tokenizers class lowerCamelCase_ ( lowerCamelCase , unittest.TestCase ): a__ = MobileBertTokenizer a__ = MobileBertTokenizerFast a__ = True a__ = True a__ = filter_non_english a__ = '''google/mobilebert-uncased''' def A ( self ): """simple docstring""" super().setUp() __magic_name__ :Tuple = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] __magic_name__ :Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) __magic_name__ :List[str] = [ (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped for tokenizer_def in self.tokenizers_list ] def A ( self , __lowerCAmelCase ): """simple docstring""" __magic_name__ :Union[str, Any] = '''UNwant\u00E9d,running''' __magic_name__ :int = '''unwanted, running''' return input_text, output_text def A ( self ): """simple docstring""" __magic_name__ :Optional[int] = self.tokenizer_class(self.vocab_file ) __magic_name__ :List[Any] = tokenizer.tokenize('''UNwant\u00E9d,running''' ) self.assertListEqual(__lowerCAmelCase , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) , [9, 6, 7, 1_2, 1_0, 1_1] ) def A ( self ): """simple docstring""" if not self.test_rust_tokenizer: return __magic_name__ :int = self.get_tokenizer() __magic_name__ :Tuple = self.get_rust_tokenizer() __magic_name__ :List[str] = '''UNwant\u00E9d,running''' __magic_name__ :Optional[Any] = tokenizer.tokenize(__lowerCAmelCase ) __magic_name__ :List[Any] = rust_tokenizer.tokenize(__lowerCAmelCase ) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase ) __magic_name__ :int = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ) __magic_name__ :str = rust_tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase ) __magic_name__ :List[Any] = self.get_rust_tokenizer() __magic_name__ :Any = tokenizer.encode(__lowerCAmelCase ) __magic_name__ :Any = rust_tokenizer.encode(__lowerCAmelCase ) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase ) # With lower casing __magic_name__ :Any = self.get_tokenizer(do_lower_case=__lowerCAmelCase ) __magic_name__ :List[Any] = self.get_rust_tokenizer(do_lower_case=__lowerCAmelCase ) __magic_name__ :Dict = '''UNwant\u00E9d,running''' __magic_name__ :Tuple = tokenizer.tokenize(__lowerCAmelCase ) __magic_name__ :Union[str, Any] = rust_tokenizer.tokenize(__lowerCAmelCase ) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase ) __magic_name__ :Optional[Any] = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ) __magic_name__ :Dict = rust_tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase ) __magic_name__ :Tuple = 
self.get_rust_tokenizer() __magic_name__ :Dict = tokenizer.encode(__lowerCAmelCase ) __magic_name__ :List[Any] = rust_tokenizer.encode(__lowerCAmelCase ) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase ) def A ( self ): """simple docstring""" __magic_name__ :Optional[int] = BasicTokenizer() self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] ) def A ( self ): """simple docstring""" __magic_name__ :List[Any] = BasicTokenizer(do_lower_case=__lowerCAmelCase ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] ) def A ( self ): """simple docstring""" __magic_name__ :Union[str, Any] = BasicTokenizer(do_lower_case=__lowerCAmelCase , strip_accents=__lowerCAmelCase ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] ) def A ( self ): """simple docstring""" __magic_name__ :Dict = BasicTokenizer(do_lower_case=__lowerCAmelCase , strip_accents=__lowerCAmelCase ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] ) def A ( self ): """simple docstring""" __magic_name__ :Optional[int] = BasicTokenizer(do_lower_case=__lowerCAmelCase ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] ) def A ( self ): """simple docstring""" __magic_name__ :List[str] = BasicTokenizer(do_lower_case=__lowerCAmelCase ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def A ( self ): """simple docstring""" __magic_name__ :int = BasicTokenizer(do_lower_case=__lowerCAmelCase , strip_accents=__lowerCAmelCase ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def A ( self ): """simple docstring""" __magic_name__ :Optional[int] = BasicTokenizer(do_lower_case=__lowerCAmelCase , strip_accents=__lowerCAmelCase ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def A ( self ): """simple docstring""" __magic_name__ :Optional[Any] = BasicTokenizer(do_lower_case=__lowerCAmelCase , never_split=['''[UNK]'''] ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? 
[UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] ) def A ( self ): """simple docstring""" __magic_name__ :int = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing'''] __magic_name__ :Union[str, Any] = {} for i, token in enumerate(__lowerCAmelCase ): __magic_name__ :Tuple = i __magic_name__ :List[Any] = WordpieceTokenizer(vocab=__lowerCAmelCase , unk_token='''[UNK]''' ) self.assertListEqual(tokenizer.tokenize('''''' ) , [] ) self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] ) self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] ) def A ( self ): """simple docstring""" self.assertTrue(_is_whitespace(''' ''' ) ) self.assertTrue(_is_whitespace('''\t''' ) ) self.assertTrue(_is_whitespace('''\r''' ) ) self.assertTrue(_is_whitespace('''\n''' ) ) self.assertTrue(_is_whitespace('''\u00A0''' ) ) self.assertFalse(_is_whitespace('''A''' ) ) self.assertFalse(_is_whitespace('''-''' ) ) def A ( self ): """simple docstring""" self.assertTrue(_is_control('''\u0005''' ) ) self.assertFalse(_is_control('''A''' ) ) self.assertFalse(_is_control(''' ''' ) ) self.assertFalse(_is_control('''\t''' ) ) self.assertFalse(_is_control('''\r''' ) ) def A ( self ): """simple docstring""" self.assertTrue(_is_punctuation('''-''' ) ) self.assertTrue(_is_punctuation('''$''' ) ) self.assertTrue(_is_punctuation('''`''' ) ) self.assertTrue(_is_punctuation('''.''' ) ) self.assertFalse(_is_punctuation('''A''' ) ) self.assertFalse(_is_punctuation(''' ''' ) ) def A ( self ): """simple docstring""" __magic_name__ :Any = self.get_tokenizer() __magic_name__ :Any = self.get_rust_tokenizer() # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340 self.assertListEqual([tokenizer.tokenize(__lowerCAmelCase ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] ) self.assertListEqual( [rust_tokenizer.tokenize(__lowerCAmelCase ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] ) @slow def A ( self ): """simple docstring""" __magic_name__ :Optional[int] = self.tokenizer_class.from_pretrained('''google/mobilebert-uncased''' ) __magic_name__ :Optional[int] = tokenizer.encode('''sequence builders''' , add_special_tokens=__lowerCAmelCase ) __magic_name__ :List[Any] = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__lowerCAmelCase ) __magic_name__ :Union[str, Any] = tokenizer.build_inputs_with_special_tokens(__lowerCAmelCase ) __magic_name__ :List[Any] = tokenizer.build_inputs_with_special_tokens(__lowerCAmelCase , __lowerCAmelCase ) assert encoded_sentence == [1_0_1] + text + [1_0_2] assert encoded_pair == [1_0_1] + text + [1_0_2] + text_a + [1_0_2] def A ( self ): """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): __magic_name__ :Optional[Any] = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase ) __magic_name__ :Optional[int] = F'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.''' __magic_name__ :Optional[Any] = tokenizer_r.encode_plus( __lowerCAmelCase , return_attention_mask=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , ) __magic_name__ :Any = 
tokenizer_r.do_lower_case if hasattr(__lowerCAmelCase , '''do_lower_case''' ) else False __magic_name__ :Optional[int] = ( [ ((0, 0), tokenizer_r.cls_token), ((0, 1), '''A'''), ((1, 2), ''','''), ((3, 5), '''na'''), ((5, 6), '''##ï'''), ((6, 8), '''##ve'''), ((9, 1_5), tokenizer_r.mask_token), ((1_6, 2_1), '''Allen'''), ((2_1, 2_3), '''##NL'''), ((2_3, 2_4), '''##P'''), ((2_5, 3_3), '''sentence'''), ((3_3, 3_4), '''.'''), ((0, 0), tokenizer_r.sep_token), ] if not do_lower_case else [ ((0, 0), tokenizer_r.cls_token), ((0, 1), '''a'''), ((1, 2), ''','''), ((3, 8), '''naive'''), ((9, 1_5), tokenizer_r.mask_token), ((1_6, 2_1), '''allen'''), ((2_1, 2_3), '''##nl'''), ((2_3, 2_4), '''##p'''), ((2_5, 3_3), '''sentence'''), ((3_3, 3_4), '''.'''), ((0, 0), tokenizer_r.sep_token), ] ) self.assertEqual( [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) ) self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] ) def A ( self ): """simple docstring""" __magic_name__ :Dict = ['''的''', '''人''', '''有'''] __magic_name__ :Any = ''''''.join(__lowerCAmelCase ) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): __magic_name__ :Optional[Any] = True __magic_name__ :Optional[int] = self.tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase ) __magic_name__ :Tuple = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase ) __magic_name__ :Dict = tokenizer_p.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ) __magic_name__ :List[str] = tokenizer_r.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ) __magic_name__ :Dict = tokenizer_r.convert_ids_to_tokens(__lowerCAmelCase ) __magic_name__ :Union[str, Any] = tokenizer_p.convert_ids_to_tokens(__lowerCAmelCase ) # it is expected that each Chinese character is not preceded by "##" self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase ) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase ) __magic_name__ :List[str] = False __magic_name__ :Tuple = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase ) __magic_name__ :List[str] = self.tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase ) __magic_name__ :Optional[Any] = tokenizer_r.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ) __magic_name__ :Union[str, Any] = tokenizer_p.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ) __magic_name__ :List[str] = tokenizer_r.convert_ids_to_tokens(__lowerCAmelCase ) __magic_name__ :Optional[int] = tokenizer_p.convert_ids_to_tokens(__lowerCAmelCase ) # it is expected that only the first Chinese character is not preceded by "##". __magic_name__ :Dict = [ F'''##{token}''' if idx != 0 else token for idx, token in enumerate(__lowerCAmelCase ) ] self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase ) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
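# WordPiece greedy longest-match-first, as exercised by the tests above, in a
# self-contained form (the vocab below is a toy assumption):
def wordpiece(token, vocab, unk="[UNK]"):
    pieces, start = [], 0
    while start < len(token):
        end, cur = len(token), None
        while start < end:
            piece = token[start:end]
            if start > 0:
                piece = "##" + piece  # continuation pieces carry the ## prefix
            if piece in vocab:
                cur = piece
                break
            end -= 1
        if cur is None:
            return [unk]  # no prefix of the remainder is in the vocab
        pieces.append(cur)
        start = end
    return pieces

vocab = {"un", "##want", "##ed", "runn", "##ing"}
assert wordpiece("unwanted", vocab) == ["un", "##want", "##ed"]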
0
0
"""simple docstring""" def __lowerCAmelCase ( __UpperCamelCase : int ): '''simple docstring''' if divisor % 5 == 0 or divisor % 2 == 0: return 0 snake_case_ : Any = 1 snake_case_ : Optional[int] = 1 while repunit: snake_case_ : Optional[int] = (1_0 * repunit + 1) % divisor repunit_index += 1 return repunit_index def __lowerCAmelCase ( __UpperCamelCase : int = 1_0_0_0_0_0_0 ): '''simple docstring''' snake_case_ : Tuple = limit - 1 if divisor % 2 == 0: divisor += 1 while least_divisible_repunit(__UpperCamelCase ) <= limit: divisor += 2 return divisor if __name__ == "__main__": print(F'''{solution() = }''')
58
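# A self-contained restatement of the repunit helper two rows above (the
# Project Euler 129 setup): A(n) is the least k such that the repunit
# R(k) = 111...1 with k ones is divisible by n; the running value below is
# R(k) mod n.
def repunit_order(divisor: int) -> int:
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0  # no repunit is divisible by n unless gcd(n, 10) == 1
    repunit, k = 1, 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        k += 1
    return k

# R(6) = 111111 = 7 * 15873, so A(7) = 6.
assert repunit_order(7) == 6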
import logging import os import quant_trainer import torch from torch.utils.data import DataLoader from transformers import Trainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput SCREAMING_SNAKE_CASE__ : List[str] = logging.getLogger(__name__) if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class lowerCamelCase_ ( lowerCamelCase ): def __init__( self , *__lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , **__lowerCAmelCase ): """simple docstring""" super().__init__(*__lowerCAmelCase , **__lowerCAmelCase ) __magic_name__ :Any = eval_examples __magic_name__ :str = post_process_function __magic_name__ :int = quant_trainer_args __magic_name__ :List[str] = 1_2_8 # default number of calibration samples def A ( self , __lowerCAmelCase=None ): """simple docstring""" if calib_dataset is None and self.calib_dataset is None: raise ValueError('''Trainer: calibration requires an calib_dataset.''' ) __magic_name__ :Optional[Any] = calib_dataset if calib_dataset is not None else self.calib_dataset __magic_name__ :Optional[int] = self._remove_unused_columns(__lowerCAmelCase , description='''Calibration''' ) return DataLoader( __lowerCAmelCase , batch_size=self.args.eval_batch_size , collate_fn=self.data_collator , drop_last=self.args.dataloader_drop_last , num_workers=self.args.dataloader_num_workers , pin_memory=self.args.dataloader_pin_memory , shuffle=__lowerCAmelCase , ) def A ( self , __lowerCAmelCase=None ): """simple docstring""" __magic_name__ :Dict = self.train_dataset if calib_dataset is None else calib_dataset __magic_name__ :Any = self.get_calib_dataloader(__lowerCAmelCase ) __magic_name__ :List[str] = self.model quant_trainer.configure_model(__lowerCAmelCase , self.quant_trainer_args , calib=__lowerCAmelCase ) model.eval() quant_trainer.enable_calibration(__lowerCAmelCase ) logger.info('''***** Running calibration *****''' ) logger.info(F''' Num examples = {self.calib_num}''' ) logger.info(F''' Batch size = {calib_dataloader.batch_size}''' ) for step, inputs in enumerate(__lowerCAmelCase ): # Prediction step __magic_name__ , __magic_name__ , __magic_name__ :str = self.prediction_step(__lowerCAmelCase , __lowerCAmelCase , prediction_loss_only=__lowerCAmelCase ) if (step + 1) * calib_dataloader.batch_size >= self.calib_num: break quant_trainer.finish_calibration(__lowerCAmelCase , self.quant_trainer_args ) __magic_name__ :Any = model def A ( self , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase = "eval" ): """simple docstring""" __magic_name__ :Tuple = self.eval_dataset if eval_dataset is None else eval_dataset __magic_name__ :Optional[Any] = self.get_eval_dataloader(__lowerCAmelCase ) __magic_name__ :str = self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. 
__magic_name__ :Any = self.compute_metrics __magic_name__ :List[Any] = None __magic_name__ :List[str] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: __magic_name__ :Optional[Any] = eval_loop( __lowerCAmelCase , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__lowerCAmelCase , ) finally: __magic_name__ :Union[str, Any] = compute_metrics if self.post_process_function is not None and self.compute_metrics is not None: __magic_name__ :Union[str, Any] = self.post_process_function(__lowerCAmelCase , __lowerCAmelCase , output.predictions ) __magic_name__ :int = self.compute_metrics(__lowerCAmelCase ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(F'''{metric_key_prefix}_''' ): __magic_name__ :Dict = metrics.pop(__lowerCAmelCase ) self.log(__lowerCAmelCase ) else: __magic_name__ :List[str] = {} if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) xm.master_print(met.metrics_report() ) __magic_name__ :Optional[Any] = self.callback_handler.on_evaluate(self.args , self.state , self.control , __lowerCAmelCase ) return metrics def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase = "test" ): """simple docstring""" __magic_name__ :int = self.get_test_dataloader(__lowerCAmelCase ) # Temporarily disable metric computation, we will do it in the loop here. __magic_name__ :Dict = self.compute_metrics __magic_name__ :str = None __magic_name__ :Optional[int] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: __magic_name__ :int = eval_loop( __lowerCAmelCase , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__lowerCAmelCase , ) finally: __magic_name__ :List[Any] = compute_metrics if self.post_process_function is None or self.compute_metrics is None: return output __magic_name__ :Optional[Any] = self.post_process_function(__lowerCAmelCase , __lowerCAmelCase , output.predictions , '''predict''' ) __magic_name__ :Dict = self.compute_metrics(__lowerCAmelCase ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(F'''{metric_key_prefix}_''' ): __magic_name__ :List[str] = metrics.pop(__lowerCAmelCase ) return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=__lowerCAmelCase ) def A ( self , __lowerCAmelCase="./" ): """simple docstring""" __magic_name__ :List[Any] = self.eval_dataset __magic_name__ :Any = self.get_eval_dataloader(__lowerCAmelCase ) __magic_name__ :int = next(iter(__lowerCAmelCase ) ) # saving device - to make it consistent __magic_name__ :str = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' ) # convert to tuple __magic_name__ :int = tuple(v.to(__lowerCAmelCase ) for k, v in batch.items() ) logger.info('''Converting model to be onnx compatible''' ) from pytorch_quantization.nn import TensorQuantizer __magic_name__ :Any = True __magic_name__ :Optional[int] = self.model.to(__lowerCAmelCase ) model.eval() model.float() __magic_name__ :Any = model.module if hasattr(__lowerCAmelCase , '''module''' ) else model quant_trainer.configure_model(__lowerCAmelCase , self.quant_trainer_args ) __magic_name__ :int = os.path.join(__lowerCAmelCase , '''model.onnx''' ) logger.info(F'''exporting model to 
{output_model_file}''' ) __magic_name__ :Dict = {0: '''batch_size''', 1: '''seq_len'''} torch.onnx.export( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , export_params=__lowerCAmelCase , opset_version=1_3 , do_constant_folding=__lowerCAmelCase , input_names=['''input_ids''', '''attention_mask''', '''token_type_ids'''] , output_names=['''output_start_logits''', '''output_end_logits'''] , dynamic_axes={ '''input_ids''': axes, '''attention_mask''': axes, '''token_type_ids''': axes, '''output_start_logits''': axes, '''output_end_logits''': axes, } , verbose=__lowerCAmelCase , ) logger.info('''onnx export finished''' )
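# The dynamic-axes ONNX export pattern used above, reduced to a toy module
# (the output path and axis names are illustrative; running this requires
# the onnx package alongside torch):
import torch

class Toy(torch.nn.Module):
    def forward(self, input_ids):
        return input_ids.float() * 2

axes = {0: "batch_size", 1: "seq_len"}
torch.onnx.export(
    Toy(),
    (torch.ones(1, 4, dtype=torch.long),),
    "toy.onnx",
    export_params=True,
    opset_version=13,
    input_names=["input_ids"],
    output_names=["output"],
    dynamic_axes={"input_ids": axes, "output": axes},
)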
0
0
import json import os import shutil import tempfile from unittest import TestCase from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast from transformers.models.bart.configuration_bart import BartConfig from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES from transformers.models.dpr.configuration_dpr import DPRConfig from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available if is_torch_available() and is_datasets_available() and is_faiss_available(): from transformers.models.rag.configuration_rag import RagConfig from transformers.models.rag.tokenization_rag import RagTokenizer @require_faiss @require_torch class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->Tuple: '''simple docstring''' lowerCamelCase__: str =tempfile.mkdtemp() lowerCamelCase__: Optional[int] =8 # DPR tok lowerCamelCase__: Dict =[ "[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest", ] lowerCamelCase__: Optional[Any] =os.path.join(self.tmpdirname , "dpr_tokenizer") os.makedirs(UpperCAmelCase_ , exist_ok=UpperCAmelCase_) lowerCamelCase__: Any =os.path.join(UpperCAmelCase_ , DPR_VOCAB_FILES_NAMES["vocab_file"]) with open(self.vocab_file , "w" , encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens])) # BART tok lowerCamelCase__: Union[str, Any] =[ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", ] lowerCamelCase__: Optional[int] =dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_)))) lowerCamelCase__: int =["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] lowerCamelCase__: List[str] ={"unk_token": "<unk>"} lowerCamelCase__: Dict =os.path.join(self.tmpdirname , "bart_tokenizer") os.makedirs(UpperCAmelCase_ , exist_ok=UpperCAmelCase_) lowerCamelCase__: Any =os.path.join(UpperCAmelCase_ , BART_VOCAB_FILES_NAMES["vocab_file"]) lowerCamelCase__: Tuple =os.path.join(UpperCAmelCase_ , BART_VOCAB_FILES_NAMES["merges_file"]) with open(self.vocab_file , "w" , encoding="utf-8") as fp: fp.write(json.dumps(UpperCAmelCase_) + "\n") with open(self.merges_file , "w" , encoding="utf-8") as fp: fp.write("\n".join(UpperCAmelCase_)) def SCREAMING_SNAKE_CASE_ (self : Tuple) ->DPRQuestionEncoderTokenizer: '''simple docstring''' return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer")) def SCREAMING_SNAKE_CASE_ (self : Tuple) ->BartTokenizer: '''simple docstring''' return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , "bart_tokenizer")) def SCREAMING_SNAKE_CASE_ (self : Dict) ->Union[str, Any]: '''simple docstring''' shutil.rmtree(self.tmpdirname) @require_tokenizers def SCREAMING_SNAKE_CASE_ (self : int) ->List[Any]: '''simple docstring''' lowerCamelCase__: Tuple =os.path.join(self.tmpdirname , "rag_tokenizer") lowerCamelCase__: Optional[Any] =RagConfig(question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict()) lowerCamelCase__: Union[str, Any] =RagTokenizer(question_encoder=self.get_dpr_tokenizer() , 
generator=self.get_bart_tokenizer()) rag_config.save_pretrained(UpperCAmelCase_) rag_tokenizer.save_pretrained(UpperCAmelCase_) lowerCamelCase__: Union[str, Any] =RagTokenizer.from_pretrained(UpperCAmelCase_ , config=UpperCAmelCase_) self.assertIsInstance(new_rag_tokenizer.question_encoder , UpperCAmelCase_) self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab() , rag_tokenizer.question_encoder.get_vocab()) self.assertIsInstance(new_rag_tokenizer.generator , UpperCAmelCase_) self.assertEqual(new_rag_tokenizer.generator.get_vocab() , rag_tokenizer.generator.get_vocab()) @slow def SCREAMING_SNAKE_CASE_ (self : Dict) ->List[str]: '''simple docstring''' lowerCamelCase__: int =RagTokenizer.from_pretrained("facebook/rag-token-nq") lowerCamelCase__: str =[ "who got the first nobel prize in physics", "when is the next deadpool movie being released", "which mode is used for short wave broadcast service", "who is the owner of reading football club", "when is the next scandal episode coming out", "when is the last time the philadelphia won the superbowl", "what is the most current adobe flash player version", "how many episodes are there in dragon ball z", "what is the first step in the evolution of the eye", "where is gall bladder situated in human body", "what is the main mineral in lithium batteries", "who is the president of usa right now", "where do the greasers live in the outsiders", "panda is a national animal of which country", "what is the name of manchester united stadium", ] lowerCamelCase__: Dict =tokenizer(UpperCAmelCase_) self.assertIsNotNone(UpperCAmelCase_) @slow def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->Optional[int]: '''simple docstring''' lowerCamelCase__: List[Any] =RagTokenizer.from_pretrained("facebook/rag-sequence-nq") lowerCamelCase__: Optional[int] =[ "who got the first nobel prize in physics", "when is the next deadpool movie being released", "which mode is used for short wave broadcast service", "who is the owner of reading football club", "when is the next scandal episode coming out", "when is the last time the philadelphia won the superbowl", "what is the most current adobe flash player version", "how many episodes are there in dragon ball z", "what is the first step in the evolution of the eye", "where is gall bladder situated in human body", "what is the main mineral in lithium batteries", "who is the president of usa right now", "where do the greasers live in the outsiders", "panda is a national animal of which country", "what is the name of manchester united stadium", ] lowerCamelCase__: List[Any] =tokenizer(UpperCAmelCase_) self.assertIsNotNone(UpperCAmelCase_)
59
def __lowercase ( snake_case ): """simple docstring""" return "".join([hex(snake_case )[2:].zfill(2 ).upper() for byte in list(snake_case )] ) def __lowercase ( snake_case ): """simple docstring""" if (len(snake_case ) % 2) != 0: raise ValueError( '''Base16 encoded data is invalid: Data does not have an even number of hex digits.''' ) # Check the character set - the standard base16 alphabet # is uppercase according to RFC3548 section 6 if not set(snake_case ) <= set('''0123456789ABCDEF''' ): raise ValueError( '''Base16 encoded data is invalid: Data is not uppercase hex or it contains invalid characters.''' ) # For every two hexadecimal digits (= a byte), turn it into an integer. # Then, string the result together into bytes, and return it. return bytes(int(data[i] + data[i + 1], 1_6 ) for i in range(0, len(snake_case ), 2 ) ) if __name__ == "__main__": import doctest doctest.testmod()
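# The two helpers above mirror the standard library's base16 codec; a quick
# round trip with base64.b16encode / b16decode cross-checks the format:
import base64

payload = b"Hello World!"
encoded = base64.b16encode(payload).decode("ascii")
assert encoded == "48656C6C6F20576F726C6421"  # uppercase hex, two digits per byte
assert base64.b16decode(encoded) == payload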
0
0
import json import os from functools import lru_cache from typing import List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''} lowerCAmelCase_ = { '''vocab_file''': { '''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json''', '''allenai/longformer-large-4096''': ( '''https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json''' ), '''allenai/longformer-large-4096-finetuned-triviaqa''': ( '''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json''' ), '''allenai/longformer-base-4096-extra.pos.embd.only''': ( '''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json''' ), '''allenai/longformer-large-4096-extra.pos.embd.only''': ( '''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json''' ), }, '''merges_file''': { '''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt''', '''allenai/longformer-large-4096''': ( '''https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt''' ), '''allenai/longformer-large-4096-finetuned-triviaqa''': ( '''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt''' ), '''allenai/longformer-base-4096-extra.pos.embd.only''': ( '''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt''' ), '''allenai/longformer-large-4096-extra.pos.embd.only''': ( '''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt''' ), }, } lowerCAmelCase_ = { '''allenai/longformer-base-4096''': 4_0_9_6, '''allenai/longformer-large-4096''': 4_0_9_6, '''allenai/longformer-large-4096-finetuned-triviaqa''': 4_0_9_6, '''allenai/longformer-base-4096-extra.pos.embd.only''': 4_0_9_6, '''allenai/longformer-large-4096-extra.pos.embd.only''': 4_0_9_6, } @lru_cache() # Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode def lowerCamelCase_ ( ) -> List[str]: """simple docstring""" snake_case_ : str = ( list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) ) ) snake_case_ : Union[str, Any] = bs[:] snake_case_ : Dict = 0 for b in range(2**8 ): if b not in bs: bs.append(_UpperCamelCase ) cs.append(2**8 + n ) n += 1 snake_case_ : Any = [chr(_UpperCamelCase ) for n in cs] return dict(zip(_UpperCamelCase , _UpperCamelCase ) ) def lowerCamelCase_ ( _UpperCamelCase ) -> Dict: """simple docstring""" snake_case_ : List[str] = set() snake_case_ : List[Any] = word[0] for char in word[1:]: pairs.add((prev_char, char) ) snake_case_ : Optional[int] = char return pairs class __lowerCAmelCase ( _a ): lowerCamelCase_ : Tuple = VOCAB_FILES_NAMES lowerCamelCase_ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase_ : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase_ : List[Any] = ['''input_ids''', '''attention_mask'''] def __init__(self , __magic_name__ , __magic_name__ , __magic_name__="replace" , __magic_name__="<s>" , __magic_name__="</s>" , __magic_name__="</s>" , __magic_name__="<s>" , __magic_name__="<unk>" , __magic_name__="<pad>" , __magic_name__="<mask>" , 
__magic_name__=False , **__magic_name__ , ) -> Union[str, Any]: '''simple docstring''' snake_case_ : int = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else bos_token snake_case_ : List[Any] = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else eos_token snake_case_ : Dict = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else sep_token snake_case_ : Optional[int] = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else cls_token snake_case_ : Union[str, Any] = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else unk_token snake_case_ : Any = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else pad_token # Mask token behave like a normal word, i.e. include the space before it snake_case_ : str = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else mask_token super().__init__( errors=__magic_name__ , bos_token=__magic_name__ , eos_token=__magic_name__ , unk_token=__magic_name__ , sep_token=__magic_name__ , cls_token=__magic_name__ , pad_token=__magic_name__ , mask_token=__magic_name__ , add_prefix_space=__magic_name__ , **__magic_name__ , ) with open(__magic_name__ , encoding='''utf-8''' ) as vocab_handle: snake_case_ : Optional[Any] = json.load(__magic_name__ ) snake_case_ : Optional[int] = {v: k for k, v in self.encoder.items()} snake_case_ : Optional[Any] = errors # how to handle errors in decoding snake_case_ : Union[str, Any] = bytes_to_unicode() snake_case_ : str = {v: k for k, v in self.byte_encoder.items()} with open(__magic_name__ , encoding='''utf-8''' ) as merges_handle: snake_case_ : Dict = merges_handle.read().split('''\n''' )[1:-1] snake_case_ : Optional[int] = [tuple(merge.split() ) for merge in bpe_merges] snake_case_ : Union[str, Any] = dict(zip(__magic_name__ , range(len(__magic_name__ ) ) ) ) snake_case_ : int = {} snake_case_ : Tuple = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions snake_case_ : Optional[Any] = re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' ) @property def lowerCamelCase (self ) -> Any: '''simple docstring''' return len(self.encoder ) def lowerCamelCase (self ) -> Dict: '''simple docstring''' return dict(self.encoder , **self.added_tokens_encoder ) def lowerCamelCase (self , __magic_name__ ) -> Union[str, Any]: '''simple docstring''' if token in self.cache: return self.cache[token] snake_case_ : str = tuple(__magic_name__ ) snake_case_ : Dict = get_pairs(__magic_name__ ) if not pairs: return token while True: snake_case_ : Optional[int] = min(__magic_name__ , key=lambda __magic_name__ : self.bpe_ranks.get(__magic_name__ , float('''inf''' ) ) ) if bigram not in self.bpe_ranks: break snake_case_ , snake_case_ : List[Any] = bigram snake_case_ : int = [] snake_case_ : int = 0 while i < len(__magic_name__ ): try: snake_case_ : Union[str, Any] = word.index(__magic_name__ , __magic_name__ ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) snake_case_ : Any = j if word[i] == first 
and i < len(__magic_name__ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 snake_case_ : Dict = tuple(__magic_name__ ) snake_case_ : List[str] = new_word if len(__magic_name__ ) == 1: break else: snake_case_ : Optional[int] = get_pairs(__magic_name__ ) snake_case_ : List[str] = ''' '''.join(__magic_name__ ) snake_case_ : List[Any] = word return word def lowerCamelCase (self , __magic_name__ ) -> str: '''simple docstring''' snake_case_ : Tuple = [] for token in re.findall(self.pat , __magic_name__ ): snake_case_ : Optional[Any] = ''''''.join( self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__magic_name__ ).split(''' ''' ) ) return bpe_tokens def lowerCamelCase (self , __magic_name__ ) -> Any: '''simple docstring''' return self.encoder.get(__magic_name__ , self.encoder.get(self.unk_token ) ) def lowerCamelCase (self , __magic_name__ ) -> Dict: '''simple docstring''' return self.decoder.get(__magic_name__ ) def lowerCamelCase (self , __magic_name__ ) -> Dict: '''simple docstring''' snake_case_ : Tuple = ''''''.join(__magic_name__ ) snake_case_ : Dict = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors ) return text def lowerCamelCase (self , __magic_name__ , __magic_name__ = None ) -> Tuple[str]: '''simple docstring''' if not os.path.isdir(__magic_name__ ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return snake_case_ : Dict = os.path.join( __magic_name__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) snake_case_ : Optional[int] = os.path.join( __magic_name__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] ) with open(__magic_name__ , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=__magic_name__ , ensure_ascii=__magic_name__ ) + '''\n''' ) snake_case_ : List[str] = 0 with open(__magic_name__ , '''w''' , encoding='''utf-8''' ) as writer: writer.write('''#version: 0.2\n''' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __magic_name__ : kv[1] ): if index != token_index: logger.warning( F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.''' ''' Please check that the tokenizer is not corrupted!''' ) snake_case_ : Optional[int] = token_index writer.write(''' '''.join(__magic_name__ ) + '''\n''' ) index += 1 return vocab_file, merge_file def lowerCamelCase (self , __magic_name__ , __magic_name__ = None ) -> List[int]: '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] snake_case_ : Any = [self.cls_token_id] snake_case_ : List[str] = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def lowerCamelCase (self , __magic_name__ , __magic_name__ = None , __magic_name__ = False ) -> List[int]: '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__magic_name__ , token_ids_a=__magic_name__ , already_has_special_tokens=__magic_name__ ) if token_ids_a is None: return [1] + ([0] * len(__magic_name__ )) + [1] return [1] + ([0] * len(__magic_name__ )) + [1, 1] + ([0] * len(__magic_name__ )) + [1] def lowerCamelCase (self , __magic_name__ , __magic_name__ = None ) -> List[int]: 
'''simple docstring''' snake_case_ : Any = [self.sep_token_id] snake_case_ : Union[str, Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def lowerCamelCase (self , __magic_name__ , __magic_name__=False , **__magic_name__ ) -> Dict: '''simple docstring''' snake_case_ : Optional[int] = kwargs.pop('''add_prefix_space''' , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(__magic_name__ ) > 0 and not text[0].isspace()): snake_case_ : Optional[Any] = ''' ''' + text return (text, kwargs)
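To make the special-token layout above concrete: a single sequence is wrapped as `<s> A </s>` and a pair as `<s> A </s></s> B </s>`. A minimal added sketch follows; the concrete id values assume RoBERTa-style defaults, which this snippet does not pin down:

```python
# Assumed ids (not stated in the snippet): cls_token_id = 0 ("<s>"), sep_token_id = 2 ("</s>").
cls, sep = [0], [2]
seq_a = [7, 8, 9]  # token ids of sequence A
seq_b = [4, 5]     # token ids of sequence B

print(cls + seq_a + sep)                      # [0, 7, 8, 9, 2]
print(cls + seq_a + sep + sep + seq_b + sep)  # [0, 7, 8, 9, 2, 2, 4, 5, 2]
```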
60
import pytest import requests from datasets.utils.file_utils import http_head from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline @pytest.mark.integration def __lowercase ( ): """simple docstring""" with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ): with pytest.raises(snake_case ): requests.request('''GET''', '''https://huggingface.co''' ) with pytest.raises(requests.exceptions.ConnectTimeout ): requests.request('''GET''', '''https://huggingface.co''', timeout=1.0 ) @pytest.mark.integration def __lowercase ( ): """simple docstring""" with offline(OfflineSimulationMode.CONNECTION_FAILS ): with pytest.raises(requests.exceptions.ConnectionError ): requests.request('''GET''', '''https://huggingface.co''' ) def __lowercase ( ): """simple docstring""" with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ): with pytest.raises(snake_case ): http_head('''https://huggingface.co''' )
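Each simulation mode above surfaces a different failure. As an added standalone sketch (not part of the original tests), the same helper can guard arbitrary code inside this test module; the exact exception raised under `HF_DATASETS_OFFLINE_SET_TO_1` is left generic here:

```python
# Hypothetical standalone use; `offline`, `OfflineSimulationMode`, and
# `http_head` are already imported at the top of this module.
with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
    try:
        http_head("https://huggingface.co")
    except Exception as err:
        print(f"request blocked while offline: {err!r}")
```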
0
0
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, is_valid_image, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL UpperCamelCase = logging.get_logger(__name__) def _A ( lowerCAmelCase_ : Optional[Any] ): """simple docstring""" if isinstance(lowerCAmelCase_ , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ): return videos elif isinstance(lowerCAmelCase_ , (list, tuple) ) and is_valid_image(videos[0] ): return [videos] elif is_valid_image(lowerCAmelCase_ ): return [[videos]] raise ValueError(F'Could not make batched video from {videos}' ) class __lowerCamelCase ( UpperCamelCase__ ): """simple docstring""" snake_case__ = ["pixel_values"] def __init__( self : Any , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : Dict[str, int] = None , SCREAMING_SNAKE_CASE__ : PILImageResampling = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : Dict[str, int] = None , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : Union[int, float] = 1 / 255 , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : Optional[Union[float, List[float]]] = None , SCREAMING_SNAKE_CASE__ : Optional[Union[float, List[float]]] = None , **SCREAMING_SNAKE_CASE__ : Optional[Any] , ) -> None: super().__init__(**SCREAMING_SNAKE_CASE__ ) lowerCAmelCase__ = size if size is not None else {"shortest_edge": 224} lowerCAmelCase__ = get_size_dict(SCREAMING_SNAKE_CASE__ , default_to_square=SCREAMING_SNAKE_CASE__ ) lowerCAmelCase__ = crop_size if crop_size is not None else {"height": 224, "width": 224} lowerCAmelCase__ = get_size_dict(SCREAMING_SNAKE_CASE__ , param_name="crop_size" ) lowerCAmelCase__ = do_resize lowerCAmelCase__ = size lowerCAmelCase__ = do_center_crop lowerCAmelCase__ = crop_size lowerCAmelCase__ = resample lowerCAmelCase__ = do_rescale lowerCAmelCase__ = rescale_factor lowerCAmelCase__ = do_normalize lowerCAmelCase__ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN lowerCAmelCase__ = image_std if image_std is not None else IMAGENET_STANDARD_STD def a ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : np.ndarray , SCREAMING_SNAKE_CASE__ : Dict[str, int] , SCREAMING_SNAKE_CASE__ : PILImageResampling = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE__ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE__ : int , ) -> np.ndarray: lowerCAmelCase__ = get_size_dict(SCREAMING_SNAKE_CASE__ , default_to_square=SCREAMING_SNAKE_CASE__ ) if "shortest_edge" in size: lowerCAmelCase__ = get_resize_output_image_size(SCREAMING_SNAKE_CASE__ , size["shortest_edge"] , default_to_square=SCREAMING_SNAKE_CASE__ ) elif "height" in size and "width" in size: lowerCAmelCase__ = (size["height"], size["width"]) else: raise ValueError(f'Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. 
Got {size.keys()}' ) return resize(SCREAMING_SNAKE_CASE__ , size=SCREAMING_SNAKE_CASE__ , resample=SCREAMING_SNAKE_CASE__ , data_format=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) def a ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : np.ndarray , SCREAMING_SNAKE_CASE__ : Dict[str, int] , SCREAMING_SNAKE_CASE__ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE__ : Dict , ) -> np.ndarray: lowerCAmelCase__ = get_size_dict(SCREAMING_SNAKE_CASE__ ) if "height" not in size or "width" not in size: raise ValueError(f'Size must have \'height\' and \'width\' as keys. Got {size.keys()}' ) return center_crop(SCREAMING_SNAKE_CASE__ , size=(size["height"], size["width"]) , data_format=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) def a ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : np.ndarray , SCREAMING_SNAKE_CASE__ : Union[int, float] , SCREAMING_SNAKE_CASE__ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE__ : Optional[Any] , ) -> Optional[Any]: return rescale(SCREAMING_SNAKE_CASE__ , scale=SCREAMING_SNAKE_CASE__ , data_format=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) def a ( self : int , SCREAMING_SNAKE_CASE__ : np.ndarray , SCREAMING_SNAKE_CASE__ : Union[float, List[float]] , SCREAMING_SNAKE_CASE__ : Union[float, List[float]] , SCREAMING_SNAKE_CASE__ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE__ : str , ) -> np.ndarray: return normalize(SCREAMING_SNAKE_CASE__ , mean=SCREAMING_SNAKE_CASE__ , std=SCREAMING_SNAKE_CASE__ , data_format=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) def a ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : ImageInput , SCREAMING_SNAKE_CASE__ : bool = None , SCREAMING_SNAKE_CASE__ : Dict[str, int] = None , SCREAMING_SNAKE_CASE__ : PILImageResampling = None , SCREAMING_SNAKE_CASE__ : bool = None , SCREAMING_SNAKE_CASE__ : Dict[str, int] = None , SCREAMING_SNAKE_CASE__ : bool = None , SCREAMING_SNAKE_CASE__ : float = None , SCREAMING_SNAKE_CASE__ : bool = None , SCREAMING_SNAKE_CASE__ : Optional[Union[float, List[float]]] = None , SCREAMING_SNAKE_CASE__ : Optional[Union[float, List[float]]] = None , SCREAMING_SNAKE_CASE__ : Optional[ChannelDimension] = ChannelDimension.FIRST , ) -> np.ndarray: if do_resize and size is None or resample is None: raise ValueError("Size and resample must be specified if do_resize is True." ) if do_center_crop and crop_size is None: raise ValueError("Crop size must be specified if do_center_crop is True." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True." ) # All transformations expect numpy arrays. 
lowerCAmelCase__ = to_numpy_array(SCREAMING_SNAKE_CASE__ ) if do_resize: lowerCAmelCase__ = self.resize(image=SCREAMING_SNAKE_CASE__ , size=SCREAMING_SNAKE_CASE__ , resample=SCREAMING_SNAKE_CASE__ ) if do_center_crop: lowerCAmelCase__ = self.center_crop(SCREAMING_SNAKE_CASE__ , size=SCREAMING_SNAKE_CASE__ ) if do_rescale: lowerCAmelCase__ = self.rescale(image=SCREAMING_SNAKE_CASE__ , scale=SCREAMING_SNAKE_CASE__ ) if do_normalize: lowerCAmelCase__ = self.normalize(image=SCREAMING_SNAKE_CASE__ , mean=SCREAMING_SNAKE_CASE__ , std=SCREAMING_SNAKE_CASE__ ) lowerCAmelCase__ = to_channel_dimension_format(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) return image def a ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : ImageInput , SCREAMING_SNAKE_CASE__ : bool = None , SCREAMING_SNAKE_CASE__ : Dict[str, int] = None , SCREAMING_SNAKE_CASE__ : PILImageResampling = None , SCREAMING_SNAKE_CASE__ : bool = None , SCREAMING_SNAKE_CASE__ : Dict[str, int] = None , SCREAMING_SNAKE_CASE__ : bool = None , SCREAMING_SNAKE_CASE__ : float = None , SCREAMING_SNAKE_CASE__ : bool = None , SCREAMING_SNAKE_CASE__ : Optional[Union[float, List[float]]] = None , SCREAMING_SNAKE_CASE__ : Optional[Union[float, List[float]]] = None , SCREAMING_SNAKE_CASE__ : Optional[Union[str, TensorType]] = None , SCREAMING_SNAKE_CASE__ : ChannelDimension = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE__ : int , ) -> PIL.Image.Image: lowerCAmelCase__ = do_resize if do_resize is not None else self.do_resize lowerCAmelCase__ = resample if resample is not None else self.resample lowerCAmelCase__ = do_center_crop if do_center_crop is not None else self.do_center_crop lowerCAmelCase__ = do_rescale if do_rescale is not None else self.do_rescale lowerCAmelCase__ = rescale_factor if rescale_factor is not None else self.rescale_factor lowerCAmelCase__ = do_normalize if do_normalize is not None else self.do_normalize lowerCAmelCase__ = image_mean if image_mean is not None else self.image_mean lowerCAmelCase__ = image_std if image_std is not None else self.image_std lowerCAmelCase__ = size if size is not None else self.size lowerCAmelCase__ = get_size_dict(SCREAMING_SNAKE_CASE__ , default_to_square=SCREAMING_SNAKE_CASE__ ) lowerCAmelCase__ = crop_size if crop_size is not None else self.crop_size lowerCAmelCase__ = get_size_dict(SCREAMING_SNAKE_CASE__ , param_name="crop_size" ) if not valid_images(SCREAMING_SNAKE_CASE__ ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) lowerCAmelCase__ = make_batched(SCREAMING_SNAKE_CASE__ ) lowerCAmelCase__ = [ [ self._preprocess_image( image=SCREAMING_SNAKE_CASE__ , do_resize=SCREAMING_SNAKE_CASE__ , size=SCREAMING_SNAKE_CASE__ , resample=SCREAMING_SNAKE_CASE__ , do_center_crop=SCREAMING_SNAKE_CASE__ , crop_size=SCREAMING_SNAKE_CASE__ , do_rescale=SCREAMING_SNAKE_CASE__ , rescale_factor=SCREAMING_SNAKE_CASE__ , do_normalize=SCREAMING_SNAKE_CASE__ , image_mean=SCREAMING_SNAKE_CASE__ , image_std=SCREAMING_SNAKE_CASE__ , data_format=SCREAMING_SNAKE_CASE__ , ) for img in video ] for video in videos ] lowerCAmelCase__ = {"pixel_values": videos} return BatchFeature(data=SCREAMING_SNAKE_CASE__ , tensor_type=SCREAMING_SNAKE_CASE__ )
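A hedged usage sketch for the processor above. In this dump the class is named `__lowerCamelCase` and its `preprocess`-style method is named `a`; the sketch assumes the obfuscated keyword names map back to the usual `size`/`return_tensors` parameters, and the shapes are illustrative:

```python
import numpy as np

# Hypothetical invocation under the dump's obfuscated names.
processor = __lowerCamelCase(size={"shortest_edge": 224})

# One "video" is a list of frames; the batching helper wraps it into [[frames]].
video = [np.random.randint(0, 256, (360, 480, 3), dtype=np.uint8) for _ in range(8)]

batch = processor.a(video, return_tensors="np")
print(batch["pixel_values"].shape)  # (1, 8, 3, 224, 224) expected for this config
```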
61
import math
from collections.abc import Iterator
from itertools import takewhile


def is_prime(number):
    """simple docstring"""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All prime numbers greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    """simple docstring"""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(n=2_0_0_0_0_0_0):
    """simple docstring"""
    return sum(takewhile(lambda x: x < n, prime_generator()))


if __name__ == "__main__":
    print(f"{solution() = }")
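A quick sanity check of the helpers above (an added sketch, not part of the original file):

```python
from itertools import takewhile

# Sum of primes below 10 is 2 + 3 + 5 + 7 = 17.
assert sum(takewhile(lambda x: x < 10, prime_generator())) == 17

# The 6k +/- 1 trick: every prime above 3 sits next to a multiple of 6,
# so trial division only needs candidates 5, 7, 11, 13, 17, 19, ...
assert all(is_prime(p) for p in (5, 7, 11, 13, 97))
```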
0
0
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ..models.auto import AutoModelForVisionaSeq from ..utils import requires_backends from .base import PipelineTool if TYPE_CHECKING: from PIL import Image class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' UpperCamelCase_ : Tuple = '''Salesforce/blip-image-captioning-base''' UpperCamelCase_ : List[str] = ( '''This is a tool that generates a description of an image. It takes an input named `image` which should be the ''' '''image to caption, and returns a text that contains the description in English.''' ) UpperCamelCase_ : str = '''image_captioner''' UpperCamelCase_ : Any = AutoModelForVisionaSeq UpperCamelCase_ : List[Any] = ['''image'''] UpperCamelCase_ : Optional[int] = ['''text'''] def __init__( self : List[str] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : str ): requires_backends(self , ["vision"] ) super().__init__(*UpperCAmelCase_ , **UpperCAmelCase_ ) def _A ( self : List[Any] , UpperCAmelCase_ : "Image" ): return self.pre_processor(images=UpperCAmelCase_ , return_tensors="pt" ) def _A ( self : List[str] , UpperCAmelCase_ : int ): return self.model.generate(**UpperCAmelCase_ ) def _A ( self : Optional[int] , UpperCAmelCase_ : Dict ): return self.pre_processor.batch_decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ )[0].strip()
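A hedged usage sketch for the captioning tool. The class is obfuscated as `SCREAMING_SNAKE_CASE` in this dump and its `_A` methods stand in for the usual `encode`/`forward`/`decode`; assuming those names are restored, a `PipelineTool` is simply called on an image:

```python
from PIL import Image

tool = SCREAMING_SNAKE_CASE()    # hypothetical: the captioner class defined above
image = Image.open("photo.jpg")  # any local RGB image; path is illustrative
print(tool(image))               # e.g. "a dog sitting on a wooden bench"
```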
62
import unittest import numpy as np from transformers.testing_utils import require_flax, require_tf, require_torch from transformers.utils import ( expand_dims, flatten_dict, is_flax_available, is_tf_available, is_torch_available, reshape, squeeze, transpose, ) if is_flax_available(): import jax.numpy as jnp if is_tf_available(): import tensorflow as tf if is_torch_available(): import torch class lowerCamelCase_ ( unittest.TestCase ): def A ( self ): """simple docstring""" __magic_name__ :List[Any] = { '''task_specific_params''': { '''summarization''': {'''length_penalty''': 1.0, '''max_length''': 1_2_8, '''min_length''': 1_2, '''num_beams''': 4}, '''summarization_cnn''': {'''length_penalty''': 2.0, '''max_length''': 1_4_2, '''min_length''': 5_6, '''num_beams''': 4}, '''summarization_xsum''': {'''length_penalty''': 1.0, '''max_length''': 6_2, '''min_length''': 1_1, '''num_beams''': 6}, } } __magic_name__ :List[str] = { '''task_specific_params.summarization.length_penalty''': 1.0, '''task_specific_params.summarization.max_length''': 1_2_8, '''task_specific_params.summarization.min_length''': 1_2, '''task_specific_params.summarization.num_beams''': 4, '''task_specific_params.summarization_cnn.length_penalty''': 2.0, '''task_specific_params.summarization_cnn.max_length''': 1_4_2, '''task_specific_params.summarization_cnn.min_length''': 5_6, '''task_specific_params.summarization_cnn.num_beams''': 4, '''task_specific_params.summarization_xsum.length_penalty''': 1.0, '''task_specific_params.summarization_xsum.max_length''': 6_2, '''task_specific_params.summarization_xsum.min_length''': 1_1, '''task_specific_params.summarization_xsum.num_beams''': 6, } self.assertEqual(flatten_dict(__lowerCAmelCase ) , __lowerCAmelCase ) def A ( self ): """simple docstring""" __magic_name__ :Optional[Any] = np.random.randn(3 , 4 ) self.assertTrue(np.allclose(transpose(__lowerCAmelCase ) , x.transpose() ) ) __magic_name__ :List[Any] = np.random.randn(3 , 4 , 5 ) self.assertTrue(np.allclose(transpose(__lowerCAmelCase , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) ) @require_torch def A ( self ): """simple docstring""" __magic_name__ :Optional[Any] = np.random.randn(3 , 4 ) __magic_name__ :Tuple = torch.tensor(__lowerCAmelCase ) self.assertTrue(np.allclose(transpose(__lowerCAmelCase ) , transpose(__lowerCAmelCase ).numpy() ) ) __magic_name__ :int = np.random.randn(3 , 4 , 5 ) __magic_name__ :Union[str, Any] = torch.tensor(__lowerCAmelCase ) self.assertTrue(np.allclose(transpose(__lowerCAmelCase , axes=(1, 2, 0) ) , transpose(__lowerCAmelCase , axes=(1, 2, 0) ).numpy() ) ) @require_tf def A ( self ): """simple docstring""" __magic_name__ :int = np.random.randn(3 , 4 ) __magic_name__ :Optional[Any] = tf.constant(__lowerCAmelCase ) self.assertTrue(np.allclose(transpose(__lowerCAmelCase ) , transpose(__lowerCAmelCase ).numpy() ) ) __magic_name__ :List[str] = np.random.randn(3 , 4 , 5 ) __magic_name__ :Optional[Any] = tf.constant(__lowerCAmelCase ) self.assertTrue(np.allclose(transpose(__lowerCAmelCase , axes=(1, 2, 0) ) , transpose(__lowerCAmelCase , axes=(1, 2, 0) ).numpy() ) ) @require_flax def A ( self ): """simple docstring""" __magic_name__ :int = np.random.randn(3 , 4 ) __magic_name__ :Dict = jnp.array(__lowerCAmelCase ) self.assertTrue(np.allclose(transpose(__lowerCAmelCase ) , np.asarray(transpose(__lowerCAmelCase ) ) ) ) __magic_name__ :Dict = np.random.randn(3 , 4 , 5 ) __magic_name__ :Dict = jnp.array(__lowerCAmelCase ) self.assertTrue(np.allclose(transpose(__lowerCAmelCase , axes=(1, 2, 0) ) , 
np.asarray(transpose(__lowerCAmelCase , axes=(1, 2, 0) ) ) ) ) def A ( self ): """simple docstring""" __magic_name__ :Any = np.random.randn(3 , 4 ) self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (4, 3) ) , np.reshape(__lowerCAmelCase , (4, 3) ) ) ) __magic_name__ :Union[str, Any] = np.random.randn(3 , 4 , 5 ) self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (1_2, 5) ) , np.reshape(__lowerCAmelCase , (1_2, 5) ) ) ) @require_torch def A ( self ): """simple docstring""" __magic_name__ :Dict = np.random.randn(3 , 4 ) __magic_name__ :Tuple = torch.tensor(__lowerCAmelCase ) self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (4, 3) ) , reshape(__lowerCAmelCase , (4, 3) ).numpy() ) ) __magic_name__ :Union[str, Any] = np.random.randn(3 , 4 , 5 ) __magic_name__ :List[str] = torch.tensor(__lowerCAmelCase ) self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (1_2, 5) ) , reshape(__lowerCAmelCase , (1_2, 5) ).numpy() ) ) @require_tf def A ( self ): """simple docstring""" __magic_name__ :Dict = np.random.randn(3 , 4 ) __magic_name__ :Union[str, Any] = tf.constant(__lowerCAmelCase ) self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (4, 3) ) , reshape(__lowerCAmelCase , (4, 3) ).numpy() ) ) __magic_name__ :List[Any] = np.random.randn(3 , 4 , 5 ) __magic_name__ :Optional[int] = tf.constant(__lowerCAmelCase ) self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (1_2, 5) ) , reshape(__lowerCAmelCase , (1_2, 5) ).numpy() ) ) @require_flax def A ( self ): """simple docstring""" __magic_name__ :List[str] = np.random.randn(3 , 4 ) __magic_name__ :Any = jnp.array(__lowerCAmelCase ) self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (4, 3) ) , np.asarray(reshape(__lowerCAmelCase , (4, 3) ) ) ) ) __magic_name__ :List[Any] = np.random.randn(3 , 4 , 5 ) __magic_name__ :List[str] = jnp.array(__lowerCAmelCase ) self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (1_2, 5) ) , np.asarray(reshape(__lowerCAmelCase , (1_2, 5) ) ) ) ) def A ( self ): """simple docstring""" __magic_name__ :List[Any] = np.random.randn(1 , 3 , 4 ) self.assertTrue(np.allclose(squeeze(__lowerCAmelCase ) , np.squeeze(__lowerCAmelCase ) ) ) __magic_name__ :Optional[Any] = np.random.randn(1 , 4 , 1 , 5 ) self.assertTrue(np.allclose(squeeze(__lowerCAmelCase , axis=2 ) , np.squeeze(__lowerCAmelCase , axis=2 ) ) ) @require_torch def A ( self ): """simple docstring""" __magic_name__ :Dict = np.random.randn(1 , 3 , 4 ) __magic_name__ :List[Any] = torch.tensor(__lowerCAmelCase ) self.assertTrue(np.allclose(squeeze(__lowerCAmelCase ) , squeeze(__lowerCAmelCase ).numpy() ) ) __magic_name__ :List[str] = np.random.randn(1 , 4 , 1 , 5 ) __magic_name__ :str = torch.tensor(__lowerCAmelCase ) self.assertTrue(np.allclose(squeeze(__lowerCAmelCase , axis=2 ) , squeeze(__lowerCAmelCase , axis=2 ).numpy() ) ) @require_tf def A ( self ): """simple docstring""" __magic_name__ :int = np.random.randn(1 , 3 , 4 ) __magic_name__ :Tuple = tf.constant(__lowerCAmelCase ) self.assertTrue(np.allclose(squeeze(__lowerCAmelCase ) , squeeze(__lowerCAmelCase ).numpy() ) ) __magic_name__ :Tuple = np.random.randn(1 , 4 , 1 , 5 ) __magic_name__ :Optional[int] = tf.constant(__lowerCAmelCase ) self.assertTrue(np.allclose(squeeze(__lowerCAmelCase , axis=2 ) , squeeze(__lowerCAmelCase , axis=2 ).numpy() ) ) @require_flax def A ( self ): """simple docstring""" __magic_name__ :Tuple = np.random.randn(1 , 3 , 4 ) __magic_name__ :Optional[Any] = jnp.array(__lowerCAmelCase ) self.assertTrue(np.allclose(squeeze(__lowerCAmelCase ) , 
np.asarray(squeeze(__lowerCAmelCase ) ) ) ) __magic_name__ :List[Any] = np.random.randn(1 , 4 , 1 , 5 ) __magic_name__ :Optional[Any] = jnp.array(__lowerCAmelCase ) self.assertTrue(np.allclose(squeeze(__lowerCAmelCase , axis=2 ) , np.asarray(squeeze(__lowerCAmelCase , axis=2 ) ) ) ) def A ( self ): """simple docstring""" __magic_name__ :Any = np.random.randn(3 , 4 ) self.assertTrue(np.allclose(expand_dims(__lowerCAmelCase , axis=1 ) , np.expand_dims(__lowerCAmelCase , axis=1 ) ) ) @require_torch def A ( self ): """simple docstring""" __magic_name__ :List[Any] = np.random.randn(3 , 4 ) __magic_name__ :Any = torch.tensor(__lowerCAmelCase ) self.assertTrue(np.allclose(expand_dims(__lowerCAmelCase , axis=1 ) , expand_dims(__lowerCAmelCase , axis=1 ).numpy() ) ) @require_tf def A ( self ): """simple docstring""" __magic_name__ :Union[str, Any] = np.random.randn(3 , 4 ) __magic_name__ :Union[str, Any] = tf.constant(__lowerCAmelCase ) self.assertTrue(np.allclose(expand_dims(__lowerCAmelCase , axis=1 ) , expand_dims(__lowerCAmelCase , axis=1 ).numpy() ) ) @require_flax def A ( self ): """simple docstring""" __magic_name__ :List[str] = np.random.randn(3 , 4 ) __magic_name__ :Tuple = jnp.array(__lowerCAmelCase ) self.assertTrue(np.allclose(expand_dims(__lowerCAmelCase , axis=1 ) , np.asarray(expand_dims(__lowerCAmelCase , axis=1 ) ) ) )
0
0
from dataclasses import dataclass, field from typing import Tuple from ..utils import cached_property, is_tf_available, logging, requires_backends from .benchmark_args_utils import BenchmarkArguments if is_tf_available(): import tensorflow as tf a : Tuple = logging.get_logger(__name__) @dataclass class a ( lowercase__ ): """simple docstring""" a : List[str] = [ 'no_inference', 'no_cuda', 'no_tpu', 'no_speed', 'no_memory', 'no_env_print', 'no_multi_process', ] def __init__( self : Union[str, Any] , **__lowercase : List[Any] ) -> Dict: for deprecated_arg in self.deprecated_args: if deprecated_arg in kwargs: __UpperCAmelCase : Any = deprecated_arg[3:] __UpperCAmelCase : List[Any] = not kwargs.pop(__lowercase ) logger.warning( f"""{deprecated_arg} is depreciated. Please use --no-{positive_arg} or""" f""" {positive_arg}={kwargs[positive_arg]}""" ) __UpperCAmelCase : Tuple = kwargs.pop("""tpu_name""" , self.tpu_name ) __UpperCAmelCase : str = kwargs.pop("""device_idx""" , self.device_idx ) __UpperCAmelCase : Tuple = kwargs.pop("""eager_mode""" , self.eager_mode ) __UpperCAmelCase : Optional[Any] = kwargs.pop("""use_xla""" , self.use_xla ) super().__init__(**__lowercase ) a : str = field( default=lowercase__ , metadata={'help': 'Name of TPU'} , ) a : int = field( default=0 , metadata={'help': 'CPU / GPU device index. Defaults to 0.'} , ) a : bool = field(default=lowercase__ , metadata={'help': 'Benchmark models in eager model.'} ) a : bool = field( default=lowercase__ , metadata={ 'help': 'Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`.' } , ) @cached_property def UpperCAmelCase ( self : int ) -> Tuple["tf.distribute.cluster_resolver.TPUClusterResolver"]: requires_backends(self , ["""tf"""] ) __UpperCAmelCase : List[str] = None if self.tpu: try: if self.tpu_name: __UpperCAmelCase : int = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name ) else: __UpperCAmelCase : Optional[int] = tf.distribute.cluster_resolver.TPUClusterResolver() except ValueError: __UpperCAmelCase : Any = None return tpu @cached_property def UpperCAmelCase ( self : List[str] ) -> Tuple["tf.distribute.Strategy", "tf.distribute.cluster_resolver.TPUClusterResolver"]: requires_backends(self , ["""tf"""] ) if self.is_tpu: tf.config.experimental_connect_to_cluster(self._setup_tpu ) tf.tpu.experimental.initialize_tpu_system(self._setup_tpu ) __UpperCAmelCase : Tuple = tf.distribute.TPUStrategy(self._setup_tpu ) else: # currently no multi gpu is allowed if self.is_gpu: # TODO: Currently only single GPU is supported tf.config.set_visible_devices(self.gpu_list[self.device_idx] , """GPU""" ) __UpperCAmelCase : Tuple = tf.distribute.OneDeviceStrategy(device=f"""/gpu:{self.device_idx}""" ) else: tf.config.set_visible_devices([] , """GPU""" ) # disable GPU __UpperCAmelCase : int = tf.distribute.OneDeviceStrategy(device=f"""/cpu:{self.device_idx}""" ) return strategy @property def UpperCAmelCase ( self : Optional[Any] ) -> bool: requires_backends(self , ["""tf"""] ) return self._setup_tpu is not None @property def UpperCAmelCase ( self : Dict ) -> "tf.distribute.Strategy": requires_backends(self , ["""tf"""] ) return self._setup_strategy @property def UpperCAmelCase ( self : Any ) -> int: requires_backends(self , ["""tf"""] ) return tf.config.list_physical_devices("""GPU""" ) @property def UpperCAmelCase ( self : List[Any] ) -> int: requires_backends(self , ["""tf"""] ) if self.cuda: return len(self.gpu_list ) return 0 @property def UpperCAmelCase ( self : str ) -> bool: return 
self.n_gpu > 0
63
from pathlib import PurePosixPath from typing import Optional import fsspec from fsspec import AbstractFileSystem from huggingface_hub.hf_api import DatasetInfo from ..utils.file_utils import get_authentication_headers_for_url from ..utils.hub import hf_hub_url class lowerCamelCase_ ( lowerCamelCase ): a__ = '''''' a__ = '''hf-legacy''' # "hf://"" is reserved for hffs def __init__( self , __lowerCAmelCase = None , __lowerCAmelCase = None , **__lowerCAmelCase , ): """simple docstring""" super().__init__(self , **__lowerCAmelCase ) __magic_name__ :List[Any] = repo_info __magic_name__ :Dict = token __magic_name__ :Optional[Any] = None def A ( self ): """simple docstring""" if self.dir_cache is None: __magic_name__ :Any = {} for hf_file in self.repo_info.siblings: # TODO(QL): add sizes __magic_name__ :Optional[int] = { '''name''': hf_file.rfilename, '''size''': None, '''type''': '''file''', } self.dir_cache.update( { str(__lowerCAmelCase ): {'''name''': str(__lowerCAmelCase ), '''size''': None, '''type''': '''directory'''} for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1] } ) def A ( self , __lowerCAmelCase , __lowerCAmelCase = "rb" , **__lowerCAmelCase , ): """simple docstring""" if not isinstance(self.repo_info , __lowerCAmelCase ): raise NotImplementedError(F'''Open is only implemented for dataset repositories, but got {self.repo_info}''' ) __magic_name__ :Union[str, Any] = hf_hub_url(self.repo_info.id , __lowerCAmelCase , revision=self.repo_info.sha ) return fsspec.open( __lowerCAmelCase , mode=__lowerCAmelCase , headers=get_authentication_headers_for_url(__lowerCAmelCase , use_auth_token=self.token ) , client_kwargs={'''trust_env''': True} , ).open() def A ( self , __lowerCAmelCase , **__lowerCAmelCase ): """simple docstring""" self._get_dirs() __magic_name__ :str = self._strip_protocol(__lowerCAmelCase ) if path in self.dir_cache: return self.dir_cache[path] else: raise FileNotFoundError(__lowerCAmelCase ) def A ( self , __lowerCAmelCase , __lowerCAmelCase=False , **__lowerCAmelCase ): """simple docstring""" self._get_dirs() __magic_name__ :Union[str, Any] = PurePosixPath(path.strip('''/''' ) ) __magic_name__ :Dict = {} for p, f in self.dir_cache.items(): __magic_name__ :int = PurePosixPath(p.strip('''/''' ) ) __magic_name__ :Tuple = p.parent if root == path: __magic_name__ :Optional[Any] = f __magic_name__ :List[Any] = list(paths.values() ) if detail: return out else: return sorted(f['''name'''] for f in out )
0
0
import warnings from ...configuration_utils import PretrainedConfig from ...utils import logging lowercase_ : Any = logging.get_logger(__name__) lowercase_ : Optional[int] = { 'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json', } class _lowerCamelCase ( UpperCamelCase_ ): __a = "mvp" __a = ["past_key_values"] __a = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"} def __init__( self , lowerCAmelCase=50267 , lowerCAmelCase=1024 , lowerCAmelCase=12 , lowerCAmelCase=4096 , lowerCAmelCase=16 , lowerCAmelCase=12 , lowerCAmelCase=4096 , lowerCAmelCase=16 , lowerCAmelCase=0.0 , lowerCAmelCase=0.0 , lowerCAmelCase="gelu" , lowerCAmelCase=1024 , lowerCAmelCase=0.1 , lowerCAmelCase=0.0 , lowerCAmelCase=0.0 , lowerCAmelCase=0.02 , lowerCAmelCase=0.0 , lowerCAmelCase=False , lowerCAmelCase=True , lowerCAmelCase=1 , lowerCAmelCase=0 , lowerCAmelCase=2 , lowerCAmelCase=True , lowerCAmelCase=2 , lowerCAmelCase=2 , lowerCAmelCase=False , lowerCAmelCase=100 , lowerCAmelCase=800 , **lowerCAmelCase , ) -> Dict: SCREAMING_SNAKE_CASE__: List[Any]= vocab_size SCREAMING_SNAKE_CASE__: Optional[int]= max_position_embeddings SCREAMING_SNAKE_CASE__: int= d_model SCREAMING_SNAKE_CASE__: Optional[Any]= encoder_ffn_dim SCREAMING_SNAKE_CASE__: Dict= encoder_layers SCREAMING_SNAKE_CASE__: Optional[Any]= encoder_attention_heads SCREAMING_SNAKE_CASE__: List[str]= decoder_ffn_dim SCREAMING_SNAKE_CASE__: Union[str, Any]= decoder_layers SCREAMING_SNAKE_CASE__: Dict= decoder_attention_heads SCREAMING_SNAKE_CASE__: Any= dropout SCREAMING_SNAKE_CASE__: str= attention_dropout SCREAMING_SNAKE_CASE__: List[Any]= activation_dropout SCREAMING_SNAKE_CASE__: Optional[int]= activation_function SCREAMING_SNAKE_CASE__: Optional[int]= init_std SCREAMING_SNAKE_CASE__: List[str]= encoder_layerdrop SCREAMING_SNAKE_CASE__: Union[str, Any]= decoder_layerdrop SCREAMING_SNAKE_CASE__: List[Any]= classifier_dropout SCREAMING_SNAKE_CASE__: List[Any]= use_cache SCREAMING_SNAKE_CASE__: Dict= encoder_layers SCREAMING_SNAKE_CASE__: List[Any]= scale_embedding # scale factor will be sqrt(d_model) if True SCREAMING_SNAKE_CASE__: int= use_prompt SCREAMING_SNAKE_CASE__: List[Any]= prompt_length SCREAMING_SNAKE_CASE__: str= prompt_mid_dim super().__init__( pad_token_id=lowerCAmelCase , bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , is_encoder_decoder=lowerCAmelCase , decoder_start_token_id=lowerCAmelCase , forced_eos_token_id=lowerCAmelCase , **lowerCAmelCase , ) if self.forced_bos_token_id is None and kwargs.get('''force_bos_token_to_be_generated''' , lowerCAmelCase ): SCREAMING_SNAKE_CASE__: Union[str, Any]= self.bos_token_id warnings.warn( f'Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. ' '''The config can simply be saved and uploaded again to be fixed.''' )
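A minimal instantiation sketch for the config class above (named `_lowerCamelCase` in this dump). It assumes the obfuscated `__init__` parameter names map back to the attribute names they assign, which the duplicated `lowerCAmelCase` placeholders do not actually permit as written:

```python
# Hypothetical round-trip using the defaults shown in __init__ above.
config = _lowerCamelCase()
print(config.d_model, config.encoder_layers)  # 1024 12
```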
64
import pyarrow.parquet as pq import pytest from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config from datasets.features.image import Image from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def __lowercase ( snake_case, snake_case ): """simple docstring""" assert isinstance(snake_case, snake_case ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('''keep_in_memory''', [False, True] ) def __lowercase ( snake_case, snake_case, snake_case ): """simple docstring""" __magic_name__ :Tuple = tmp_path / '''cache''' __magic_name__ :int = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): __magic_name__ :Optional[Any] = ParquetDatasetReader(snake_case, cache_dir=snake_case, keep_in_memory=snake_case ).read() _check_parquet_dataset(snake_case, snake_case ) @pytest.mark.parametrize( '''features''', [ None, {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}, {'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''}, {'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''}, {'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''}, ], ) def __lowercase ( snake_case, snake_case, snake_case ): """simple docstring""" __magic_name__ :List[str] = tmp_path / '''cache''' __magic_name__ :int = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} __magic_name__ :Tuple = features.copy() if features else default_expected_features __magic_name__ :Union[str, Any] = ( Features({feature: Value(snake_case ) for feature, dtype in features.items()} ) if features is not None else None ) __magic_name__ :int = ParquetDatasetReader(snake_case, features=snake_case, cache_dir=snake_case ).read() _check_parquet_dataset(snake_case, snake_case ) @pytest.mark.parametrize('''split''', [None, NamedSplit('''train''' ), '''train''', '''test'''] ) def __lowercase ( snake_case, snake_case, snake_case ): """simple docstring""" __magic_name__ :str = tmp_path / '''cache''' __magic_name__ :List[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} __magic_name__ :int = ParquetDatasetReader(snake_case, cache_dir=snake_case, split=snake_case ).read() _check_parquet_dataset(snake_case, snake_case ) assert dataset.split == split if split else "train" @pytest.mark.parametrize('''path_type''', [str, list] ) def __lowercase ( snake_case, snake_case, snake_case ): """simple docstring""" if issubclass(snake_case, snake_case ): __magic_name__ :Union[str, Any] = parquet_path elif issubclass(snake_case, snake_case ): __magic_name__ :Union[str, Any] = [parquet_path] __magic_name__ :Optional[int] = tmp_path / '''cache''' __magic_name__ :Optional[int] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} __magic_name__ :str = ParquetDatasetReader(snake_case, cache_dir=snake_case ).read() _check_parquet_dataset(snake_case, snake_case ) def __lowercase ( snake_case, snake_case, snake_case=("train",) ): """simple docstring""" assert isinstance(snake_case, 
snake_case ) for split in splits: __magic_name__ :Optional[Any] = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('''keep_in_memory''', [False, True] ) def __lowercase ( snake_case, snake_case, snake_case ): """simple docstring""" __magic_name__ :Any = tmp_path / '''cache''' __magic_name__ :Optional[int] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): __magic_name__ :Tuple = ParquetDatasetReader( {'''train''': parquet_path}, cache_dir=snake_case, keep_in_memory=snake_case ).read() _check_parquet_datasetdict(snake_case, snake_case ) @pytest.mark.parametrize( '''features''', [ None, {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}, {'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''}, {'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''}, {'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''}, ], ) def __lowercase ( snake_case, snake_case, snake_case ): """simple docstring""" __magic_name__ :Optional[Any] = tmp_path / '''cache''' __magic_name__ :Dict = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} __magic_name__ :int = features.copy() if features else default_expected_features __magic_name__ :List[Any] = ( Features({feature: Value(snake_case ) for feature, dtype in features.items()} ) if features is not None else None ) __magic_name__ :Optional[int] = ParquetDatasetReader({'''train''': parquet_path}, features=snake_case, cache_dir=snake_case ).read() _check_parquet_datasetdict(snake_case, snake_case ) @pytest.mark.parametrize('''split''', [None, NamedSplit('''train''' ), '''train''', '''test'''] ) def __lowercase ( snake_case, snake_case, snake_case ): """simple docstring""" if split: __magic_name__ :Dict = {split: parquet_path} else: __magic_name__ :Optional[int] = '''train''' __magic_name__ :Dict = {'''train''': parquet_path, '''test''': parquet_path} __magic_name__ :List[Any] = tmp_path / '''cache''' __magic_name__ :Optional[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} __magic_name__ :Optional[Any] = ParquetDatasetReader(snake_case, cache_dir=snake_case ).read() _check_parquet_datasetdict(snake_case, snake_case, splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() ) def __lowercase ( snake_case, snake_case ): """simple docstring""" __magic_name__ :str = ParquetDatasetWriter(snake_case, tmp_path / '''foo.parquet''' ) assert writer.write() > 0 __magic_name__ :List[Any] = pq.ParquetFile(tmp_path / '''foo.parquet''' ) __magic_name__ :List[Any] = pf.read() assert dataset.data.table == output_table def __lowercase ( snake_case, snake_case ): """simple docstring""" __magic_name__ :List[str] = str(shared_datadir / '''test_image_rgb.jpg''' ) __magic_name__ :Tuple = {'''image''': [image_path]} __magic_name__ :List[Any] = Features({'''image''': Image()} ) __magic_name__ :Tuple = Dataset.from_dict(snake_case, features=snake_case ) __magic_name__ :Union[str, Any] = ParquetDatasetWriter(snake_case, tmp_path / '''foo.parquet''' ) assert writer.write() > 0 __magic_name__ :List[str] = 
Dataset.from_parquet(str(tmp_path / '''foo.parquet''' ) ) assert dataset.features == reloaded_dataset.features __magic_name__ :List[str] = ParquetDatasetReader(str(tmp_path / '''foo.parquet''' ), streaming=snake_case ).read() assert dataset.features == reloaded_iterable_dataset.features @pytest.mark.parametrize( '''feature, expected''', [ (Features({'''foo''': Value('''int32''' )} ), None), (Features({'''image''': Image(), '''foo''': Value('''int32''' )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS), (Features({'''nested''': Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS), ], ) def __lowercase ( snake_case, snake_case ): """simple docstring""" assert get_writer_batch_size(snake_case ) == expected
0
0
"""simple docstring""" def lowerCAmelCase ( __UpperCamelCase ): '''simple docstring''' if not all(x.isalpha() for x in string ): raise ValueError("""String must only contain alphabetic characters.""" ) UpperCAmelCase__ : List[Any] = sorted(string.lower() ) return len(__UpperCamelCase ) == len(set(__UpperCamelCase ) ) if __name__ == "__main__": __UpperCAmelCase = input('Enter a string ').strip() __UpperCAmelCase = is_isogram(input_str) print(F"{input_str} is {'an' if isogram else 'not an'} isogram.")
65
def multiplicative_persistence(num):
    """simple docstring"""
    if not isinstance(num, int):
        raise ValueError("multiplicative_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("multiplicative_persistence() does not accept negative values")
    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]
        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]
        num_string = str(total)
        steps += 1
    return steps


def additive_persistence(num):
    """simple docstring"""
    if not isinstance(num, int):
        raise ValueError("additive_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("additive_persistence() does not accept negative values")
    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]
        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]
        num_string = str(total)
        steps += 1
    return steps


if __name__ == "__main__":
    import doctest

    doctest.testmod()
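A worked trace of the two notions (added example):

```python
# Multiplicative persistence of 39: 3*9 = 27 -> 2*7 = 14 -> 1*4 = 4, i.e. 3 steps.
assert multiplicative_persistence(39) == 3

# Additive persistence of 199: 1+9+9 = 19 -> 1+9 = 10 -> 1+0 = 1, also 3 steps.
assert additive_persistence(199) == 3
```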
0
0
from math import isqrt


def is_prime(number):
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime=10**6):
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)
        cube_index += 1
        prime_candidate += 6 * cube_index
    return primes_count


if __name__ == "__main__":
    print(f"{solution() = }")
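The candidates 7, 19, 37, 61, ... visited above are exactly the differences of consecutive cubes, which is what the `prime_candidate += 6 * cube_index` update encodes. A short check of that identity (added sketch):

```python
# (k + 1)^3 - k^3 == 3k^2 + 3k + 1, and consecutive values of that
# expression grow by 6(k + 1), hence the += 6 * cube_index step.
diffs = [(k + 1) ** 3 - k**3 for k in range(1, 6)]
assert diffs == [7, 19, 37, 61, 91]
assert [b - a for a, b in zip(diffs, diffs[1:])] == [12, 18, 24, 30]
```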
66
import math import os import re import sys import unittest from pathlib import Path from typing import Tuple from unittest.mock import patch from parameterized import parameterized from transformers.testing_utils import ( CaptureStderr, ExtendSysPath, TestCasePlus, execute_subprocess_async, get_gpu_count, get_torch_dist_unique_port, require_apex, require_bitsandbytes, require_fairscale, require_torch, require_torch_gpu, require_torch_multi_gpu, require_torch_non_multi_gpu, slow, ) from transformers.trainer_callback import TrainerState from transformers.trainer_utils import set_seed SCREAMING_SNAKE_CASE__ : List[Any] = os.path.abspath(os.path.dirname(__file__)) with ExtendSysPath(f"{bindir}/../../examples/pytorch/translation"): from run_translation import main # noqa set_seed(42) SCREAMING_SNAKE_CASE__ : Optional[Any] = """sshleifer/student_marian_en_ro_6_1""" SCREAMING_SNAKE_CASE__ : List[Any] = """sshleifer/tiny-mbart""" @require_torch class lowerCamelCase_ ( lowerCamelCase ): def A ( self , __lowerCAmelCase=False , __lowerCAmelCase=None , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , ): """simple docstring""" __magic_name__ :List[Any] = self.run_trainer( eval_steps=1 , max_len=1_2 , model_name=__lowerCAmelCase , num_train_epochs=1 , distributed=__lowerCAmelCase , extra_args_str=__lowerCAmelCase , predict_with_generate=__lowerCAmelCase , do_train=__lowerCAmelCase , do_eval=__lowerCAmelCase , do_predict=__lowerCAmelCase , ) __magic_name__ :Any = TrainerState.load_from_json(os.path.join(__lowerCAmelCase , '''trainer_state.json''' ) ).log_history if not do_eval: return __magic_name__ :Any = [log for log in logs if '''eval_loss''' in log.keys()] __magic_name__ :str = eval_metrics[0] if predict_with_generate: assert "eval_bleu" in first_step_stats __magic_name__ :Tuple = eval_metrics[-1] assert isinstance(last_step_stats['''eval_bleu'''] , __lowerCAmelCase ) assert not math.isnan(float(last_step_stats['''eval_loss'''] ) ), "eval_loss must not be `nan`" @require_torch_non_multi_gpu def A ( self ): """simple docstring""" self.run_seqaseq_quick() @require_torch_multi_gpu def A ( self ): """simple docstring""" self.run_seqaseq_quick(distributed=__lowerCAmelCase ) @require_torch_multi_gpu def A ( self ): """simple docstring""" self.run_seqaseq_quick(distributed=__lowerCAmelCase ) @unittest.skip('''Requires an update of the env running those tests''' ) @require_torch_multi_gpu @require_fairscale def A ( self ): """simple docstring""" self.run_seqaseq_quick(distributed=__lowerCAmelCase , extra_args_str='''--sharded_ddp simple''' ) @unittest.skip('''Requires an update of the env running those tests''' ) @require_torch_multi_gpu @require_fairscale def A ( self ): """simple docstring""" self.run_seqaseq_quick(distributed=__lowerCAmelCase , extra_args_str='''--sharded_ddp simple --fp16''' ) @unittest.skip('''Requires an update of the env running those tests''' ) @require_torch_multi_gpu @require_fairscale def A ( self ): """simple docstring""" self.run_seqaseq_quick(distributed=__lowerCAmelCase , extra_args_str='''--sharded_ddp zero_dp_2''' , predict_with_generate=__lowerCAmelCase ) @unittest.skip('''Requires an update of the env running those tests''' ) @require_torch_multi_gpu @require_fairscale def A ( self ): """simple docstring""" self.run_seqaseq_quick( distributed=__lowerCAmelCase , extra_args_str='''--sharded_ddp zero_dp_2 --fp16''' , predict_with_generate=__lowerCAmelCase ) @require_apex @require_torch_gpu def A ( self ): """simple docstring""" # 
XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same # program and it breaks other tests that run from the same pytest worker, therefore until this is # sorted out it must be run only in an external program, that is distributed=True in this # test and only under one or more gpus - if we want cpu will need to make a special test # # specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via # 2nd main() call it botches the future eval. # self.run_seqaseq_quick(distributed=__lowerCAmelCase , extra_args_str='''--fp16 --fp16_backend=apex''' ) # test 2nd time - was getting eval_loss': nan' # to reproduce the problem set distributed=False self.run_seqaseq_quick(distributed=__lowerCAmelCase , extra_args_str='''--fp16 --fp16_backend=apex''' ) @parameterized.expand(['''base''', '''low''', '''high''', '''mixed'''] ) @require_torch_multi_gpu def A ( self , __lowerCAmelCase ): """simple docstring""" # as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout __magic_name__ :Any = { # test with the default log_level - should be info and thus log info once '''base''': {'''extra_args_str''': '''''', '''n_matches''': 1}, # test with low log_level and log_level_replica - should be noisy on all processes # now the info string should appear twice on 2 processes '''low''': {'''extra_args_str''': '''--log_level debug --log_level_replica debug''', '''n_matches''': 2}, # test with high log_level and low log_level_replica # now the info string should appear once only on the replica '''high''': {'''extra_args_str''': '''--log_level error --log_level_replica debug''', '''n_matches''': 1}, # test with high log_level and log_level_replica - should be quiet on all processes '''mixed''': {'''extra_args_str''': '''--log_level error --log_level_replica error''', '''n_matches''': 0}, } __magic_name__ :Optional[Any] = experiments[experiment_id] __magic_name__ :List[Any] = {'''distributed''': True, '''predict_with_generate''': False, '''do_eval''': False, '''do_predict''': False} __magic_name__ :Optional[int] = '''Running training''' with CaptureStderr() as cl: self.run_seqaseq_quick(**__lowerCAmelCase , extra_args_str=data['''extra_args_str'''] ) __magic_name__ :int = len(re.findall(__lowerCAmelCase , cl.err ) ) self.assertEqual(__lowerCAmelCase , data['''n_matches'''] ) @slow def A ( self ): """simple docstring""" __magic_name__ :List[str] = self.run_trainer( eval_steps=2 , max_len=1_2_8 , model_name=__lowerCAmelCase , learning_rate=3E-4 , num_train_epochs=1_0 , distributed=__lowerCAmelCase , ) # Check metrics __magic_name__ :Optional[int] = TrainerState.load_from_json(os.path.join(__lowerCAmelCase , '''trainer_state.json''' ) ).log_history __magic_name__ :List[str] = [log for log in logs if '''eval_loss''' in log.keys()] __magic_name__ :Any = eval_metrics[0] __magic_name__ :int = eval_metrics[-1] assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing" assert isinstance(last_step_stats['''eval_bleu'''] , __lowerCAmelCase ) # test if do_predict saves generations and metrics __magic_name__ :List[Any] = os.listdir(__lowerCAmelCase ) __magic_name__ :List[str] = {os.path.basename(__lowerCAmelCase ) for p in contents} assert "generated_predictions.txt" in contents assert "predict_results.json" in contents @slow @require_bitsandbytes def A ( self ): """simple docstring""" from transformers.training_args import OptimizerNames def train_and_return_metrics(__lowerCAmelCase ) -> Tuple[int, float]: __magic_name__ 
:str = '''--skip_memory_metrics 0''' __magic_name__ :Dict = self.run_trainer( max_len=1_2_8 , model_name=__lowerCAmelCase , learning_rate=3E-4 , num_train_epochs=1 , optim=__lowerCAmelCase , distributed=__lowerCAmelCase , extra_args_str=__lowerCAmelCase , do_eval=__lowerCAmelCase , do_predict=__lowerCAmelCase , n_gpus_to_use=1 , ) # Check metrics __magic_name__ :Optional[Any] = TrainerState.load_from_json(Path(__lowerCAmelCase , '''trainer_state.json''' ) ).log_history __magic_name__ :int = int(logs[0]['''train_mem_gpu_peaked_delta'''] / 2**2_0 ) __magic_name__ :Optional[Any] = int(logs[0]['''train_mem_gpu_alloc_delta'''] / 2**2_0 ) __magic_name__ :Any = logs[0]['''train_loss'''] return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss __magic_name__ , __magic_name__ , __magic_name__ :int = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value ) __magic_name__ , __magic_name__ , __magic_name__ :Tuple = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value ) __magic_name__ :Tuple = gpu_alloc_mem_orig - gpu_alloc_mem_bnb __magic_name__ :Tuple = gpu_peak_mem_orig + gpu_alloc_mem_orig __magic_name__ :List[Any] = gpu_peak_mem_bnb + gpu_alloc_mem_bnb __magic_name__ :Optional[int] = gpu_total_mem_orig - gpu_total_mem_bnb # sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which # doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized # in 2 bytes and the diff in optim memory usage is derived as so: # # - normal 25*8=~200MB (8 bytes per param) # - bnb 25*2= ~50MB (2 bytes per param) # # Thus we should expect ~150MB total memory saved. # # Peak memory should be the same - the total should be different by about that same margin # # After leaving a small margin to accommodate for differences between gpus let's check # that we have at least 120MB in savings __magic_name__ :Optional[Any] = 1_2_0 # uncomment the following if this test starts failing - requires py38 for a new print feature # gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb # print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB") # print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB") # print(f"{gpu_alloc_mem_diff=}MB") # print(f"{gpu_peak_mem_diff=}MB") # print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB") # print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB") self.assertGreater( __lowerCAmelCase , __lowerCAmelCase , '''should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got''' F''' a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and''' F''' gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB''' , ) self.assertGreater( __lowerCAmelCase , __lowerCAmelCase , '''should use ~150MB less total gpu memory with BNB, compared to without it for this model but got''' F''' a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and''' F''' gpu_total_mem_bnb={gpu_total_mem_bnb}MB''' , ) self.assertEqual( __lowerCAmelCase , __lowerCAmelCase , F'''loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}''' ) def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = 3E-3 , __lowerCAmelCase = "adafactor" , __lowerCAmelCase = False , __lowerCAmelCase = None , __lowerCAmelCase = 0 , __lowerCAmelCase = True , __lowerCAmelCase = True , __lowerCAmelCase = True , __lowerCAmelCase = True , __lowerCAmelCase = None , ): """simple docstring""" 
__magic_name__ :int = self.test_file_dir / '''../fixtures/tests_samples/wmt_en_ro''' __magic_name__ :Dict = self.get_auto_remove_tmp_dir() __magic_name__ :Tuple = F''' --model_name_or_path {model_name} --train_file {data_dir}/train.json --validation_file {data_dir}/val.json --test_file {data_dir}/test.json --output_dir {output_dir} --overwrite_output_dir --max_train_samples 8 --max_source_length {max_len} --max_target_length {max_len} --do_train --num_train_epochs {str(__lowerCAmelCase )} --per_device_train_batch_size 4 --learning_rate {learning_rate} --warmup_steps 8 --logging_steps 0 --logging_strategy no --save_steps {str(__lowerCAmelCase )} --group_by_length --label_smoothing_factor 0.1 --target_lang ro_RO --source_lang en_XX '''.split() __magic_name__ :str = F''' --do_eval --per_device_eval_batch_size 4 --max_eval_samples 8 --val_max_target_length {max_len} --evaluation_strategy steps --eval_steps {str(__lowerCAmelCase )} '''.split() __magic_name__ :Dict = ''' --do_predict '''.split() __magic_name__ :Optional[int] = [] if do_train: args += args_train if do_eval: args += args_eval if do_predict: args += args_predict if predict_with_generate: args += "--predict_with_generate".split() if do_train: if optim == "adafactor": args += "--adafactor".split() else: args += F'''--optim {optim}'''.split() if extra_args_str is not None: args += extra_args_str.split() if distributed: if n_gpus_to_use is None: __magic_name__ :List[Any] = get_gpu_count() __magic_name__ :Tuple = get_torch_dist_unique_port() __magic_name__ :Union[str, Any] = F''' -m torch.distributed.run --nproc_per_node={n_gpus_to_use} --master_port={master_port} {self.examples_dir_str}/pytorch/translation/run_translation.py '''.split() __magic_name__ :Any = [sys.executable] + distributed_args + args # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die execute_subprocess_async(__lowerCAmelCase , env=self.get_env() ) else: __magic_name__ :List[Any] = ['''run_translation.py'''] + args with patch.object(__lowerCAmelCase , '''argv''' , __lowerCAmelCase ): main() return output_dir
0
0
import torch from torch import nn from transformers import CLIPPreTrainedModel, CLIPVisionModel from ...models.attention import BasicTransformerBlock from ...utils import logging snake_case = logging.get_logger(__name__) # pylint: disable=invalid-name class A_ ( UpperCAmelCase ): """simple docstring""" def __init__( self : Union[str, Any] ,__A : Union[str, Any] ,__A : Optional[Any]=768 ) -> Optional[int]: super().__init__(__A ) _lowercase = proj_size _lowercase = CLIPVisionModel(__A ) _lowercase = PaintByExampleMapper(__A ) _lowercase = nn.LayerNorm(config.hidden_size ) _lowercase = nn.Linear(config.hidden_size ,self.proj_size ) # uncondition for scaling _lowercase = nn.Parameter(torch.randn((1, 1, self.proj_size) ) ) def __UpperCAmelCase ( self : str ,__A : Optional[int] ,__A : Optional[int]=False ) -> Union[str, Any]: _lowercase = self.model(pixel_values=__A ) _lowercase = clip_output.pooler_output _lowercase = self.mapper(latent_states[:, None] ) _lowercase = self.final_layer_norm(__A ) _lowercase = self.proj_out(__A ) if return_uncond_vector: return latent_states, self.uncond_vector return latent_states class A_ ( nn.Module ): """simple docstring""" def __init__( self : Optional[int] ,__A : Dict ) -> str: super().__init__() _lowercase = (config.num_hidden_layers + 1) // 5 _lowercase = config.hidden_size _lowercase = 1 _lowercase = nn.ModuleList( [ BasicTransformerBlock(__A ,__A ,__A ,activation_fn='gelu' ,attention_bias=__A ) for _ in range(__A ) ] ) def __UpperCAmelCase ( self : Tuple ,__A : Optional[Any] ) -> Dict: for block in self.blocks: _lowercase = block(__A ) return hidden_states
67
import sys SCREAMING_SNAKE_CASE__ : Optional[Any] = ( """73167176531330624919225119674426574742355349194934""" """96983520312774506326239578318016984801869478851843""" """85861560789112949495459501737958331952853208805511""" """12540698747158523863050715693290963295227443043557""" """66896648950445244523161731856403098711121722383113""" """62229893423380308135336276614282806444486645238749""" """30358907296290491560440772390713810515859307960866""" """70172427121883998797908792274921901699720888093776""" """65727333001053367881220235421809751254540594752243""" """52584907711670556013604839586446706324415722155397""" """53697817977846174064955149290862569321978468622482""" """83972241375657056057490261407972968652414535100474""" """82166370484403199890008895243450658541227588666881""" """16427171479924442928230863465674813919123162824586""" """17866458359124566529476545682848912883142607690042""" """24219022671055626321111109370544217506941658960408""" """07198403850962455444362981230987879927244284909188""" """84580156166097919133875499200524063689912560717606""" """05886116467109405077541002256983155200055935729725""" """71636269561882670428252483600823257530420752963450""" ) def __lowercase ( snake_case = N ): """simple docstring""" __magic_name__ :Optional[int] = -sys.maxsize - 1 for i in range(len(snake_case ) - 1_2 ): __magic_name__ :List[Any] = 1 for j in range(1_3 ): product *= int(n[i + j] ) if product > largest_product: __magic_name__ :str = product return largest_product if __name__ == "__main__": print(f"{solution() = }")
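The scan above takes the maximum product over every 13-digit window of the 1000-digit string. An added, self-contained sketch of the same sliding-window idea; the names here are illustrative, not the original's:

```python
from math import prod


def largest_window_product(digits: str, width: int = 13) -> int:
    # Slide a fixed-width window across the digit string and keep the best product.
    return max(
        prod(int(d) for d in digits[i : i + width])
        for i in range(len(digits) - width + 1)
    )


# Classic 4-digit warm-up: 9 * 9 * 8 * 9 = 5832.
assert largest_window_product("9989", 4) == 5832
```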
0
0
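The Project Euler entry above takes the product of every 13-digit window of a 1000-digit string and keeps the maximum. The same sliding-window idea on a short string, as a self-contained sketch:

def largest_window_product(digits: str, width: int) -> int:
    best = 0
    for i in range(len(digits) - width + 1):
        product = 1
        for ch in digits[i : i + width]:
            product *= int(ch)
        best = max(best, product)
    return best

print(largest_window_product("7316717653", 4))  # 630, from the window "7653"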
from typing import List, Union import numpy as np from ..tokenization_utils import TruncationStrategy from ..utils import add_end_docstrings, logging from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline __A = logging.get_logger(__name__) class _A ( UpperCamelCase ): """simple docstring""" def _a ( self : List[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Tuple: if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): __UpperCAmelCase =[label.strip() for label in labels.split(""",""" ) if label.strip()] return labels def __call__( self : Any , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[Any] ) -> Dict: if len(__SCREAMING_SNAKE_CASE ) == 0 or len(__SCREAMING_SNAKE_CASE ) == 0: raise ValueError("""You must include at least one label and at least one sequence.""" ) if hypothesis_template.format(labels[0] ) == hypothesis_template: raise ValueError( ( """The provided hypothesis_template \"{}\" was not able to be formatted with the target labels. """ """Make sure the passed template includes formatting syntax such as {{}} where the label should go.""" ).format(__SCREAMING_SNAKE_CASE ) ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): __UpperCAmelCase =[sequences] __UpperCAmelCase =[] for sequence in sequences: sequence_pairs.extend([[sequence, hypothesis_template.format(__SCREAMING_SNAKE_CASE )] for label in labels] ) return sequence_pairs, sequences @add_end_docstrings(UpperCamelCase ) class _A ( UpperCamelCase ): """simple docstring""" def __init__( self : Optional[int] , __SCREAMING_SNAKE_CASE : Tuple=ZeroShotClassificationArgumentHandler() , *__SCREAMING_SNAKE_CASE : str , **__SCREAMING_SNAKE_CASE : Any ) -> List[str]: __UpperCAmelCase =args_parser super().__init__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) if self.entailment_id == -1: logger.warning( """Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to """ """-1. Define a descriptive label2id mapping in the model config to ensure correct outputs.""" ) @property def _a ( self : Any ) -> Optional[int]: for label, ind in self.model.config.labelaid.items(): if label.lower().startswith("""entail""" ): return ind return -1 def _a ( self : int , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Union[str, Any]=True , __SCREAMING_SNAKE_CASE : List[Any]=True , __SCREAMING_SNAKE_CASE : Optional[int]=TruncationStrategy.ONLY_FIRST , **__SCREAMING_SNAKE_CASE : Optional[Any] ) -> Tuple: __UpperCAmelCase =self.framework if self.tokenizer.pad_token is None: # Override for tokenizers not supporting padding logger.error( """Tokenizer was not supporting padding necessary for zero-shot, attempting to use """ """ `pad_token=eos_token`""" ) __UpperCAmelCase =self.tokenizer.eos_token try: __UpperCAmelCase =self.tokenizer( __SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , ) except Exception as e: if "too short" in str(__SCREAMING_SNAKE_CASE ): # tokenizers might yell that we want to truncate # to a value that is not even reached by the input. # In that case we don't want to truncate. # It seems there's not a really better way to catch that # exception. 
__UpperCAmelCase =self.tokenizer( __SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , truncation=TruncationStrategy.DO_NOT_TRUNCATE , ) else: raise e return inputs def _a ( self : Tuple , **__SCREAMING_SNAKE_CASE : Dict ) -> Optional[int]: if kwargs.get("""multi_class""" , __SCREAMING_SNAKE_CASE ) is not None: __UpperCAmelCase =kwargs["""multi_class"""] logger.warning( """The `multi_class` argument has been deprecated and renamed to `multi_label`. """ """`multi_class` will be removed in a future version of Transformers.""" ) __UpperCAmelCase ={} if "candidate_labels" in kwargs: __UpperCAmelCase =self._args_parser._parse_labels(kwargs["""candidate_labels"""] ) if "hypothesis_template" in kwargs: __UpperCAmelCase =kwargs["""hypothesis_template"""] __UpperCAmelCase ={} if "multi_label" in kwargs: __UpperCAmelCase =kwargs["""multi_label"""] return preprocess_params, {}, postprocess_params def __call__( self : int , __SCREAMING_SNAKE_CASE : Union[str, List[str]] , *__SCREAMING_SNAKE_CASE : List[str] , **__SCREAMING_SNAKE_CASE : Optional[int] , ) -> List[Any]: if len(__SCREAMING_SNAKE_CASE ) == 0: pass elif len(__SCREAMING_SNAKE_CASE ) == 1 and "candidate_labels" not in kwargs: __UpperCAmelCase =args[0] else: raise ValueError(f'''Unable to understand extra arguments {args}''' ) return super().__call__(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : Optional[int]="This example is {}." ) -> Optional[Any]: __UpperCAmelCase , __UpperCAmelCase =self._args_parser(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for i, (candidate_label, sequence_pair) in enumerate(zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ): __UpperCAmelCase =self._parse_and_tokenize([sequence_pair] ) yield { "candidate_label": candidate_label, "sequence": sequences[0], "is_last": i == len(__SCREAMING_SNAKE_CASE ) - 1, **model_input, } def _a ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : List[Any] ) -> str: __UpperCAmelCase =inputs["""candidate_label"""] __UpperCAmelCase =inputs["""sequence"""] __UpperCAmelCase ={k: inputs[k] for k in self.tokenizer.model_input_names} __UpperCAmelCase =self.model(**__SCREAMING_SNAKE_CASE ) __UpperCAmelCase ={ """candidate_label""": candidate_label, """sequence""": sequence, """is_last""": inputs["""is_last"""], **outputs, } return model_outputs def _a ( self : Optional[int] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Any=False ) -> List[Any]: __UpperCAmelCase =[outputs["""candidate_label"""] for outputs in model_outputs] __UpperCAmelCase =[outputs["""sequence"""] for outputs in model_outputs] __UpperCAmelCase =np.concatenate([output["""logits"""].numpy() for output in model_outputs] ) __UpperCAmelCase =logits.shape[0] __UpperCAmelCase =len(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =N // n __UpperCAmelCase =logits.reshape((num_sequences, n, -1) ) if multi_label or len(__SCREAMING_SNAKE_CASE ) == 1: # softmax over the entailment vs. 
contradiction dim for each label independently __UpperCAmelCase =self.entailment_id __UpperCAmelCase =-1 if entailment_id == 0 else 0 __UpperCAmelCase =reshaped_outputs[..., [contradiction_id, entailment_id]] __UpperCAmelCase =np.exp(__SCREAMING_SNAKE_CASE ) / np.exp(__SCREAMING_SNAKE_CASE ).sum(-1 , keepdims=__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =scores[..., 1] else: # softmax the "entailment" logits over all candidate labels __UpperCAmelCase =reshaped_outputs[..., self.entailment_id] __UpperCAmelCase =np.exp(__SCREAMING_SNAKE_CASE ) / np.exp(__SCREAMING_SNAKE_CASE ).sum(-1 , keepdims=__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =list(reversed(scores[0].argsort() ) ) return { "sequence": sequences[0], "labels": [candidate_labels[i] for i in top_inds], "scores": scores[0, top_inds].tolist(), }
68
SCREAMING_SNAKE_CASE__ : Tuple = { """a""": """AAAAA""", """b""": """AAAAB""", """c""": """AAABA""", """d""": """AAABB""", """e""": """AABAA""", """f""": """AABAB""", """g""": """AABBA""", """h""": """AABBB""", """i""": """ABAAA""", """j""": """BBBAA""", """k""": """ABAAB""", """l""": """ABABA""", """m""": """ABABB""", """n""": """ABBAA""", """o""": """ABBAB""", """p""": """ABBBA""", """q""": """ABBBB""", """r""": """BAAAA""", """s""": """BAAAB""", """t""": """BAABA""", """u""": """BAABB""", """v""": """BBBAB""", """w""": """BABAA""", """x""": """BABAB""", """y""": """BABBA""", """z""": """BABBB""", """ """: """ """, } SCREAMING_SNAKE_CASE__ : Union[str, Any] = {value: key for key, value in encode_dict.items()} def __lowercase ( snake_case ): """simple docstring""" __magic_name__ :Tuple = '''''' for letter in word.lower(): if letter.isalpha() or letter == " ": encoded += encode_dict[letter] else: raise Exception('''encode() accepts only letters of the alphabet and spaces''' ) return encoded def __lowercase ( snake_case ): """simple docstring""" if set(snake_case ) - {"A", "B", " "} != set(): raise Exception('''decode() accepts only \'A\', \'B\' and spaces''' ) __magic_name__ :Dict = '''''' for word in coded.split(): while len(snake_case ) != 0: decoded += decode_dict[word[:5]] __magic_name__ :int = word[5:] decoded += " " return decoded.strip() if __name__ == "__main__": from doctest import testmod testmod()
0
0
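The Baconian-cipher entry above encodes each letter as a five-character A/B code and decodes in five-character chunks. A round-trip sketch with a trimmed three-letter dictionary:

encode_dict = {"a": "AAAAA", "b": "AAAAB", "c": "AAABA", " ": " "}
decode_dict = {value: key for key, value in encode_dict.items()}

def encode(word: str) -> str:
    return "".join(encode_dict[letter] for letter in word.lower())

def decode(coded: str) -> str:
    words = []
    for chunk in coded.split():  # spaces separate words; each chunk is 5-char groups
        words.append("".join(decode_dict[chunk[i : i + 5]] for i in range(0, len(chunk), 5)))
    return " ".join(words)

assert decode(encode("abc cab")) == "abc cab"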
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL a : Tuple = logging.get_logger(__name__) class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ): __SCREAMING_SNAKE_CASE = ["""pixel_values"""] def __init__( self : Tuple , a_ : bool = True , a_ : Dict[str, int] = None , a_ : int = 0.9 , a_ : PILImageResampling = PILImageResampling.BICUBIC , a_ : bool = True , a_ : Dict[str, int] = None , a_ : Union[int, float] = 1 / 255 , a_ : bool = True , a_ : bool = True , a_ : Optional[Union[float, List[float]]] = None , a_ : Optional[Union[float, List[float]]] = None , **a_ : Optional[int] , ): """simple docstring""" super().__init__(**a_ ) __snake_case = size if size is not None else {"shortest_edge": 224} __snake_case = get_size_dict(a_ , default_to_square=a_ ) __snake_case = crop_size if crop_size is not None else {"height": 224, "width": 224} __snake_case = get_size_dict(a_ , param_name="crop_size" ) __snake_case = do_resize __snake_case = size __snake_case = crop_pct __snake_case = resample __snake_case = do_center_crop __snake_case = crop_size __snake_case = do_rescale __snake_case = rescale_factor __snake_case = do_normalize __snake_case = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN __snake_case = image_std if image_std is not None else IMAGENET_DEFAULT_STD def A ( self : Tuple , a_ : np.ndarray , a_ : Dict[str, int] , a_ : Optional[float] = None , a_ : PILImageResampling = PILImageResampling.BICUBIC , a_ : Optional[Union[str, ChannelDimension]] = None , **a_ : Any , ): """simple docstring""" __snake_case = get_size_dict(a_ , default_to_square=a_ ) if "shortest_edge" not in size and ("height" not in size or "width" not in size): raise ValueError(f'''size must contain \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}''' ) if crop_pct is not None: if "shortest_edge" in size: __snake_case = int(size["shortest_edge"] / crop_pct ) elif "height" in size and "width" in size: if size["height"] == size["width"]: __snake_case = int(size["height"] / crop_pct ) else: __snake_case = (int(size["height"] / crop_pct ), int(size["width"] / crop_pct )) else: raise ValueError("Invalid size for resize: {}".format(a_ ) ) __snake_case = get_resize_output_image_size(a_ , size=a_ , default_to_square=a_ ) else: if "shortest_edge" in size: __snake_case = get_resize_output_image_size(a_ , size=size["shortest_edge"] , default_to_square=a_ ) elif "height" in size and "width" in size: __snake_case = (size["height"], size["width"]) else: raise ValueError("Invalid size for resize: {}".format(a_ ) ) return resize(a_ , size=a_ , resample=a_ , data_format=a_ , **a_ ) def A ( self : int , a_ : np.ndarray , a_ : Dict[str, int] , a_ : Optional[Union[str, ChannelDimension]] = None , **a_ : str , ): """simple docstring""" __snake_case = get_size_dict(a_ ) if "height" not in size or "width" not in size: raise ValueError(f'''size must contain \'height\' and \'width\' as keys. 
Got {size.keys()}''' ) return center_crop(a_ , size=(size["height"], size["width"]) , data_format=a_ , **a_ ) def A ( self : Dict , a_ : np.ndarray , a_ : Union[int, float] , a_ : Optional[Union[str, ChannelDimension]] = None , **a_ : Any , ): """simple docstring""" return rescale(a_ , scale=a_ , data_format=a_ , **a_ ) def A ( self : str , a_ : np.ndarray , a_ : Union[float, List[float]] , a_ : Union[float, List[float]] , a_ : Optional[Union[str, ChannelDimension]] = None , **a_ : Dict , ): """simple docstring""" return normalize(a_ , mean=a_ , std=a_ , data_format=a_ , **a_ ) def A ( self : Dict , a_ : ImageInput , a_ : bool = None , a_ : Dict[str, int] = None , a_ : int = None , a_ : PILImageResampling = None , a_ : bool = None , a_ : Dict[str, int] = None , a_ : bool = None , a_ : float = None , a_ : bool = None , a_ : Optional[Union[float, List[float]]] = None , a_ : Optional[Union[float, List[float]]] = None , a_ : Optional[Union[str, TensorType]] = None , a_ : ChannelDimension = ChannelDimension.FIRST , **a_ : int , ): """simple docstring""" __snake_case = do_resize if do_resize is not None else self.do_resize __snake_case = crop_pct if crop_pct is not None else self.crop_pct __snake_case = resample if resample is not None else self.resample __snake_case = do_center_crop if do_center_crop is not None else self.do_center_crop __snake_case = do_rescale if do_rescale is not None else self.do_rescale __snake_case = rescale_factor if rescale_factor is not None else self.rescale_factor __snake_case = do_normalize if do_normalize is not None else self.do_normalize __snake_case = image_mean if image_mean is not None else self.image_mean __snake_case = image_std if image_std is not None else self.image_std __snake_case = size if size is not None else self.size __snake_case = get_size_dict(a_ , default_to_square=a_ ) __snake_case = crop_size if crop_size is not None else self.crop_size __snake_case = get_size_dict(a_ , param_name="crop_size" ) __snake_case = make_list_of_images(a_ ) if not valid_images(a_ ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None or resample is None: raise ValueError("Size and resample must be specified if do_resize is True." ) if do_center_crop and crop_pct is None: raise ValueError("Crop_pct must be specified if do_center_crop is True." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True." ) # All transformations expect numpy arrays. __snake_case = [to_numpy_array(a_ ) for image in images] if do_resize: __snake_case = [self.resize(image=a_ , size=a_ , crop_pct=a_ , resample=a_ ) for image in images] if do_center_crop: __snake_case = [self.center_crop(image=a_ , size=a_ ) for image in images] if do_rescale: __snake_case = [self.rescale(image=a_ , scale=a_ ) for image in images] if do_normalize: __snake_case = [self.normalize(image=a_ , mean=a_ , std=a_ ) for image in images] __snake_case = [to_channel_dimension_format(a_ , a_ ) for image in images] __snake_case = {"pixel_values": images} return BatchFeature(data=a_ , tensor_type=a_ )
69
import argparse import torch from torch import nn from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration def __lowercase ( snake_case ): """simple docstring""" __magic_name__ :Optional[Any] = [ '''encoder.version''', '''decoder.version''', '''model.encoder.version''', '''model.decoder.version''', '''decoder.output_projection.weight''', '''_float_tensor''', '''encoder.embed_positions._float_tensor''', '''decoder.embed_positions._float_tensor''', ] for k in ignore_keys: state_dict.pop(snake_case, snake_case ) def __lowercase ( snake_case ): """simple docstring""" __magic_name__ , __magic_name__ :Tuple = emb.weight.shape __magic_name__ :int = nn.Linear(snake_case, snake_case, bias=snake_case ) __magic_name__ :str = emb.weight.data return lin_layer def __lowercase ( snake_case ): """simple docstring""" __magic_name__ :int = torch.load(snake_case, map_location='''cpu''' ) __magic_name__ :Optional[Any] = mam_aaa['''args'''] or mam_aaa['''cfg''']['''model'''] __magic_name__ :List[Any] = mam_aaa['''model'''] remove_ignore_keys_(snake_case ) __magic_name__ :Tuple = state_dict['''encoder.embed_tokens.weight'''].shape[0] __magic_name__ :List[str] = MaMaaaConfig( vocab_size=snake_case, max_position_embeddings=1_0_2_4, encoder_layers=args.encoder_layers, decoder_layers=args.decoder_layers, encoder_attention_heads=args.encoder_attention_heads, decoder_attention_heads=args.decoder_attention_heads, encoder_ffn_dim=args.encoder_ffn_embed_dim, decoder_ffn_dim=args.decoder_ffn_embed_dim, d_model=args.encoder_embed_dim, encoder_layerdrop=args.encoder_layerdrop, decoder_layerdrop=args.decoder_layerdrop, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function='''relu''', ) __magic_name__ :int = state_dict['''decoder.embed_tokens.weight'''] __magic_name__ :List[str] = MaMaaaForConditionalGeneration(snake_case ) model.model.load_state_dict(snake_case, strict=snake_case ) __magic_name__ :List[str] = make_linear_from_emb(model.model.shared ) return model if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument("""fairseq_path""", type=str, help="""path to a model.pt on local filesystem.""") parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") SCREAMING_SNAKE_CASE__ : int = parser.parse_args() SCREAMING_SNAKE_CASE__ : Any = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path) model.save_pretrained(args.pytorch_dump_folder_path)
0
0
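The fairseq conversion script above ends by tying the LM head to the shared token embedding through a make-linear-from-embedding helper. A minimal sketch of that weight-tying step (variable names are mine, and the Linear is declared with matching in/out shapes):

import torch
from torch import nn

def linear_from_embedding(emb: nn.Embedding) -> nn.Linear:
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(emb_size, vocab_size, bias=False)  # weight shape (vocab, emb)
    lin_layer.weight.data = emb.weight.data  # reuse the embedding's storage
    return lin_layer

emb = nn.Embedding(10, 4)
lm_head = linear_from_embedding(emb)
assert torch.equal(lm_head.weight, emb.weight)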
import requests lowerCamelCase : Union[str, Any] = "" # <-- Put your OpenWeatherMap appid here! lowerCamelCase : Any = "https://api.openweathermap.org/data/2.5/" def _SCREAMING_SNAKE_CASE ( lowercase : str = "Chicago" , lowercase : str = APPID ): '''simple docstring''' return requests.get(URL_BASE + 'weather' , params=locals() ).json() def _SCREAMING_SNAKE_CASE ( lowercase : str = "Kolkata, India" , lowercase : str = APPID ): '''simple docstring''' return requests.get(URL_BASE + 'forecast' , params=locals() ).json() def _SCREAMING_SNAKE_CASE ( lowercase : float = 55.68 , lowercase : float = 12.57 , lowercase : str = APPID ): '''simple docstring''' return requests.get(URL_BASE + 'onecall' , params=locals() ).json() if __name__ == "__main__": from pprint import pprint while True: lowerCamelCase : Any = input("Enter a location:").strip() if location: pprint(current_weather(location)) else: break
70
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available SCREAMING_SNAKE_CASE__ : Dict = { """configuration_canine""": ["""CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CanineConfig"""], """tokenization_canine""": ["""CanineTokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ : str = [ """CANINE_PRETRAINED_MODEL_ARCHIVE_LIST""", """CanineForMultipleChoice""", """CanineForQuestionAnswering""", """CanineForSequenceClassification""", """CanineForTokenClassification""", """CanineLayer""", """CanineModel""", """CaninePreTrainedModel""", """load_tf_weights_in_canine""", ] if TYPE_CHECKING: from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig from .tokenization_canine import CanineTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_canine import ( CANINE_PRETRAINED_MODEL_ARCHIVE_LIST, CanineForMultipleChoice, CanineForQuestionAnswering, CanineForSequenceClassification, CanineForTokenClassification, CanineLayer, CanineModel, CaninePreTrainedModel, load_tf_weights_in_canine, ) else: import sys SCREAMING_SNAKE_CASE__ : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
0
0
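The OpenWeatherMap helpers above rely on params=locals(): each function's keyword arguments become the query string. A hedged sketch of the same trick against httpbin.org, which simply echoes the query back (network access assumed):

import requests

def echo_query(q: str = "hello", page: int = 1) -> dict:
    # locals() here is {"q": q, "page": page}, serialized as ?q=...&page=...
    return requests.get("https://httpbin.org/get", params=locals()).json()

print(echo_query("weather")["args"])  # {'q': 'weather', 'page': '1'}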
'''simple docstring''' import argparse import json import os import torch from torch import nn from transformers import NllbMoeConfig, NllbMoeModel from transformers.modeling_utils import dtype_byte_size from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME def a__ ( _SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Optional[Any]: """simple docstring""" UpperCAmelCase_ : Optional[Any] = [ "encoder.version", "decoder.version", "model.encoder.version", "model.decoder.version", "decoder.output_projection.weight", "_float_tensor", "encoder.embed_positions._float_tensor", "decoder.embed_positions._float_tensor", ] for k in ignore_keys: state_dict.pop(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def a__ ( _SCREAMING_SNAKE_CASE : Optional[Any] ) -> str: """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ : str = emb.weight.shape UpperCAmelCase_ : List[Any] = nn.Linear(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , bias=_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Tuple = emb.weight.data return lin_layer def a__ ( _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Optional[Any]=None ) -> Union[str, Any]: """simple docstring""" UpperCAmelCase_ : Optional[Any] = {} for old_key in state_dict.keys(): UpperCAmelCase_ : Optional[int] = old_key if "moe_layer.experts." in key: if expert_idx is not None: UpperCAmelCase_ : List[Any] = key.replace("moe_layer.experts.0" , F'''ffn.experts.expert_{expert_idx}''' ) else: UpperCAmelCase_ : Tuple = key.replace("moe_layer.experts." , "ffn.experts.expert_" ) if "gate" in key: UpperCAmelCase_ : List[Any] = key.replace(".moe_layer.gate.wg" , ".ffn.router.classifier" ) if ".fc2." in key and "experts" not in key: UpperCAmelCase_ : Dict = key.replace(".fc2." , ".ffn.fc2." ) if ".fc1." in key and "experts" not in key: UpperCAmelCase_ : List[str] = key.replace(".fc1." , ".ffn.fc1." ) if ".encoder_attn." in key: UpperCAmelCase_ : List[str] = key.replace(".encoder_attn." , ".cross_attention."
) if "encoder_attn_layer_norm" in key: UpperCAmelCase_ : Union[str, Any] = key.replace("encoder_attn_layer_norm" , "cross_attention_layer_norm" ) if "final_layer_norm" in key: UpperCAmelCase_ : Any = key.replace("final_layer_norm" , "ff_layer_norm" ) UpperCAmelCase_ : Optional[Any] = state_dict[old_key] return new_dict def a__ ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : str = WEIGHTS_NAME ) -> str: """simple docstring""" UpperCAmelCase_ : Optional[Any] = [] UpperCAmelCase_ : Optional[Any] = 0 os.makedirs(_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE ) for expert in range(_SCREAMING_SNAKE_CASE ): UpperCAmelCase_ : Any = switch_checkpoint_path + F'''-rank-{expert}.pt''' if os.path.isfile(_SCREAMING_SNAKE_CASE ): UpperCAmelCase_ : List[Any] = torch.load(_SCREAMING_SNAKE_CASE )["model"] remove_ignore_keys_(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : List[Any] = rename_fairseq_keys(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Union[str, Any] = os.path.join( _SCREAMING_SNAKE_CASE , weights_name.replace(".bin" , F'''-{len(_SCREAMING_SNAKE_CASE )+1:05d}-of-???.bin''' ) ) torch.save(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) sharded_state_dicts.append(expert_state.keys() ) total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size( expert_state[list(_SCREAMING_SNAKE_CASE )[0]].dtype ) # Add the last block UpperCAmelCase_ : Tuple = os.path.join(_SCREAMING_SNAKE_CASE , weights_name.replace(".bin" , F'''-{len(_SCREAMING_SNAKE_CASE )+1:05d}-of-???.bin''' ) ) UpperCAmelCase_ : List[str] = torch.load(switch_checkpoint_path + "-shared.pt" )["model"] remove_ignore_keys_(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : str = rename_fairseq_keys(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : List[str] = shared_weights["decoder.embed_tokens.weight"] sharded_state_dicts.append(shared_weights.keys() ) # If we only have the shared weights (dummy model/experts saved on the same file) if len(_SCREAMING_SNAKE_CASE ) == 1: UpperCAmelCase_ : Dict = os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) torch.save(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) return {weights_name: sharded_state_dicts[0]}, None else: torch.save(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Otherwise, let's build the index UpperCAmelCase_ : Optional[Any] = {} for idx, shard in enumerate(_SCREAMING_SNAKE_CASE ): UpperCAmelCase_ : str = weights_name.replace(".bin" , F'''-{idx+1:05d}-of-{len(_SCREAMING_SNAKE_CASE ):05d}.bin''' ) UpperCAmelCase_ : List[Any] = os.path.join(_SCREAMING_SNAKE_CASE , weights_name.replace(".bin" , F'''-{idx+1:05d}-of-???.bin''' ) ) os.rename(_SCREAMING_SNAKE_CASE , os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) for key in shard: UpperCAmelCase_ : Tuple = shard_file # Add the metadata UpperCAmelCase_ : Dict = {"total_size": total_size} UpperCAmelCase_ : List[Any] = {"metadata": metadata, "weight_map": weight_map} with open(os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , "w" , encoding="utf-8" ) as f: UpperCAmelCase_ : Dict = json.dumps(_SCREAMING_SNAKE_CASE , indent=2 , sort_keys=_SCREAMING_SNAKE_CASE ) + "\n" f.write(_SCREAMING_SNAKE_CASE ) return metadata, index if __name__ == "__main__": _lowerCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( """--nllb_moe_checkpoint_path""", 
default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000""", type=str, required=False, help="""Path to a directory containing a folder per layer. Follows the original Google format.""", ) parser.add_argument("""--dtype""", default="""float32""", type=str, required=False, help="""dtype of the saved model""") parser.add_argument( """--pytorch_dump_folder_path""", default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b""", type=str, required=False, help="""Path to the output pytorch model.""", ) _lowerCamelCase = parser.parse_args() _lowerCamelCase , _lowerCamelCase = shard_on_the_fly( args.nllb_moe_checkpoint_path, args.pytorch_dump_folder_path, 128, args.dtype, ) _lowerCamelCase = NllbMoeConfig.from_pretrained( """facebook/nllb-200-3.3B""", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128 ) config.save_pretrained(args.pytorch_dump_folder_path) _lowerCamelCase = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path) print("""Done""") model.save_pretrained(args.pytorch_dump_folder_path)
71
import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class lowerCamelCase_ ( lowerCamelCase ): a__ = ['''image_processor''', '''tokenizer'''] a__ = '''ChineseCLIPImageProcessor''' a__ = ('''BertTokenizer''', '''BertTokenizerFast''') def __init__( self , __lowerCAmelCase=None , __lowerCAmelCase=None , **__lowerCAmelCase ): """simple docstring""" __magic_name__ :Tuple = None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''' , __lowerCAmelCase , ) __magic_name__ :Optional[Any] = kwargs.pop('''feature_extractor''' ) __magic_name__ :Tuple = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) super().__init__(__lowerCAmelCase , __lowerCAmelCase ) __magic_name__ :List[Any] = self.image_processor def __call__( self , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , **__lowerCAmelCase ): """simple docstring""" if text is None and images is None: raise ValueError('''You have to specify either text or images. Both cannot be none.''' ) if text is not None: __magic_name__ :int = self.tokenizer(__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase ) if images is not None: __magic_name__ :Dict = self.image_processor(__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase ) if text is not None and images is not None: __magic_name__ :Union[str, Any] = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**__lowerCAmelCase ) , tensor_type=__lowerCAmelCase ) def A ( self , *__lowerCAmelCase , **__lowerCAmelCase ): """simple docstring""" return self.tokenizer.batch_decode(*__lowerCAmelCase , **__lowerCAmelCase ) def A ( self , *__lowerCAmelCase , **__lowerCAmelCase ): """simple docstring""" return self.tokenizer.decode(*__lowerCAmelCase , **__lowerCAmelCase ) @property def A ( self ): """simple docstring""" __magic_name__ :List[Any] = self.tokenizer.model_input_names __magic_name__ :Any = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) @property def A ( self ): """simple docstring""" warnings.warn( '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , __lowerCAmelCase , ) return self.image_processor_class
0
0
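The NLLB-MoE converter above finishes by writing a weight-map index: sharded file names plus a key-to-file mapping and a total_size metadata entry. A small sketch assembling such an index from hypothetical shard contents:

import json

shards = {
    "pytorch_model-00001-of-00002.bin": ["encoder.weight", "encoder.bias"],
    "pytorch_model-00002-of-00002.bin": ["decoder.weight"],
}
weight_map = {key: fname for fname, keys in shards.items() for key in keys}
index = {"metadata": {"total_size": 12345}, "weight_map": weight_map}  # total_size is a placeholder
print(json.dumps(index, indent=2, sort_keys=True))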
'''simple docstring''' import os # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_doctest_list.py _UpperCAmelCase : List[Any] = '''.''' if __name__ == "__main__": _UpperCAmelCase : int = os.path.join(REPO_PATH, '''utils/documentation_tests.txt''') _UpperCAmelCase : str = [] _UpperCAmelCase : Optional[Any] = [] with open(doctest_file_path) as fp: for line in fp: _UpperCAmelCase : str = line.strip() _UpperCAmelCase : Tuple = os.path.join(REPO_PATH, line) if not (os.path.isfile(path) or os.path.isdir(path)): non_existent_paths.append(line) all_paths.append(path) if len(non_existent_paths) > 0: _UpperCAmelCase : Tuple = '''\n'''.join(non_existent_paths) raise ValueError(F"""`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}""") if all_paths != sorted(all_paths): raise ValueError('''Files in `utils/documentation_tests.txt` are not in alphabetical order.''')
72
from sklearn.metrics import matthews_corrcoef import datasets SCREAMING_SNAKE_CASE__ : Optional[Any] = """ Compute the Matthews correlation coefficient (MCC) The Matthews correlation coefficient is used in machine learning as a measure of the quality of binary and multiclass classifications. It takes into account true and false positives and negatives and is generally regarded as a balanced measure which can be used even if the classes are of very different sizes. The MCC is in essence a correlation coefficient value between -1 and +1. A coefficient of +1 represents a perfect prediction, 0 an average random prediction and -1 an inverse prediction. The statistic is also known as the phi coefficient. [source: Wikipedia] """ SCREAMING_SNAKE_CASE__ : Union[str, Any] = """ Args: predictions (list of int): Predicted labels, as returned by a model. references (list of int): Ground truth labels. sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`. Returns: matthews_correlation (dict containing float): Matthews correlation. Examples: Example 1, a basic example with only predictions and references as inputs: >>> matthews_metric = datasets.load_metric(\"matthews_correlation\") >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2], ... predictions=[1, 2, 2, 0, 3, 3]) >>> print(round(results['matthews_correlation'], 2)) 0.54 Example 2, the same example as above, but also including sample weights: >>> matthews_metric = datasets.load_metric(\"matthews_correlation\") >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2], ... predictions=[1, 2, 2, 0, 3, 3], ... sample_weight=[0.5, 3, 1, 1, 1, 2]) >>> print(round(results['matthews_correlation'], 2)) 0.1 Example 3, the same example as above, but with sample weights that cause a negative correlation: >>> matthews_metric = datasets.load_metric(\"matthews_correlation\") >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2], ... predictions=[1, 2, 2, 0, 3, 3], ... sample_weight=[0.5, 1, 0, 0, 0, 1]) >>> print(round(results['matthews_correlation'], 2)) -0.25 """ SCREAMING_SNAKE_CASE__ : int = """\ @article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011} } """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCamelCase_ ( datasets.Metric ): def A ( self ): """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''int32''' ), '''references''': datasets.Value('''int32''' ), } ) , reference_urls=[ '''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html''' ] , ) def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None ): """simple docstring""" return { "matthews_correlation": float(matthews_corrcoef(__lowerCAmelCase , __lowerCAmelCase , sample_weight=__lowerCAmelCase ) ), }
0
0
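The metric above is a thin wrapper over scikit-learn. Calling matthews_corrcoef directly reproduces the first example in its docstring:

from sklearn.metrics import matthews_corrcoef

references  = [1, 3, 2, 0, 3, 2]
predictions = [1, 2, 2, 0, 3, 3]
print(round(matthews_corrcoef(references, predictions), 2))  # 0.54, as in the docstring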
from argparse import ArgumentParser, Namespace from ..utils import logging from . import BaseTransformersCLICommand def lowerCamelCase__ (_UpperCAmelCase): return ConvertCommand( args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name) a_ : Any = '\ntransformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n' class _snake_case ( A__ ): @staticmethod def SCREAMING_SNAKE_CASE__ ( a) -> Union[str, Any]: SCREAMING_SNAKE_CASE = parser.add_parser( 'convert' , help='CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.' , ) train_parser.add_argument('--model_type' , type=a , required=a , help='Model\'s type.') train_parser.add_argument( '--tf_checkpoint' , type=a , required=a , help='TensorFlow checkpoint path or folder.') train_parser.add_argument( '--pytorch_dump_output' , type=a , required=a , help='Path to the PyTorch saved model output.') train_parser.add_argument('--config' , type=a , default='' , help='Configuration file path or folder.') train_parser.add_argument( '--finetuning_task_name' , type=a , default=a , help='Optional fine-tuning task name if the TF model was a finetuned model.' , ) train_parser.set_defaults(func=a) def __init__( self , a , a , a , a , a , *a , ) -> Any: SCREAMING_SNAKE_CASE = logging.get_logger('transformers-cli/converting') self._logger.info(f'''Loading model {model_type}''') SCREAMING_SNAKE_CASE = model_type SCREAMING_SNAKE_CASE = tf_checkpoint SCREAMING_SNAKE_CASE = pytorch_dump_output SCREAMING_SNAKE_CASE = config SCREAMING_SNAKE_CASE = finetuning_task_name def SCREAMING_SNAKE_CASE__ ( self) -> str: if self._model_type == "albert": try: from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(a) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output) elif self._model_type == "bert": try: from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(a) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output) elif self._model_type == "funnel": try: from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(a) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output) elif self._model_type == "t5": try: from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch except ImportError: raise ImportError(a) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output) elif self._model_type == "gpt": from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import ( convert_openai_checkpoint_to_pytorch, ) convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output) elif self._model_type == "transfo_xl": try: from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import ( convert_transfo_xl_checkpoint_to_pytorch, ) except ImportError: raise ImportError(a) if "ckpt" in self._tf_checkpoint.lower(): SCREAMING_SNAKE_CASE = self._tf_checkpoint SCREAMING_SNAKE_CASE = '' else: 
SCREAMING_SNAKE_CASE = self._tf_checkpoint SCREAMING_SNAKE_CASE = '' convert_transfo_xl_checkpoint_to_pytorch( a , self._config , self._pytorch_dump_output , a) elif self._model_type == "gpt2": try: from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import ( convert_gpta_checkpoint_to_pytorch, ) except ImportError: raise ImportError(a) convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output) elif self._model_type == "xlnet": try: from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import ( convert_xlnet_checkpoint_to_pytorch, ) except ImportError: raise ImportError(a) convert_xlnet_checkpoint_to_pytorch( self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name) elif self._model_type == "xlm": from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import ( convert_xlm_checkpoint_to_pytorch, ) convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output) elif self._model_type == "lxmert": from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import ( convert_lxmert_checkpoint_to_pytorch, ) convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output) elif self._model_type == "rembert": from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import ( convert_rembert_tf_checkpoint_to_pytorch, ) convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output) else: raise ValueError( '--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]')
73
from __future__ import annotations def __lowercase ( snake_case, snake_case ): """simple docstring""" print(f'''Vertex\tShortest Distance from vertex {src}''' ) for i, d in enumerate(snake_case ): print(f'''{i}\t\t{d}''' ) def __lowercase ( snake_case, snake_case, snake_case ): """simple docstring""" for j in range(snake_case ): __magic_name__ , __magic_name__ , __magic_name__ :Tuple = (graph[j][k] for k in ['''src''', '''dst''', '''weight''']) if distance[u] != float('''inf''' ) and distance[u] + w < distance[v]: return True return False def __lowercase ( snake_case, snake_case, snake_case, snake_case ): """simple docstring""" __magic_name__ :List[Any] = [float('''inf''' )] * vertex_count __magic_name__ :Tuple = 0.0 for _ in range(vertex_count - 1 ): for j in range(snake_case ): __magic_name__ , __magic_name__ , __magic_name__ :Dict = (graph[j][k] for k in ['''src''', '''dst''', '''weight''']) if distance[u] != float('''inf''' ) and distance[u] + w < distance[v]: __magic_name__ :Tuple = distance[u] + w __magic_name__ :Tuple = check_negative_cycle(snake_case, snake_case, snake_case ) if negative_cycle_exists: raise Exception('''Negative cycle found''' ) return distance if __name__ == "__main__": import doctest doctest.testmod() SCREAMING_SNAKE_CASE__ : Tuple = int(input("""Enter number of vertices: """).strip()) SCREAMING_SNAKE_CASE__ : Any = int(input("""Enter number of edges: """).strip()) SCREAMING_SNAKE_CASE__ : list[dict[str, int]] = [{} for _ in range(E)] for i in range(E): print("""Edge """, i + 1) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = ( int(x) for x in input("""Enter source, destination, weight: """).strip().split(""" """) ) SCREAMING_SNAKE_CASE__ : Dict = {"""src""": src, """dst""": dest, """weight""": weight} SCREAMING_SNAKE_CASE__ : List[Any] = int(input("""\nEnter shortest path source:""").strip()) SCREAMING_SNAKE_CASE__ : List[str] = bellman_ford(graph, V, E, source) print_distance(shortest_distance, 0)
0
0
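The Bellman–Ford entry above relaxes every edge V-1 times over edges stored as {"src", "dst", "weight"} dicts. A compact, self-contained re-run of that relaxation loop (function name is mine):

def bellman_ford(graph: list[dict], vertex_count: int, src: int) -> list[float]:
    distance = [float("inf")] * vertex_count
    distance[src] = 0.0
    for _ in range(vertex_count - 1):
        for edge in graph:
            u, v, w = edge["src"], edge["dst"], edge["weight"]
            if distance[u] != float("inf") and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w
    return distance

graph = [
    {"src": 0, "dst": 1, "weight": 4},
    {"src": 0, "dst": 2, "weight": 5},
    {"src": 1, "dst": 2, "weight": -3},
    {"src": 2, "dst": 3, "weight": 2},
]
print(bellman_ford(graph, 4, 0))  # [0.0, 4.0, 1.0, 3.0]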
import argparse import os import re import packaging.version lowercase_ = """examples/""" lowercase_ = { """examples""": (re.compile(R"""^check_min_version\(\"[^\"]+\"\)\s*$""", re.MULTILINE), """check_min_version(\"VERSION\")\n"""), """init""": (re.compile(R"""^__version__\s+=\s+\"([^\"]+)\"\s*$""", re.MULTILINE), """__version__ = \"VERSION\"\n"""), """setup""": (re.compile(R"""^(\s*)version\s*=\s*\"[^\"]+\",""", re.MULTILINE), R"""\1version=\"VERSION\","""), """doc""": (re.compile(R"""^(\s*)release\s*=\s*\"[^\"]+\"$""", re.MULTILINE), """release = \"VERSION\"\n"""), } lowercase_ = { """init""": """src/transformers/__init__.py""", """setup""": """setup.py""", } lowercase_ = """README.md""" def a__ ( snake_case , snake_case , snake_case ): """simple docstring""" with open(snake_case , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: __SCREAMING_SNAKE_CASE : List[Any] = f.read() __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : str = REPLACE_PATTERNS[pattern] __SCREAMING_SNAKE_CASE : List[Any] = replace.replace('''VERSION''' , snake_case ) __SCREAMING_SNAKE_CASE : Optional[Any] = re_pattern.sub(snake_case , snake_case ) with open(snake_case , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f: f.write(snake_case ) def a__ ( snake_case ): """simple docstring""" for folder, directories, fnames in os.walk(snake_case ): # Removing some of the folders with non-actively maintained examples from the walk if "research_projects" in directories: directories.remove('''research_projects''' ) if "legacy" in directories: directories.remove('''legacy''' ) for fname in fnames: if fname.endswith('''.py''' ): update_version_in_file(os.path.join(snake_case , snake_case ) , snake_case , pattern='''examples''' ) def a__ ( snake_case , snake_case=False ): """simple docstring""" for pattern, fname in REPLACE_FILES.items(): update_version_in_file(snake_case , snake_case , snake_case ) if not patch: update_version_in_examples(snake_case ) def a__ ( ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = '''🤗 Transformers currently provides the following architectures''' __SCREAMING_SNAKE_CASE : Union[str, Any] = '''1. Want to contribute a new model?''' with open(snake_case , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: __SCREAMING_SNAKE_CASE : Dict = f.readlines() # Find the start of the list. __SCREAMING_SNAKE_CASE : Any = 0 while not lines[start_index].startswith(_start_prompt ): start_index += 1 start_index += 1 __SCREAMING_SNAKE_CASE : Tuple = start_index # Update the lines in the model list. 
while not lines[index].startswith(_end_prompt ): if lines[index].startswith('''1.''' ): __SCREAMING_SNAKE_CASE : int = lines[index].replace( '''https://huggingface.co/docs/transformers/main/model_doc''' , '''https://huggingface.co/docs/transformers/model_doc''' , ) index += 1 with open(snake_case , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f: f.writelines(snake_case ) def a__ ( ): """simple docstring""" with open(REPLACE_FILES['''init'''] , '''r''' ) as f: __SCREAMING_SNAKE_CASE : Union[str, Any] = f.read() __SCREAMING_SNAKE_CASE : Union[str, Any] = REPLACE_PATTERNS['''init'''][0].search(snake_case ).groups()[0] return packaging.version.parse(snake_case ) def a__ ( snake_case=False ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = get_version() if patch and default_version.is_devrelease: raise ValueError('''Can\'t create a patch version from the dev branch, checkout a released version!''' ) if default_version.is_devrelease: __SCREAMING_SNAKE_CASE : List[Any] = default_version.base_version elif patch: __SCREAMING_SNAKE_CASE : Optional[int] = F'''{default_version.major}.{default_version.minor}.{default_version.micro + 1}''' else: __SCREAMING_SNAKE_CASE : List[Any] = F'''{default_version.major}.{default_version.minor + 1}.0''' # Now let's ask nicely if that's the right one. __SCREAMING_SNAKE_CASE : Any = input(F'''Which version are you releasing? [{default_version}]''' ) if len(snake_case ) == 0: __SCREAMING_SNAKE_CASE : Optional[Any] = default_version print(F'''Updating version to {version}.''' ) global_version_update(snake_case , patch=snake_case ) if not patch: print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' ) clean_main_ref_in_model_list() def a__ ( ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = get_version() __SCREAMING_SNAKE_CASE : Optional[Any] = F'''{current_version.major}.{current_version.minor + 1}.0.dev0''' __SCREAMING_SNAKE_CASE : Optional[Any] = current_version.base_version # Check with the user we got that right. __SCREAMING_SNAKE_CASE : Dict = input(F'''Which version are we developing now? [{dev_version}]''' ) if len(snake_case ) == 0: __SCREAMING_SNAKE_CASE : Optional[Any] = dev_version print(F'''Updating version to {version}.''' ) global_version_update(snake_case ) print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' ) clean_main_ref_in_model_list() if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() parser.add_argument("""--post_release""", action="""store_true""", help="""Whether this is pre or post release.""") parser.add_argument("""--patch""", action="""store_true""", help="""Whether or not this is a patch release.""") lowercase_ = parser.parse_args() if not args.post_release: pre_release_work(patch=args.patch) elif args.patch: print("""Nothing to do after a patch :-)""") else: post_release_work()
74
from __future__ import annotations import unittest from transformers import RoFormerConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForMultipleChoice, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerModel, ) from transformers.models.roformer.modeling_tf_roformer import ( TFRoFormerSelfAttention, TFRoFormerSinusoidalPositionalEmbedding, ) class lowerCamelCase_ : def __init__( self , __lowerCAmelCase , __lowerCAmelCase=1_3 , __lowerCAmelCase=7 , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=9_9 , __lowerCAmelCase=3_2 , __lowerCAmelCase=2 , __lowerCAmelCase=4 , __lowerCAmelCase=3_7 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=5_1_2 , __lowerCAmelCase=1_6 , __lowerCAmelCase=2 , __lowerCAmelCase=0.02 , __lowerCAmelCase=3 , __lowerCAmelCase=4 , __lowerCAmelCase=None , ): """simple docstring""" __magic_name__ :Optional[int] = parent __magic_name__ :List[Any] = 1_3 __magic_name__ :Union[str, Any] = 7 __magic_name__ :Optional[Any] = True __magic_name__ :Tuple = True __magic_name__ :List[str] = True __magic_name__ :List[Any] = True __magic_name__ :int = 9_9 __magic_name__ :Any = 3_2 __magic_name__ :Union[str, Any] = 2 __magic_name__ :List[str] = 4 __magic_name__ :List[Any] = 3_7 __magic_name__ :Tuple = '''gelu''' __magic_name__ :Any = 0.1 __magic_name__ :str = 0.1 __magic_name__ :List[str] = 5_1_2 __magic_name__ :int = 1_6 __magic_name__ :Any = 2 __magic_name__ :List[Any] = 0.02 __magic_name__ :Optional[Any] = 3 __magic_name__ :Tuple = 4 __magic_name__ :Optional[Any] = None def A ( self ): """simple docstring""" __magic_name__ :Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __magic_name__ :str = None if self.use_input_mask: __magic_name__ :Optional[int] = random_attention_mask([self.batch_size, self.seq_length] ) __magic_name__ :str = None if self.use_token_type_ids: __magic_name__ :List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __magic_name__ :Union[str, Any] = None __magic_name__ :Tuple = None __magic_name__ :str = None if self.use_labels: __magic_name__ :List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __magic_name__ :List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __magic_name__ :List[Any] = ids_tensor([self.batch_size] , self.num_choices ) __magic_name__ :str = RoFormerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=__lowerCAmelCase , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , 
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): """simple docstring""" __magic_name__ :int = TFRoFormerModel(config=__lowerCAmelCase ) __magic_name__ :Optional[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} __magic_name__ :List[str] = [input_ids, input_mask] __magic_name__ :Any = model(__lowerCAmelCase ) __magic_name__ :List[str] = model(__lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): """simple docstring""" __magic_name__ :Dict = True __magic_name__ :List[str] = TFRoFormerForCausalLM(config=__lowerCAmelCase ) __magic_name__ :str = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } __magic_name__ :Optional[Any] = model(__lowerCAmelCase )['''logits'''] self.parent.assertListEqual( list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] ) def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): """simple docstring""" __magic_name__ :Optional[Any] = TFRoFormerForMaskedLM(config=__lowerCAmelCase ) __magic_name__ :Any = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } __magic_name__ :Dict = model(__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): """simple docstring""" __magic_name__ :int = self.num_labels __magic_name__ :str = TFRoFormerForSequenceClassification(config=__lowerCAmelCase ) __magic_name__ :Optional[int] = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } __magic_name__ :str = model(__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): """simple docstring""" __magic_name__ :Union[str, Any] = self.num_choices __magic_name__ :Tuple = TFRoFormerForMultipleChoice(config=__lowerCAmelCase ) __magic_name__ :int = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) ) __magic_name__ :Optional[Any] = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) ) __magic_name__ :Union[str, Any] = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) ) __magic_name__ :str = { '''input_ids''': multiple_choice_inputs_ids, '''attention_mask''': multiple_choice_input_mask, '''token_type_ids''': multiple_choice_token_type_ids, } __magic_name__ :Tuple = model(__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): """simple docstring""" __magic_name__ :Optional[int] = self.num_labels __magic_name__ :Any = TFRoFormerForTokenClassification(config=__lowerCAmelCase ) __magic_name__ :str = { '''input_ids''': input_ids, '''attention_mask''': input_mask, 
'''token_type_ids''': token_type_ids, } __magic_name__ :Dict = model(__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): """simple docstring""" __magic_name__ :List[str] = TFRoFormerForQuestionAnswering(config=__lowerCAmelCase ) __magic_name__ :List[str] = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } __magic_name__ :Union[str, Any] = model(__lowerCAmelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def A ( self ): """simple docstring""" __magic_name__ :Union[str, Any] = self.prepare_config_and_inputs() ( ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ) :Union[str, Any] = config_and_inputs __magic_name__ :Optional[Any] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_tf class lowerCamelCase_ ( lowerCamelCase , lowerCamelCase , unittest.TestCase ): a__ = ( ( TFRoFormerModel, TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerForMultipleChoice, ) if is_tf_available() else () ) a__ = ( { '''feature-extraction''': TFRoFormerModel, '''fill-mask''': TFRoFormerForMaskedLM, '''question-answering''': TFRoFormerForQuestionAnswering, '''text-classification''': TFRoFormerForSequenceClassification, '''text-generation''': TFRoFormerForCausalLM, '''token-classification''': TFRoFormerForTokenClassification, '''zero-shot''': TFRoFormerForSequenceClassification, } if is_tf_available() else {} ) a__ = False a__ = False def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): """simple docstring""" if pipeline_test_casse_name == "TextGenerationPipelineTests": return True return False def A ( self ): """simple docstring""" __magic_name__ :List[str] = TFRoFormerModelTester(self ) __magic_name__ :List[str] = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=3_7 ) def A ( self ): """simple docstring""" self.config_tester.run_common_tests() def A ( self ): """simple docstring""" __magic_name__ :Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCAmelCase ) def A ( self ): """simple docstring""" __magic_name__ :Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__lowerCAmelCase ) def A ( self ): """simple docstring""" __magic_name__ :Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head(*__lowerCAmelCase ) def A ( self ): """simple docstring""" __magic_name__ :Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*__lowerCAmelCase ) def A ( self ): """simple docstring""" __magic_name__ :Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__lowerCAmelCase ) def A ( self ): """simple docstring""" __magic_name__ :Dict = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_for_sequence_classification(*__lowerCAmelCase ) def A ( self ): """simple docstring""" __magic_name__ :Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__lowerCAmelCase ) @slow def A ( self ): """simple docstring""" __magic_name__ :Optional[Any] = TFRoFormerModel.from_pretrained('''junnyu/roformer_chinese_base''' ) self.assertIsNotNone(__lowerCAmelCase ) @require_tf class lowerCamelCase_ ( unittest.TestCase ): @slow def A ( self ): """simple docstring""" __magic_name__ :int = TFRoFormerForMaskedLM.from_pretrained('''junnyu/roformer_chinese_base''' ) __magic_name__ :Dict = tf.constant([[0, 1, 2, 3, 4, 5]] ) __magic_name__ :Optional[Any] = model(__lowerCAmelCase )[0] # TODO Replace vocab size __magic_name__ :int = 5_0_0_0_0 __magic_name__ :Tuple = [1, 6, vocab_size] self.assertEqual(output.shape , __lowerCAmelCase ) print(output[:, :3, :3] ) # TODO Replace values below with what was printed above. __magic_name__ :Any = tf.constant( [ [ [-0.12053341, -1.0264901, 0.29221946], [-1.5133783, 0.197433, 0.15190607], [-5.0135403, -3.900256, -0.84038764], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , __lowerCAmelCase , atol=1E-4 ) @require_tf class lowerCamelCase_ ( unittest.TestCase ): a__ = 1e-4 def A ( self ): """simple docstring""" __magic_name__ :Optional[int] = tf.constant([[4, 1_0]] ) __magic_name__ :Optional[int] = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 ) __magic_name__ :Optional[Any] = emba(input_ids.shape ) __magic_name__ :List[str] = tf.constant( [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]] ) tf.debugging.assert_near(__lowerCAmelCase , __lowerCAmelCase , atol=self.tolerance ) def A ( self ): """simple docstring""" __magic_name__ :Tuple = tf.constant( [ [0.0000, 0.0000, 0.0000, 0.0000, 0.0000], [0.8415, 0.8219, 0.8020, 0.7819, 0.7617], [0.9093, 0.9364, 0.9581, 0.9749, 0.9870], ] ) __magic_name__ :Union[str, Any] = TFRoFormerSinusoidalPositionalEmbedding(num_positions=5_1_2 , embedding_dim=5_1_2 ) emba([2, 1_6, 5_1_2] ) __magic_name__ :Optional[int] = emba.weight[:3, :5] tf.debugging.assert_near(__lowerCAmelCase , __lowerCAmelCase , atol=self.tolerance ) @require_tf class lowerCamelCase_ ( unittest.TestCase ): a__ = 1e-4 def A ( self ): """simple docstring""" # 2,12,16,64 __magic_name__ :int = tf.reshape(tf.range(2 * 1_2 * 1_6 * 6_4 , dtype=tf.floataa ) , shape=(2, 1_2, 1_6, 6_4) ) / 1_0_0 __magic_name__ :str = -tf.reshape(tf.range(2 * 1_2 * 1_6 * 6_4 , dtype=tf.floataa ) , shape=(2, 1_2, 1_6, 6_4) ) / 1_0_0 __magic_name__ :int = TFRoFormerSinusoidalPositionalEmbedding(num_positions=3_2 , embedding_dim=6_4 ) __magic_name__ :List[str] = embed_positions([2, 1_6, 7_6_8] )[None, None, :, :] __magic_name__ , __magic_name__ :Union[str, Any] = TFRoFormerSelfAttention.apply_rotary_position_embeddings( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) __magic_name__ :Tuple = tf.constant( [ [0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700], [-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343], [-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985], [-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871], [0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980], [3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253], ] ) __magic_name__ :List[str] = tf.constant( [ [0.0000, -0.0100, -0.0200, -0.0300, -0.0400, 
-0.0500, -0.0600, -0.0700], [0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343], [1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985], [2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871], [-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980], [-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253], ] ) tf.debugging.assert_near(query_layer[0, 0, :6, :8] , __lowerCAmelCase , atol=self.tolerance ) tf.debugging.assert_near(key_layer[0, 0, :6, :8] , __lowerCAmelCase , atol=self.tolerance )
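The integration test above checks TFRoFormerSelfAttention.apply_rotary_position_embeddings against hard-coded query/key values. Below is a minimal NumPy sketch of the rotation that test exercises, assuming the standard RoFormer pairing of even/odd feature channels; the function names are illustrative, not the library's API.

import numpy as np

def sinusoidal_positions(num_positions: int, dim: int) -> np.ndarray:
    # Half sines then half cosines, matching the layout of the embedding table above.
    inv_freq = 1.0 / (10000 ** (np.arange(0, dim, 2) / dim))
    angles = np.arange(num_positions)[:, None] * inv_freq[None, :]
    return np.concatenate([np.sin(angles), np.cos(angles)], axis=-1)

def apply_rotary(x: np.ndarray, sinusoidal_pos: np.ndarray) -> np.ndarray:
    # Rotate each (even, odd) feature pair of x by its position-dependent angle.
    sin, cos = np.split(sinusoidal_pos, 2, axis=-1)
    sin_pos = np.stack([sin, sin], axis=-1).reshape(sinusoidal_pos.shape)
    cos_pos = np.stack([cos, cos], axis=-1).reshape(sinusoidal_pos.shape)
    rotate_half = np.stack([-x[..., 1::2], x[..., ::2]], axis=-1).reshape(x.shape)
    return x * cos_pos + rotate_half * sin_pos

query = np.arange(16 * 64, dtype=np.float64).reshape(16, 64) / 100
rotated = apply_rotary(query, sinusoidal_positions(16, 64))
print(rotated.shape)  # (16, 64)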
0
0
import string

# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
ENGLISH_LETTER_FREQ = {
    "E": 12.70, "T": 9.06, "A": 8.17, "O": 7.51, "I": 6.97, "N": 6.75, "S": 6.33,
    "H": 6.09, "R": 5.99, "D": 4.25, "L": 4.03, "C": 2.78, "U": 2.76, "M": 2.41,
    "W": 2.36, "F": 2.23, "G": 2.02, "Y": 1.97, "P": 1.93, "B": 1.29, "V": 0.98,
    "K": 0.77, "J": 0.15, "X": 0.15, "Q": 0.10, "Z": 0.07,
}
ETAOIN = "ETAOINSHRDLCUMWFGYPBVKJXQZ"
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"


def get_letter_count(message: str) -> dict[str, int]:
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1
    return letter_count


def get_item_at_index_zero(x: tuple) -> str:
    return x[0]


def get_frequency_order(message: str) -> str:
    letter_to_freq = get_letter_count(message)
    freq_to_letter: dict[int, list[str]] = {freq: [] for letter, freq in letter_to_freq.items()}
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter)
    freq_to_letter_str: dict[int, str] = {}
    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find, reverse=True)
        freq_to_letter_str[freq] = "".join(freq_to_letter[freq])
    freq_pairs = list(freq_to_letter_str.items())
    freq_pairs.sort(key=get_item_at_index_zero, reverse=True)
    sorted_freq_to_letter_str: list[str] = [freq_pair[1] for freq_pair in freq_pairs]
    return "".join(sorted_freq_to_letter_str)


def english_freq_match_score(message: str) -> int:
    freq_order = get_frequency_order(message)
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1
    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1
    return match_score


if __name__ == "__main__":
    import doctest

    doctest.testmod()
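Hypothetical usage of the module above, assuming it is saved on the import path as frequency_analysis.py; the printed ordering depends on the sample text.

from frequency_analysis import english_freq_match_score, get_frequency_order

sample = "Alan Mathison Turing was an English mathematician and computer scientist."
print(get_frequency_order(sample))       # 26-letter ordering, most frequent first
print(english_freq_match_score(sample))  # 0-12; higher reads as more English-like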
75
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available


_import_structure = {"tokenization_herbert": ["HerbertTokenizer"]}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_herbert_fast"] = ["HerbertTokenizerFast"]

if TYPE_CHECKING:
    from .tokenization_herbert import HerbertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_herbert_fast import HerbertTokenizerFast

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
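The module above defers heavy imports through transformers' _LazyModule. A minimal sketch of the same deferred-import idea, written with PEP 562 module-level __getattr__; the attribute-to-submodule map is illustrative, and the relative import is only meaningful inside a package.

import importlib

_LAZY_ATTRS = {"HerbertTokenizer": "tokenization_herbert"}

def __getattr__(name):
    # Import the owning submodule only when the attribute is first requested.
    if name in _LAZY_ATTRS:
        module = importlib.import_module("." + _LAZY_ATTRS[name], __package__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")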
0
0
"""simple docstring""" import copy from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import ClassLabel, Features, Value from .base import TaskTemplate @dataclass(frozen=snake_case ) class UpperCAmelCase_ ( snake_case ): # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization UpperCamelCase =field(default="text-classification" , metadata={"include_in_asdict_even_if_is_default": True} ) UpperCamelCase =Features({"text": Value("string" )} ) UpperCamelCase =Features({"labels": ClassLabel} ) UpperCamelCase ="text" UpperCamelCase ="labels" def _lowerCamelCase ( self , UpperCamelCase_ ) -> List[Any]: if self.label_column not in features: raise ValueError(F"""Column {self.label_column} is not present in features.""" ) if not isinstance(features[self.label_column] , UpperCamelCase_ ): raise ValueError(F"""Column {self.label_column} is not a ClassLabel.""" ) __lowercase : int = copy.deepcopy(self ) __lowercase : Union[str, Any] = self.label_schema.copy() __lowercase : int = features[self.label_column] __lowercase : List[str] = label_schema return task_template @property def _lowerCamelCase ( self ) -> Dict[str, str]: return { self.text_column: "text", self.label_column: "labels", }
76
import argparse import gdown import numpy as np import torch from huggingface_hub import hf_hub_download from transformers import ( CLIPTokenizer, CLIPTokenizerFast, VideoMAEImageProcessor, XCLIPConfig, XCLIPModel, XCLIPProcessor, XCLIPTextConfig, XCLIPVisionConfig, ) def __lowercase ( snake_case, snake_case ): """simple docstring""" __magic_name__ :str = XCLIPTextConfig() # derive patch size from model name __magic_name__ :Union[str, Any] = model_name.find('''patch''' ) __magic_name__ :Optional[Any] = int(model_name[start_idx + len('''patch''' ) : start_idx + len('''patch''' ) + 2] ) __magic_name__ :int = XCLIPVisionConfig(patch_size=snake_case, num_frames=snake_case ) if "large" in model_name: __magic_name__ :Dict = 7_6_8 __magic_name__ :int = 3_0_7_2 __magic_name__ :List[Any] = 1_2 __magic_name__ :str = 1_0_2_4 __magic_name__ :Any = 4_0_9_6 __magic_name__ :Optional[Any] = 1_6 __magic_name__ :Union[str, Any] = 2_4 __magic_name__ :Union[str, Any] = 7_6_8 __magic_name__ :Tuple = 3_0_7_2 if model_name == "xclip-large-patch14-16-frames": __magic_name__ :List[str] = 3_3_6 __magic_name__ :Any = XCLIPConfig.from_text_vision_configs(snake_case, snake_case ) if "large" in model_name: __magic_name__ :str = 7_6_8 return config def __lowercase ( snake_case ): """simple docstring""" if name == "token_embedding.weight": __magic_name__ :Any = name.replace('''token_embedding.weight''', '''text_model.embeddings.token_embedding.weight''' ) if name == "positional_embedding": __magic_name__ :Any = name.replace('''positional_embedding''', '''text_model.embeddings.position_embedding.weight''' ) if "ln_1" in name: __magic_name__ :List[str] = name.replace('''ln_1''', '''layer_norm1''' ) if "ln_2" in name: __magic_name__ :str = name.replace('''ln_2''', '''layer_norm2''' ) if "c_fc" in name: __magic_name__ :List[Any] = name.replace('''c_fc''', '''fc1''' ) if "c_proj" in name: __magic_name__ :Any = name.replace('''c_proj''', '''fc2''' ) if name.startswith('''transformer.resblocks''' ): __magic_name__ :Any = name.replace('''transformer.resblocks''', '''text_model.encoder.layers''' ) if "attn.out_proj" in name and "message" not in name: __magic_name__ :Union[str, Any] = name.replace('''attn.out_proj''', '''self_attn.out_proj''' ) if "ln_final" in name: __magic_name__ :Tuple = name.replace('''ln_final''', '''text_model.final_layer_norm''' ) # visual encoder if name == "visual.class_embedding": __magic_name__ :List[Any] = name.replace('''visual.class_embedding''', '''vision_model.embeddings.class_embedding''' ) if name == "visual.positional_embedding": __magic_name__ :Any = name.replace('''visual.positional_embedding''', '''vision_model.embeddings.position_embedding.weight''' ) if name.startswith('''visual.transformer.resblocks''' ): __magic_name__ :Union[str, Any] = name.replace('''visual.transformer.resblocks''', '''vision_model.encoder.layers''' ) if "visual.conv1" in name: __magic_name__ :Tuple = name.replace('''visual.conv1''', '''vision_model.embeddings.patch_embedding''' ) if "visual.ln_pre" in name: __magic_name__ :Tuple = name.replace('''visual.ln_pre''', '''vision_model.pre_layernorm''' ) if "visual.ln_post" in name: __magic_name__ :Optional[Any] = name.replace('''visual.ln_post''', '''vision_model.post_layernorm''' ) if "visual.proj" in name: __magic_name__ :Tuple = name.replace('''visual.proj''', '''visual_projection.weight''' ) if "text_projection" in name: __magic_name__ :int = name.replace('''text_projection''', '''text_projection.weight''' ) # things on top if "prompts_visual_proj" in name: 
__magic_name__ :int = name.replace('''prompts_visual_proj''', '''prompts_visual_projection''' ) if "prompts_visual_ln" in name: __magic_name__ :Dict = name.replace('''prompts_visual_ln''', '''prompts_visual_layernorm''' ) # mit if name == "mit.positional_embedding": __magic_name__ :List[Any] = name.replace('''positional''', '''position''' ) if name.startswith('''mit.resblocks''' ): __magic_name__ :Union[str, Any] = name.replace('''mit.resblocks''', '''mit.encoder.layers''' ) # prompts generator if name.startswith('''prompts_generator.norm''' ): __magic_name__ :str = name.replace('''prompts_generator.norm''', '''prompts_generator.layernorm''' ) return name def __lowercase ( snake_case, snake_case ): """simple docstring""" for key in orig_state_dict.copy().keys(): __magic_name__ :Any = orig_state_dict.pop(snake_case ) if "attn.in_proj" in key: __magic_name__ :str = key.split('''.''' ) if key.startswith('''visual''' ): __magic_name__ :List[Any] = key_split[3] __magic_name__ :List[Any] = config.vision_config.hidden_size if "message_attn" in key: if "weight" in key: __magic_name__ :List[Any] = val[ :dim, : ] __magic_name__ :List[str] = val[ dim : dim * 2, : ] __magic_name__ :List[str] = val[ -dim:, : ] else: __magic_name__ :str = val[ :dim ] __magic_name__ :Optional[int] = val[ dim : dim * 2 ] __magic_name__ :Any = val[ -dim: ] else: if "weight" in key: __magic_name__ :int = val[ :dim, : ] __magic_name__ :Union[str, Any] = val[ dim : dim * 2, : ] __magic_name__ :List[Any] = val[ -dim:, : ] else: __magic_name__ :Union[str, Any] = val[:dim] __magic_name__ :str = val[ dim : dim * 2 ] __magic_name__ :Dict = val[-dim:] elif key.startswith('''mit''' ): __magic_name__ :List[Any] = key_split[2] __magic_name__ :Any = config.vision_config.mit_hidden_size if "weight" in key: __magic_name__ :Union[str, Any] = val[:dim, :] __magic_name__ :Optional[int] = val[dim : dim * 2, :] __magic_name__ :int = val[-dim:, :] else: __magic_name__ :Tuple = val[:dim] __magic_name__ :Optional[int] = val[dim : dim * 2] __magic_name__ :Optional[int] = val[-dim:] else: __magic_name__ :Any = key_split[2] __magic_name__ :List[Any] = config.text_config.hidden_size if "weight" in key: __magic_name__ :Union[str, Any] = val[:dim, :] __magic_name__ :Tuple = val[ dim : dim * 2, : ] __magic_name__ :str = val[-dim:, :] else: __magic_name__ :int = val[:dim] __magic_name__ :Any = val[ dim : dim * 2 ] __magic_name__ :str = val[-dim:] else: __magic_name__ :Tuple = rename_key(snake_case ) if new_key_name in ["visual_projection.weight", "text_projection.weight"]: __magic_name__ :List[Any] = val.T __magic_name__ :Optional[Any] = val return orig_state_dict def __lowercase ( snake_case ): """simple docstring""" if num_frames == 8: __magic_name__ :Any = '''eating_spaghetti_8_frames.npy''' elif num_frames == 1_6: __magic_name__ :List[Any] = '''eating_spaghetti.npy''' elif num_frames == 3_2: __magic_name__ :Tuple = '''eating_spaghetti_32_frames.npy''' __magic_name__ :str = hf_hub_download( repo_id='''hf-internal-testing/spaghetti-video''', filename=snake_case, repo_type='''dataset''', ) __magic_name__ :List[Any] = np.load(snake_case ) return list(snake_case ) def __lowercase ( snake_case, snake_case=None, snake_case=False ): """simple docstring""" __magic_name__ :Union[str, Any] = { # fully supervised kinetics-400 checkpoints '''xclip-base-patch32''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth''', '''xclip-base-patch32-16-frames''': ( 
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth''' ), '''xclip-base-patch16''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth''', '''xclip-base-patch16-16-frames''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth''' ), '''xclip-large-patch14''': '''https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&amp;export=download&amp;confirm=t&amp;uuid=b26caedc-88e2-473e-830a-9d158b653cdb''', '''xclip-large-patch14-16-frames''': '''https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&amp;export=download&amp;confirm=t&amp;uuid=538fa810-e671-4050-b385-9a623f89804f''', # fully supervised kinetics-600 checkpoints '''xclip-base-patch16-kinetics-600''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth''' ), '''xclip-base-patch16-kinetics-600-16-frames''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth''' ), '''xclip-large-patch14-kinetics-600''': '''https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&amp;export=download&amp;confirm=t&amp;uuid=141d4977-4a65-44ae-864f-4b0c19f838be''', # few shot '''xclip-base-patch16-hmdb-2-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth''' ), '''xclip-base-patch16-hmdb-4-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth''' ), '''xclip-base-patch16-hmdb-8-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth''' ), '''xclip-base-patch16-hmdb-16-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth''' ), '''xclip-base-patch16-ucf-2-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth''' ), '''xclip-base-patch16-ucf-4-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth''' ), '''xclip-base-patch16-ucf-8-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth''' ), '''xclip-base-patch16-ucf-16-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth''' ), # zero shot '''xclip-base-patch16-zero-shot''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth''', } __magic_name__ :Optional[int] = model_to_url[model_name] __magic_name__ :List[str] = 8 if "16-frames" in model_name: __magic_name__ :List[Any] = 1_6 elif "shot" in model_name: __magic_name__ :Dict = 3_2 __magic_name__ :str = get_xclip_config(snake_case, snake_case ) __magic_name__ :List[Any] = XCLIPModel(snake_case ) model.eval() if "drive" in checkpoint_url: __magic_name__ :Any = '''pytorch_model.bin''' gdown.cached_download(snake_case, snake_case, quiet=snake_case ) __magic_name__ :Optional[Any] = torch.load(snake_case, map_location='''cpu''' )['''model'''] else: __magic_name__ :Optional[int] = torch.hub.load_state_dict_from_url(snake_case )['''model'''] __magic_name__ :List[str] = convert_state_dict(snake_case, snake_case ) __magic_name__ :List[Any] = XCLIPModel(snake_case ) __magic_name__ , __magic_name__ :Optional[Any] = model.load_state_dict(snake_case, strict=snake_case ) assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"] model.eval() __magic_name__ :str = 3_3_6 if model_name == '''xclip-large-patch14-16-frames''' else 2_2_4 __magic_name__ :Optional[int] = VideoMAEImageProcessor(size=snake_case ) 
__magic_name__ :Optional[int] = CLIPTokenizer.from_pretrained('''openai/clip-vit-base-patch32''' ) __magic_name__ :Tuple = CLIPTokenizerFast.from_pretrained('''openai/clip-vit-base-patch32''' ) __magic_name__ :Optional[int] = XCLIPProcessor(image_processor=snake_case, tokenizer=snake_case ) __magic_name__ :List[Any] = prepare_video(snake_case ) __magic_name__ :str = processor( text=['''playing sports''', '''eating spaghetti''', '''go shopping'''], videos=snake_case, return_tensors='''pt''', padding=snake_case ) print('''Shape of pixel values:''', inputs.pixel_values.shape ) with torch.no_grad(): __magic_name__ :Tuple = model(**snake_case ) # Verify outputs __magic_name__ :Any = outputs.logits_per_video __magic_name__ :str = logits_per_video.softmax(dim=1 ) print('''Probs:''', snake_case ) # kinetics-400 if model_name == "xclip-base-patch32": __magic_name__ :Dict = torch.tensor([[0.0019, 0.9951, 0.0030]] ) elif model_name == "xclip-base-patch32-16-frames": __magic_name__ :str = torch.tensor([[7.0_9_9_9E-0_4, 9.9_8_8_3E-0_1, 4.5_5_8_0E-0_4]] ) elif model_name == "xclip-base-patch16": __magic_name__ :Tuple = torch.tensor([[0.0083, 0.9681, 0.0236]] ) elif model_name == "xclip-base-patch16-16-frames": __magic_name__ :Tuple = torch.tensor([[7.6_9_3_7E-0_4, 9.9_7_2_8E-0_1, 1.9_4_7_3E-0_3]] ) elif model_name == "xclip-large-patch14": __magic_name__ :str = torch.tensor([[0.0062, 0.9864, 0.0075]] ) elif model_name == "xclip-large-patch14-16-frames": __magic_name__ :Optional[int] = torch.tensor([[3.3_8_7_7E-0_4, 9.9_9_3_7E-0_1, 2.8_8_8_8E-0_4]] ) # kinetics-600 elif model_name == "xclip-base-patch16-kinetics-600": __magic_name__ :Optional[int] = torch.tensor([[0.0555, 0.8914, 0.0531]] ) elif model_name == "xclip-base-patch16-kinetics-600-16-frames": __magic_name__ :List[str] = torch.tensor([[3.8_5_5_4E-0_4, 9.9_9_2_9E-0_1, 3.2_7_5_4E-0_4]] ) elif model_name == "xclip-large-patch14-kinetics-600": __magic_name__ :List[str] = torch.tensor([[0.0036, 0.9920, 0.0045]] ) # few shot elif model_name == "xclip-base-patch16-hmdb-2-shot": __magic_name__ :Tuple = torch.tensor([[7.1_8_9_0E-0_6, 9.9_9_9_4E-0_1, 5.6_5_5_9E-0_5]] ) elif model_name == "xclip-base-patch16-hmdb-4-shot": __magic_name__ :List[str] = torch.tensor([[1.0_3_2_0E-0_5, 9.9_9_9_3E-0_1, 6.2_4_3_5E-0_5]] ) elif model_name == "xclip-base-patch16-hmdb-8-shot": __magic_name__ :Optional[int] = torch.tensor([[4.1_3_7_7E-0_6, 9.9_9_9_0E-0_1, 9.8_3_8_6E-0_5]] ) elif model_name == "xclip-base-patch16-hmdb-16-shot": __magic_name__ :Optional[int] = torch.tensor([[4.1_3_4_7E-0_5, 9.9_9_6_2E-0_1, 3.3_4_1_1E-0_4]] ) elif model_name == "xclip-base-patch16-ucf-2-shot": __magic_name__ :Union[str, Any] = torch.tensor([[8.5_8_5_7E-0_5, 9.9_9_2_8E-0_1, 6.3_2_9_1E-0_4]] ) elif model_name == "xclip-base-patch16-ucf-4-shot": __magic_name__ :Union[str, Any] = torch.tensor([[8.5_8_5_7E-0_5, 9.9_9_2_8E-0_1, 6.3_2_9_1E-0_4]] ) elif model_name == "xclip-base-patch16-ucf-8-shot": __magic_name__ :Optional[int] = torch.tensor([[0.0027, 0.9904, 0.0070]] ) elif model_name == "xclip-base-patch16-ucf-16-shot": __magic_name__ :Any = torch.tensor([[9.8_2_1_9E-0_4, 9.9_5_9_3E-0_1, 3.0_8_6_3E-0_3]] ) # zero shot elif model_name == "xclip-base-patch16-zero-shot": __magic_name__ :Optional[int] = torch.tensor([[3.5_0_8_2E-0_4, 9.9_7_8_5E-0_1, 1.7_9_6_6E-0_3]] ) else: raise ValueError(f'''Model name {model_name} not supported''' ) assert torch.allclose(snake_case, snake_case, atol=1E-3 ) print('''Looks ok!''' ) if pytorch_dump_folder_path is not None: print(f'''Saving model {model_name} 
to {pytorch_dump_folder_path}''' ) model.save_pretrained(snake_case ) if push_to_hub: print('''Pushing model, processor and slow tokenizer files to the hub...''' ) model.push_to_hub(snake_case, organization='''nielsr''' ) processor.push_to_hub(snake_case, organization='''nielsr''' ) slow_tokenizer.push_to_hub(snake_case, organization='''nielsr''' ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""xclip-base-patch32""", type=str, help="""Name of the model.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) SCREAMING_SNAKE_CASE__ : List[Any] = parser.parse_args() convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
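The convert_state_dict function above slices each fused attention matrix into separate query/key/value projections. A self-contained sketch of that split, with a toy (3*dim, dim) matrix standing in for a real in_proj weight.

import numpy as np

dim = 4  # illustrative hidden size; the real checkpoints use 768 or 1024
in_proj_weight = np.arange(3 * dim * dim, dtype=np.float32).reshape(3 * dim, dim)

q_w = in_proj_weight[:dim, :]            # rows 0..dim-1      -> q_proj.weight
k_w = in_proj_weight[dim : dim * 2, :]   # rows dim..2*dim-1  -> k_proj.weight
v_w = in_proj_weight[-dim:, :]           # last dim rows      -> v_proj.weight
assert q_w.shape == k_w.shape == v_w.shape == (dim, dim)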
0
0
"""simple docstring""" import argparse from pathlib import Path import requests import torch from PIL import Image from transformers import ( RobertaTokenizer, TrOCRConfig, TrOCRForCausalLM, TrOCRProcessor, VisionEncoderDecoderModel, ViTConfig, ViTImageProcessor, ViTModel, ) from transformers.utils import logging logging.set_verbosity_info() A = logging.get_logger(__name__) def _UpperCamelCase ( UpperCamelCase , UpperCamelCase ) -> Union[str, Any]: """simple docstring""" __UpperCAmelCase : int = [] for i in range(encoder_config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (f"encoder.deit.blocks.{i}.norm1.weight", f"encoder.encoder.layer.{i}.layernorm_before.weight") ) rename_keys.append((f"encoder.deit.blocks.{i}.norm1.bias", f"encoder.encoder.layer.{i}.layernorm_before.bias") ) rename_keys.append( (f"encoder.deit.blocks.{i}.attn.proj.weight", f"encoder.encoder.layer.{i}.attention.output.dense.weight") ) rename_keys.append( (f"encoder.deit.blocks.{i}.attn.proj.bias", f"encoder.encoder.layer.{i}.attention.output.dense.bias") ) rename_keys.append( (f"encoder.deit.blocks.{i}.norm2.weight", f"encoder.encoder.layer.{i}.layernorm_after.weight") ) rename_keys.append((f"encoder.deit.blocks.{i}.norm2.bias", f"encoder.encoder.layer.{i}.layernorm_after.bias") ) rename_keys.append( (f"encoder.deit.blocks.{i}.mlp.fc1.weight", f"encoder.encoder.layer.{i}.intermediate.dense.weight") ) rename_keys.append( (f"encoder.deit.blocks.{i}.mlp.fc1.bias", f"encoder.encoder.layer.{i}.intermediate.dense.bias") ) rename_keys.append( (f"encoder.deit.blocks.{i}.mlp.fc2.weight", f"encoder.encoder.layer.{i}.output.dense.weight") ) rename_keys.append((f"encoder.deit.blocks.{i}.mlp.fc2.bias", f"encoder.encoder.layer.{i}.output.dense.bias") ) # cls token, position embeddings and patch embeddings of encoder rename_keys.extend( [ ("encoder.deit.cls_token", "encoder.embeddings.cls_token"), ("encoder.deit.pos_embed", "encoder.embeddings.position_embeddings"), ("encoder.deit.patch_embed.proj.weight", "encoder.embeddings.patch_embeddings.projection.weight"), ("encoder.deit.patch_embed.proj.bias", "encoder.embeddings.patch_embeddings.projection.bias"), ("encoder.deit.norm.weight", "encoder.layernorm.weight"), ("encoder.deit.norm.bias", "encoder.layernorm.bias"), ] ) return rename_keys def _UpperCamelCase ( UpperCamelCase , UpperCamelCase ) -> int: """simple docstring""" for i in range(encoder_config.num_hidden_layers ): # queries, keys and values (only weights, no biases) __UpperCAmelCase : Optional[Any] = state_dict.pop(f"encoder.deit.blocks.{i}.attn.qkv.weight" ) __UpperCAmelCase : List[Any] = in_proj_weight[ : encoder_config.hidden_size, : ] __UpperCAmelCase : List[str] = in_proj_weight[ encoder_config.hidden_size : encoder_config.hidden_size * 2, : ] __UpperCAmelCase : List[Any] = in_proj_weight[ -encoder_config.hidden_size :, : ] def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Any: """simple docstring""" __UpperCAmelCase : str = dct.pop(UpperCamelCase ) __UpperCAmelCase : List[Any] = val def _UpperCamelCase ( UpperCamelCase ) -> str: """simple docstring""" if "handwritten" in checkpoint_url: __UpperCAmelCase : Any = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg" # industry # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" # # url = 
"https://fki.tic.heia-fr.ch/static/img/a01-122.jpg" elif "printed" in checkpoint_url or "stage1" in checkpoint_url: __UpperCAmelCase : Any = "https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg" __UpperCAmelCase : Tuple = Image.open(requests.get(UpperCamelCase , stream=UpperCamelCase ).raw ).convert("RGB" ) return im @torch.no_grad() def _UpperCamelCase ( UpperCamelCase , UpperCamelCase ) -> List[Any]: """simple docstring""" __UpperCAmelCase : List[str] = ViTConfig(image_size=384 , qkv_bias=UpperCamelCase ) __UpperCAmelCase : Any = TrOCRConfig() # size of the architecture if "base" in checkpoint_url: __UpperCAmelCase : Optional[int] = 768 elif "large" in checkpoint_url: # use ViT-large encoder __UpperCAmelCase : Any = 1024 __UpperCAmelCase : int = 4096 __UpperCAmelCase : Tuple = 24 __UpperCAmelCase : Any = 16 __UpperCAmelCase : Union[str, Any] = 1024 else: raise ValueError("Should either find 'base' or 'large' in checkpoint URL" ) # the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards if "large-printed" in checkpoint_url or "stage1" in checkpoint_url: __UpperCAmelCase : List[str] = False __UpperCAmelCase : Any = "relu" __UpperCAmelCase : Any = 1024 __UpperCAmelCase : str = True __UpperCAmelCase : Any = False __UpperCAmelCase : Optional[int] = False # load HuggingFace model __UpperCAmelCase : Optional[Any] = ViTModel(UpperCamelCase , add_pooling_layer=UpperCamelCase ) __UpperCAmelCase : str = TrOCRForCausalLM(UpperCamelCase ) __UpperCAmelCase : Optional[int] = VisionEncoderDecoderModel(encoder=UpperCamelCase , decoder=UpperCamelCase ) model.eval() # load state_dict of original model, rename some keys __UpperCAmelCase : Optional[int] = torch.hub.load_state_dict_from_url(UpperCamelCase , map_location="cpu" , check_hash=UpperCamelCase )["model"] __UpperCAmelCase : Optional[Any] = create_rename_keys(UpperCamelCase , UpperCamelCase ) for src, dest in rename_keys: rename_key(UpperCamelCase , UpperCamelCase , UpperCamelCase ) read_in_q_k_v(UpperCamelCase , UpperCamelCase ) # remove parameters we don't need del state_dict["encoder.deit.head.weight"] del state_dict["encoder.deit.head.bias"] del state_dict["decoder.version"] # add prefix to decoder keys for key, val in state_dict.copy().items(): __UpperCAmelCase : Optional[int] = state_dict.pop(UpperCamelCase ) if key.startswith("decoder" ) and "output_projection" not in key: __UpperCAmelCase : Any = val else: __UpperCAmelCase : int = val # load state dict model.load_state_dict(UpperCamelCase ) # Check outputs on an image __UpperCAmelCase : List[str] = ViTImageProcessor(size=encoder_config.image_size ) __UpperCAmelCase : Union[str, Any] = RobertaTokenizer.from_pretrained("roberta-large" ) __UpperCAmelCase : Dict = TrOCRProcessor(UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : Optional[Any] = processor(images=prepare_img(UpperCamelCase ) , return_tensors="pt" ).pixel_values # verify logits __UpperCAmelCase : Dict = torch.tensor([[model.config.decoder.decoder_start_token_id]] ) __UpperCAmelCase : Optional[Any] = model(pixel_values=UpperCamelCase , decoder_input_ids=UpperCamelCase ) __UpperCAmelCase : List[Any] = outputs.logits __UpperCAmelCase : List[str] = torch.Size([1, 1, 5_0265] ) if "trocr-base-handwritten" in checkpoint_url: __UpperCAmelCase : str = torch.tensor( [-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311] ) elif "trocr-large-handwritten" 
in checkpoint_url: __UpperCAmelCase : int = torch.tensor( [-2.6437, -1.3129, -2.2596, -5.3455, 6.3539, 1.7604, 5.4991, 1.4702, 5.6113, 2.0170] ) elif "trocr-base-printed" in checkpoint_url: __UpperCAmelCase : str = torch.tensor( [-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210] ) elif "trocr-large-printed" in checkpoint_url: __UpperCAmelCase : Union[str, Any] = torch.tensor( [-6.0162, -7.0959, 4.4155, -5.1063, 7.0468, -3.1631, 2.6466, -0.3081, -0.8106, -1.7535] ) if "stage1" not in checkpoint_url: assert logits.shape == expected_shape, "Shape of logits not as expected" assert torch.allclose(logits[0, 0, :10] , UpperCamelCase , atol=1e-3 ), "First elements of logits not as expected" Path(UpperCamelCase ).mkdir(exist_ok=UpperCamelCase ) print(f"Saving model to {pytorch_dump_folder_path}" ) model.save_pretrained(UpperCamelCase ) print(f"Saving processor to {pytorch_dump_folder_path}" ) processor.save_pretrained(UpperCamelCase ) if __name__ == "__main__": A = argparse.ArgumentParser() parser.add_argument( """--checkpoint_url""", default="""https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt""", type=str, help="""URL to the original PyTorch checkpoint (.pth file).""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model.""" ) A = parser.parse_args() convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
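The conversion script above migrates checkpoint keys with a pop-and-reinsert pattern and deletes parameters the HF model does not use. A self-contained toy sketch of that pattern; the keys and tensors are illustrative.

import torch

state_dict = {
    "encoder.deit.norm.weight": torch.ones(3),
    "decoder.version": torch.zeros(1),
}
rename_keys = [("encoder.deit.norm.weight", "encoder.layernorm.weight")]

for src, dest in rename_keys:
    state_dict[dest] = state_dict.pop(src)  # move the tensor under its new name
del state_dict["decoder.version"]           # drop a parameter the target model lacks
print(sorted(state_dict))                   # ['encoder.layernorm.weight']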
77
import numpy as np
import torch
from torch.utils.data import Dataset

from utils import logger


class LmSeqsDataset(Dataset):
    def __init__(self, params, data):
        self.params = params
        self.token_ids = np.array(data)
        self.lengths = np.array([len(t) for t in data])

        self.check()
        self.remove_long_sequences()
        self.remove_empty_sequences()
        self.remove_unknown_sequences()
        self.check()
        self.print_statistics()

    def __getitem__(self, index):
        return (self.token_ids[index], self.lengths[index])

    def __len__(self):
        return len(self.lengths)

    def check(self):
        assert len(self.token_ids) == len(self.lengths)
        assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))

    def remove_long_sequences(self):
        max_len = self.params.max_model_input_size
        indices = self.lengths > max_len
        logger.info(f"Splitting {sum(indices)} too long sequences.")

        def divide_chunks(l, n):
            return [l[i : i + n] for i in range(0, len(l), n)]

        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"]
        else:
            cls_id, sep_id = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"]

        for seq_, len_ in zip(self.token_ids, self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                sub_seqs = []
                for sub_s in divide_chunks(seq_, max_len - 2):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s, 0, cls_id)
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s, len(sub_s), sep_id)
                    assert len(sub_s) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s)

                new_tok_ids.extend(sub_seqs)
                new_lengths.extend([len(l) for l in sub_seqs])

        self.token_ids = np.array(new_tok_ids)
        self.lengths = np.array(new_lengths)

    def remove_empty_sequences(self):
        init_size = len(self)
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} too short (<=11 tokens) sequences.")

    def remove_unknown_sequences(self):
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids["unk_token"]
        init_size = len(self)
        unk_occs = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).")

    def print_statistics(self):
        if not self.params.is_master:
            return
        logger.info(f"{len(self)} sequences")
        # data_len = sum(self.lengths)
        # nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
        # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
        # unk_idx = self.params.special_tok_ids['unk_token']
        # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
        # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')

    def batch_sequences(self, batch):
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids) == len(lengths)

        # Max for paddings
        max_seq_len_ = max(lengths)

        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids["pad_token"]
        else:
            pad_idx = self.params.special_tok_ids["unk_token"]
        tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids]
        assert len(tk_) == len(token_ids)
        assert all(len(t) == max_seq_len_ for t in tk_)

        tk_t = torch.tensor(tk_)      # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths)  # (bs)
        return tk_t, lg_t
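A standalone sketch of the two ideas the dataset above relies on: splitting an over-long sequence into fixed-size chunks, and right-padding a batch to its longest sequence. The values are illustrative.

import numpy as np

def divide_chunks(seq, n):
    # Split one over-long sequence into chunks of at most n tokens.
    return [seq[i : i + n] for i in range(0, len(seq), n)]

def pad_batch(token_ids, pad_idx):
    # Right-pad every sequence to the longest one in the batch.
    max_len = max(len(t) for t in token_ids)
    return np.array([list(t) + [pad_idx] * (max_len - len(t)) for t in token_ids])

print(divide_chunks(list(range(7)), 3))           # [[0, 1, 2], [3, 4, 5], [6]]
print(pad_batch([[1, 5, 2], [1, 2]], pad_idx=0))  # rows padded to length 3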
0
0
'''simple docstring''' import json import os from dataclasses import dataclass from functools import partial from typing import Callable import flax.linen as nn import jax import jax.numpy as jnp import joblib import optax import wandb from flax import jax_utils, struct, traverse_util from flax.serialization import from_bytes, to_bytes from flax.training import train_state from flax.training.common_utils import shard from tqdm.auto import tqdm from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule class __A ( UpperCamelCase__ ): a__ : BigBirdConfig a__ : jnp.dtype = jnp.floataa a__ : bool = True def _lowercase (self : Dict ): super().setup() UpperCAmelCase_ = nn.Dense(5 , dtype=self.dtype ) def __call__(self : Optional[Any] , *__a : Tuple , **__a : List[Any] ): UpperCAmelCase_ = super().__call__(*__a , **__a ) UpperCAmelCase_ = self.cls(outputs[2] ) return outputs[:2] + (cls_out,) class __A ( UpperCamelCase__ ): a__ : str = FlaxBigBirdForNaturalQuestionsModule def lowerCAmelCase_ ( snake_case_ : Dict , snake_case_ : Any , snake_case_ : List[Any] , snake_case_ : str , snake_case_ : Optional[Any] , snake_case_ : List[str] ) -> str: '''simple docstring''' def cross_entropy(snake_case_ : Union[str, Any] , snake_case_ : Any , snake_case_ : Optional[Any]=None ): UpperCAmelCase_ = logits.shape[-1] UpperCAmelCase_ = (labels[..., None] == jnp.arange(snake_case_ )[None]).astype("f4" ) UpperCAmelCase_ = jax.nn.log_softmax(snake_case_ , axis=-1 ) UpperCAmelCase_ = -jnp.sum(labels * logits , axis=-1 ) if reduction is not None: UpperCAmelCase_ = reduction(snake_case_ ) return loss UpperCAmelCase_ = partial(snake_case_ , reduction=jnp.mean ) UpperCAmelCase_ = cross_entropy(snake_case_ , snake_case_ ) UpperCAmelCase_ = cross_entropy(snake_case_ , snake_case_ ) UpperCAmelCase_ = cross_entropy(snake_case_ , snake_case_ ) return (start_loss + end_loss + pooled_loss) / 3 @dataclass class __A : a__ : str = "google/bigbird-roberta-base" a__ : int = 3_000 a__ : int = 10_500 a__ : int = 128 a__ : int = 3 a__ : int = 1 a__ : int = 5 # tx_args a__ : float = 3e-5 a__ : float = 0.0 a__ : int = 20_000 a__ : float = 0.0_0_9_5 a__ : str = "bigbird-roberta-natural-questions" a__ : str = "training-expt" a__ : str = "data/nq-training.jsonl" a__ : str = "data/nq-validation.jsonl" def _lowercase (self : str ): os.makedirs(self.base_dir , exist_ok=__a ) UpperCAmelCase_ = os.path.join(self.base_dir , self.save_dir ) UpperCAmelCase_ = self.batch_size_per_device * jax.device_count() @dataclass class __A : a__ : int a__ : int = 4_096 # no dynamic padding on TPUs def __call__(self : List[Any] , __a : Any ): UpperCAmelCase_ = self.collate_fn(__a ) UpperCAmelCase_ = jax.tree_util.tree_map(__a , __a ) return batch def _lowercase (self : Tuple , __a : Tuple ): UpperCAmelCase_ , UpperCAmelCase_ = self.fetch_inputs(features["input_ids"] ) UpperCAmelCase_ = { "input_ids": jnp.array(__a , dtype=jnp.intaa ), "attention_mask": jnp.array(__a , dtype=jnp.intaa ), "start_labels": jnp.array(features["start_token"] , dtype=jnp.intaa ), "end_labels": jnp.array(features["end_token"] , dtype=jnp.intaa ), "pooled_labels": jnp.array(features["category"] , dtype=jnp.intaa ), } return batch def _lowercase (self : str , __a : list ): UpperCAmelCase_ = [self._fetch_inputs(__a ) for ids in input_ids] return zip(*__a ) def _lowercase (self : List[Any] , __a : list ): UpperCAmelCase_ = [1 for _ in range(len(__a ) )] while len(__a ) < 
self.max_length: input_ids.append(self.pad_id ) attention_mask.append(0 ) return input_ids, attention_mask def lowerCAmelCase_ ( snake_case_ : str , snake_case_ : Any , snake_case_ : List[Any]=None ) -> Any: '''simple docstring''' if seed is not None: UpperCAmelCase_ = dataset.shuffle(seed=snake_case_ ) for i in range(len(snake_case_ ) // batch_size ): UpperCAmelCase_ = dataset[i * batch_size : (i + 1) * batch_size] yield dict(snake_case_ ) @partial(jax.pmap , axis_name="batch" ) def lowerCAmelCase_ ( snake_case_ : Union[str, Any] , snake_case_ : List[Any] , **snake_case_ : Any ) -> Optional[int]: '''simple docstring''' def loss_fn(snake_case_ : Tuple ): UpperCAmelCase_ = model_inputs.pop("start_labels" ) UpperCAmelCase_ = model_inputs.pop("end_labels" ) UpperCAmelCase_ = model_inputs.pop("pooled_labels" ) UpperCAmelCase_ = state.apply_fn(**snake_case_ , params=snake_case_ , dropout_rng=snake_case_ , train=snake_case_ ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = outputs return state.loss_fn( snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ) UpperCAmelCase_ , UpperCAmelCase_ = jax.random.split(snake_case_ ) UpperCAmelCase_ = jax.value_and_grad(snake_case_ ) UpperCAmelCase_ , UpperCAmelCase_ = grad_fn(state.params ) UpperCAmelCase_ = jax.lax.pmean({"loss": loss} , axis_name="batch" ) UpperCAmelCase_ = jax.lax.pmean(snake_case_ , "batch" ) UpperCAmelCase_ = state.apply_gradients(grads=snake_case_ ) return state, metrics, new_drp_rng @partial(jax.pmap , axis_name="batch" ) def lowerCAmelCase_ ( snake_case_ : List[str] , **snake_case_ : Any ) -> str: '''simple docstring''' UpperCAmelCase_ = model_inputs.pop("start_labels" ) UpperCAmelCase_ = model_inputs.pop("end_labels" ) UpperCAmelCase_ = model_inputs.pop("pooled_labels" ) UpperCAmelCase_ = state.apply_fn(**snake_case_ , params=state.params , train=snake_case_ ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = outputs UpperCAmelCase_ = state.loss_fn(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) UpperCAmelCase_ = jax.lax.pmean({"loss": loss} , axis_name="batch" ) return metrics class __A ( train_state.TrainState ): a__ : Callable = struct.field(pytree_node=UpperCamelCase__ ) @dataclass class __A : a__ : Args a__ : Callable a__ : Callable a__ : Callable a__ : Callable a__ : wandb a__ : Callable = None def _lowercase (self : str , __a : Union[str, Any] , __a : List[Any] , __a : Any , __a : int=None ): UpperCAmelCase_ = model.params UpperCAmelCase_ = TrainState.create( apply_fn=model.__call__ , params=__a , tx=__a , loss_fn=__a , ) if ckpt_dir is not None: UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = restore_checkpoint(__a , __a ) UpperCAmelCase_ = { "lr": args.lr, "init_lr": args.init_lr, "warmup_steps": args.warmup_steps, "num_train_steps": num_train_steps, "weight_decay": args.weight_decay, } UpperCAmelCase_ , UpperCAmelCase_ = build_tx(**__a ) UpperCAmelCase_ = train_state.TrainState( step=__a , apply_fn=model.__call__ , params=__a , tx=__a , opt_state=__a , ) UpperCAmelCase_ = args UpperCAmelCase_ = data_collator UpperCAmelCase_ = lr UpperCAmelCase_ = params UpperCAmelCase_ = jax_utils.replicate(__a ) return state def _lowercase (self : Tuple , __a : Dict , __a : str , __a : Any ): UpperCAmelCase_ = self.args UpperCAmelCase_ = len(__a ) // args.batch_size UpperCAmelCase_ = jax.random.PRNGKey(0 ) UpperCAmelCase_ = jax.random.split(__a , jax.device_count() ) for epoch in range(args.max_epochs ): UpperCAmelCase_ 
= jnp.array(0 , dtype=jnp.floataa ) UpperCAmelCase_ = get_batched_dataset(__a , args.batch_size , seed=__a ) UpperCAmelCase_ = 0 for batch in tqdm(__a , total=__a , desc=f"""Running EPOCH-{epoch}""" ): UpperCAmelCase_ = self.data_collator(__a ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = self.train_step_fn(__a , __a , **__a ) running_loss += jax_utils.unreplicate(metrics["loss"] ) i += 1 if i % args.logging_steps == 0: UpperCAmelCase_ = jax_utils.unreplicate(state.step ) UpperCAmelCase_ = running_loss.item() / i UpperCAmelCase_ = self.scheduler_fn(state_step - 1 ) UpperCAmelCase_ = self.evaluate(__a , __a ) UpperCAmelCase_ = { "step": state_step.item(), "eval_loss": eval_loss.item(), "tr_loss": tr_loss, "lr": lr.item(), } tqdm.write(str(__a ) ) self.logger.log(__a , commit=__a ) if i % args.save_steps == 0: self.save_checkpoint(args.save_dir + f"""-e{epoch}-s{i}""" , state=__a ) def _lowercase (self : List[str] , __a : List[Any] , __a : Any ): UpperCAmelCase_ = get_batched_dataset(__a , self.args.batch_size ) UpperCAmelCase_ = len(__a ) // self.args.batch_size UpperCAmelCase_ = jnp.array(0 , dtype=jnp.floataa ) UpperCAmelCase_ = 0 for batch in tqdm(__a , total=__a , desc="Evaluating ... " ): UpperCAmelCase_ = self.data_collator(__a ) UpperCAmelCase_ = self.val_step_fn(__a , **__a ) running_loss += jax_utils.unreplicate(metrics["loss"] ) i += 1 return running_loss / i def _lowercase (self : Optional[int] , __a : List[str] , __a : int ): UpperCAmelCase_ = jax_utils.unreplicate(__a ) print(f"""SAVING CHECKPOINT IN {save_dir}""" , end=" ... " ) self.model_save_fn(__a , params=state.params ) with open(os.path.join(__a , "opt_state.msgpack" ) , "wb" ) as f: f.write(to_bytes(state.opt_state ) ) joblib.dump(self.args , os.path.join(__a , "args.joblib" ) ) joblib.dump(self.data_collator , os.path.join(__a , "data_collator.joblib" ) ) with open(os.path.join(__a , "training_state.json" ) , "w" ) as f: json.dump({"step": state.step.item()} , __a ) print("DONE" ) def lowerCAmelCase_ ( snake_case_ : Dict , snake_case_ : Any ) -> Optional[int]: '''simple docstring''' print(f"""RESTORING CHECKPOINT FROM {save_dir}""" , end=" ... 
" ) with open(os.path.join(snake_case_ , "flax_model.msgpack" ) , "rb" ) as f: UpperCAmelCase_ = from_bytes(state.params , f.read() ) with open(os.path.join(snake_case_ , "opt_state.msgpack" ) , "rb" ) as f: UpperCAmelCase_ = from_bytes(state.opt_state , f.read() ) UpperCAmelCase_ = joblib.load(os.path.join(snake_case_ , "args.joblib" ) ) UpperCAmelCase_ = joblib.load(os.path.join(snake_case_ , "data_collator.joblib" ) ) with open(os.path.join(snake_case_ , "training_state.json" ) , "r" ) as f: UpperCAmelCase_ = json.load(snake_case_ ) UpperCAmelCase_ = training_state["step"] print("DONE" ) return params, opt_state, step, args, data_collator def lowerCAmelCase_ ( snake_case_ : Optional[int] , snake_case_ : Any , snake_case_ : Optional[Any] , snake_case_ : str ) -> Union[str, Any]: '''simple docstring''' UpperCAmelCase_ = num_train_steps - warmup_steps UpperCAmelCase_ = optax.linear_schedule(init_value=snake_case_ , end_value=snake_case_ , transition_steps=snake_case_ ) UpperCAmelCase_ = optax.linear_schedule(init_value=snake_case_ , end_value=1E-7 , transition_steps=snake_case_ ) UpperCAmelCase_ = optax.join_schedules(schedules=[warmup_fn, decay_fn] , boundaries=[warmup_steps] ) return lr def lowerCAmelCase_ ( snake_case_ : Tuple , snake_case_ : Tuple , snake_case_ : Optional[Any] , snake_case_ : Optional[Any] , snake_case_ : int ) -> Optional[Any]: '''simple docstring''' def weight_decay_mask(snake_case_ : Any ): UpperCAmelCase_ = traverse_util.flatten_dict(snake_case_ ) UpperCAmelCase_ = {k: (v[-1] != "bias" and v[-2:] != ("LayerNorm", "scale")) for k, v in params.items()} return traverse_util.unflatten_dict(snake_case_ ) UpperCAmelCase_ = scheduler_fn(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) UpperCAmelCase_ = optax.adamw(learning_rate=snake_case_ , weight_decay=snake_case_ , mask=snake_case_ ) return tx, lr
78
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging SCREAMING_SNAKE_CASE__ : str = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : Tuple = """▁""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = {"""vocab_file""": """spiece.model"""} SCREAMING_SNAKE_CASE__ : List[Any] = { """vocab_file""": { """google/reformer-crime-and-punishment""": ( """https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model""" ) } } SCREAMING_SNAKE_CASE__ : Optional[int] = { """google/reformer-crime-and-punishment""": 52_42_88, } class lowerCamelCase_ ( lowerCamelCase ): a__ = VOCAB_FILES_NAMES a__ = PRETRAINED_VOCAB_FILES_MAP a__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a__ = ['''input_ids''', '''attention_mask'''] def __init__( self , __lowerCAmelCase , __lowerCAmelCase="</s>" , __lowerCAmelCase="<unk>" , __lowerCAmelCase=[] , __lowerCAmelCase = None , **__lowerCAmelCase , ): """simple docstring""" __magic_name__ :int = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( eos_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , additional_special_tokens=__lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCAmelCase , ) __magic_name__ :Optional[Any] = vocab_file __magic_name__ :int = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(__lowerCAmelCase ) @property def A ( self ): """simple docstring""" return self.sp_model.get_piece_size() def A ( self ): """simple docstring""" __magic_name__ :str = {self.convert_ids_to_tokens(__lowerCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ): """simple docstring""" __magic_name__ :Optional[Any] = self.__dict__.copy() __magic_name__ :Optional[Any] = None return state def __setstate__( self , __lowerCAmelCase ): """simple docstring""" __magic_name__ :Any = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): __magic_name__ :Optional[int] = {} __magic_name__ :Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def A ( self , __lowerCAmelCase ): """simple docstring""" return self.sp_model.encode(__lowerCAmelCase , out_type=__lowerCAmelCase ) def A ( self , __lowerCAmelCase ): """simple docstring""" return self.sp_model.piece_to_id(__lowerCAmelCase ) def A ( self , __lowerCAmelCase ): """simple docstring""" if index < self.sp_model.get_piece_size(): __magic_name__ :int = self.sp_model.IdToPiece(__lowerCAmelCase ) return token def A ( self , __lowerCAmelCase ): """simple docstring""" __magic_name__ :Optional[Any] = [] __magic_name__ :Tuple = '''''' for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(__lowerCAmelCase ) + token __magic_name__ :Optional[Any] = [] else: current_sub_tokens.append(__lowerCAmelCase ) out_string += self.sp_model.decode(__lowerCAmelCase ) return out_string.strip() def A ( self , __lowerCAmelCase , __lowerCAmelCase = None ): """simple docstring""" if not os.path.isdir(__lowerCAmelCase ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return __magic_name__ :Optional[int] = os.path.join( __lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file 
) != os.path.abspath(__lowerCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __lowerCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(__lowerCAmelCase , '''wb''' ) as fi: __magic_name__ :Dict = self.sp_model.serialized_model_proto() fi.write(__lowerCAmelCase ) return (out_vocab_file,)
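The tokenizer above converts tokens back to a string by decoding normal pieces with the sentencepiece model and splicing special tokens in verbatim. A standalone sketch of that strategy; fake_decode is an illustrative stand-in for sp_model.decode.

def fake_decode(pieces):
    # Stand-in for sp_model.decode: join pieces and map the "▁" marker to a space.
    return "".join(pieces).replace("\u2581", " ")

def tokens_to_string(tokens, special_tokens):
    out, current = "", []
    for token in tokens:
        if token in special_tokens:
            out += fake_decode(current) + token  # special tokens pass through verbatim
            current = []
        else:
            current.append(token)
    return (out + fake_decode(current)).strip()

print(tokens_to_string(["\u2581Hello", "\u2581world", "</s>"], {"</s>"}))
# Hello world</s>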
0
0
__version__ = "0.21.0"

from .accelerator import Accelerator
from .big_modeling import (
    cpu_offload,
    cpu_offload_with_hook,
    disk_offload,
    dispatch_model,
    init_empty_weights,
    init_on_device,
    load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
    DeepSpeedPlugin,
    DistributedDataParallelKwargs,
    DistributedType,
    FullyShardedDataParallelPlugin,
    GradScalerKwargs,
    InitProcessGroupKwargs,
    find_executable_batch_size,
    infer_auto_device_map,
    is_rich_available,
    load_checkpoint_in_model,
    synchronize_rng_states,
)

if is_rich_available():
    from .utils import rich
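Among the utilities re-exported above is find_executable_batch_size. A simplified sketch of the retry-and-halve idea behind it, assuming (unlike the real utility) that any RuntimeError signals an out-of-memory failure.

import functools

def find_executable_batch_size(function, starting_batch_size=128):
    # Call function(batch_size, ...), halving batch_size whenever it fails.
    @functools.wraps(function)
    def wrapper(*args, **kwargs):
        batch_size = starting_batch_size
        while batch_size > 0:
            try:
                return function(batch_size, *args, **kwargs)
            except RuntimeError:  # stand-in for a real out-of-memory check
                batch_size //= 2
        raise RuntimeError("No executable batch size found; reached zero.")
    return wrapper

def train(batch_size):
    if batch_size > 16:
        raise RuntimeError("simulated out-of-memory")
    return batch_size

print(find_executable_batch_size(train, starting_batch_size=64)())  # 16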
79
import os
import unittest

from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
    VOCAB_FILES_NAMES,
    BasicTokenizer,
    WordpieceTokenizer,
    _is_control,
    _is_punctuation,
    _is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english


@require_tokenizers
class MobileBERTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MobileBertTokenizer
    rust_tokenizer_class = MobileBertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    pre_trained_model_path = "google/mobilebert-uncased"

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        self.tokenizers_list = [
            (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2])  # else the 'google/' prefix is stripped
            for tokenizer_def in self.tokenizers_list
        ]

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True)
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True)

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_chinese(self):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])

    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(" "))
        self.assertTrue(_is_whitespace("\t"))
        self.assertTrue(_is_whitespace("\r"))
        self.assertTrue(_is_whitespace("\n"))
        self.assertTrue(_is_whitespace("\u00A0"))

        self.assertFalse(_is_whitespace("A"))
        self.assertFalse(_is_whitespace("-"))

    def test_is_control(self):
        self.assertTrue(_is_control("\u0005"))

        self.assertFalse(_is_control("A"))
        self.assertFalse(_is_control(" "))
        self.assertFalse(_is_control("\t"))
        self.assertFalse(_is_control("\r"))

    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation("-"))
        self.assertTrue(_is_punctuation("$"))
        self.assertTrue(_is_punctuation("`"))
        self.assertTrue(_is_punctuation("."))

        self.assertFalse(_is_punctuation("A"))
        self.assertFalse(_is_punctuation(" "))

    def test_clean_text(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
        self.assertListEqual(
            [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
        )

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("google/mobilebert-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_2 + [102]

    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "A"),
                        ((1, 2), ","),
                        ((3, 5), "na"),
                        ((5, 6), "##ï"),
                        ((6, 8), "##ve"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "Allen"),
                        ((21, 23), "##NL"),
                        ((23, 24), "##P"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "a"),
                        ((1, 2), ","),
                        ((3, 8), "naive"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "allen"),
                        ((21, 23), "##nl"),
                        ((23, 24), "##p"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )

                self.assertEqual(
                    [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"])
                )
                self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])

    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
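# --- Hedged usage sketch (not part of the original test file) ---
# A minimal, self-contained illustration of the two-stage pipeline the tests above
# exercise: BasicTokenizer normalizes and splits on whitespace/punctuation, then
# WordpieceTokenizer greedily matches the longest subword prefix against a vocab.
# The toy vocab below is an assumption chosen to reproduce the expected output of
# test_full_tokenizer; both classes are already imported at the top of this file.
def _demo_basic_plus_wordpiece():
    basic = BasicTokenizer(do_lower_case=True)  # lowercasing also strips accents by default
    vocab = {token: i for i, token in enumerate(["[UNK]", "un", "##want", "##ed", ",", "runn", "##ing"])}
    wordpiece = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")
    tokens = []
    for word in basic.tokenize("UNwant\u00E9d,running"):
        tokens.extend(wordpiece.tokenize(word))
    return tokens  # expected: ['un', '##want', '##ed', ',', 'runn', '##ing']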
import unittest

from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax

from .test_modeling_common_flax import FlaxModelTesterMixin


if is_flax_available():
    import jax


@require_flax
class FlaxAutoencoderKLTests(FlaxModelTesterMixin, unittest.TestCase):
    model_class = FlaxAutoencoderKL

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        prng_key = jax.random.PRNGKey(0)
        image = jax.random.uniform(prng_key, ((batch_size, num_channels) + sizes))

        return {"sample": image, "prng_key": prng_key}

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
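# --- Hedged usage sketch (not part of the original test file) ---
# How the fixture above might be used directly: build FlaxAutoencoderKL with the same
# init_dict and run one forward pass through the standard flax.linen init/apply flow.
# Constructor and apply details can differ across diffusers versions, so treat this
# as a sketch under those assumptions rather than a pinned API.
def _demo_flax_autoencoder_forward():
    model = FlaxAutoencoderKL(
        block_out_channels=[32, 64],
        in_channels=3,
        out_channels=3,
        down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
        up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
        latent_channels=4,
    )
    prng_key = jax.random.PRNGKey(0)
    sample = jax.random.uniform(prng_key, (4, 3, 32, 32))
    params = model.init(prng_key, sample)  # deterministic path: no extra rng streams needed
    return model.apply(params, sample)     # encode -> decode reconstruction of `sample`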
import logging
import os

import quant_trainer
import torch
from torch.utils.data import DataLoader

from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput


logger = logging.getLogger(__name__)

if is_torch_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm
    import torch_xla.debug.metrics as met


class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, quant_trainer_args=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
        self.quant_trainer_args = quant_trainer_args
        self.calib_num = 128  # default number of calibration samples

    def get_calib_dataloader(self, calib_dataset=None):
        if calib_dataset is None and self.calib_dataset is None:
            raise ValueError("Trainer: calibration requires an calib_dataset.")
        calib_dataset = calib_dataset if calib_dataset is not None else self.calib_dataset

        calib_dataset = self._remove_unused_columns(calib_dataset, description="Calibration")
        return DataLoader(
            calib_dataset,
            batch_size=self.args.eval_batch_size,
            collate_fn=self.data_collator,
            drop_last=self.args.dataloader_drop_last,
            num_workers=self.args.dataloader_num_workers,
            pin_memory=self.args.dataloader_pin_memory,
            shuffle=True,
        )

    def calibrate(self, calib_dataset=None):
        calib_dataset = self.train_dataset if calib_dataset is None else calib_dataset
        calib_dataloader = self.get_calib_dataloader(calib_dataset)

        model = self.model
        quant_trainer.configure_model(model, self.quant_trainer_args, calib=True)
        model.eval()
        quant_trainer.enable_calibration(model)

        logger.info("***** Running calibration *****")
        logger.info(f"  Num examples = {self.calib_num}")
        logger.info(f"  Batch size = {calib_dataloader.batch_size}")

        for step, inputs in enumerate(calib_dataloader):
            # Prediction step
            loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only=False)
            if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
                break

        quant_trainer.finish_calibration(model, self.quant_trainer_args)
        self.model = model

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
            )
        finally:
            self.compute_metrics = compute_metrics

        if self.post_process_function is not None and self.compute_metrics is not None:
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)

            self.log(metrics)
        else:
            metrics = {}

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
            )
        finally:
            self.compute_metrics = compute_metrics

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)

        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)

    def save_onnx(self, output_dir="./"):
        eval_dataset = self.eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)

        batch = next(iter(eval_dataloader))

        # saving device - to make it consistent
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        # convert to tuple
        input_tuple = tuple(v.to(device) for k, v in batch.items())

        logger.info("Converting model to be onnx compatible")
        from pytorch_quantization.nn import TensorQuantizer

        TensorQuantizer.use_fb_fake_quant = True

        model = self.model.to(device)
        model.eval()
        model.float()

        model_to_save = model.module if hasattr(model, "module") else model
        quant_trainer.configure_model(model_to_save, self.quant_trainer_args)

        output_model_file = os.path.join(output_dir, "model.onnx")
        logger.info(f"exporting model to {output_model_file}")

        axes = {0: "batch_size", 1: "seq_len"}

        torch.onnx.export(
            model_to_save,
            input_tuple,
            output_model_file,
            export_params=True,
            opset_version=13,
            do_constant_folding=True,
            input_names=["input_ids", "attention_mask", "token_type_ids"],
            output_names=["output_start_logits", "output_end_logits"],
            dynamic_axes={
                "input_ids": axes,
                "attention_mask": axes,
                "token_type_ids": axes,
                "output_start_logits": axes,
                "output_end_logits": axes,
            },
            verbose=True,
        )
        logger.info("onnx export finished")
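# --- Hedged usage sketch (not part of the original module) ---
# The intended calibrate -> evaluate -> export flow of QuestionAnsweringTrainer.
# Every argument below is a placeholder the caller must supply (model, training_args,
# datasets, the post-processing and metric callbacks, quant_trainer_args); the real
# wiring lives in the example's run script, not in this module.
def _demo_quant_qa_flow(model, training_args, train_dataset, eval_dataset, eval_examples,
                        post_process_function, compute_metrics, quant_trainer_args):
    trainer = QuestionAnsweringTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        eval_examples=eval_examples,
        post_process_function=post_process_function,
        compute_metrics=compute_metrics,
        quant_trainer_args=quant_trainer_args,
    )
    trainer.calibrate()                           # stream calibration batches through the quantizers
    metrics = trainer.evaluate()                  # post-processed QA metrics, keys prefixed "eval_"
    trainer.save_onnx(training_args.output_dir)   # export the fake-quantized model to ONNX
    return metrics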