| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (lengths 86 to 54.5k) | int64 (0 to 371) | string (lengths 87 to 49.2k) | int64 (0 to 349) | int64 (0 to 1) |
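Each row pairs a `code` sample with a `style_context` sample and a binary `label`. A minimal sketch of reading such a row with the `datasets` library follows; the dataset id is a placeholder, not a real repository:

from datasets import load_dataset

ds = load_dataset("username/code-style-pairs", split="train")  # hypothetical id
row = ds[0]
print(row["label"])                # 0 or 1
print(row["code"][:200])           # first code sample
print(row["style_context"][:200])  # code sample it is compared against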
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json",
    # See all LeViT models at https://huggingface.co/models?filter=levit
}


class LevitConfig(PretrainedConfig):
    model_type = "levit"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 8, 12],
        depths=[4, 4, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]


class LevitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
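A quick usage sketch for the config above, using the public transformers API (LevitConfig and LevitModel are the exported names):

from transformers import LevitConfig, LevitModel

config = LevitConfig()        # defaults as in __init__ above
model = LevitModel(config)    # randomly initialised LeViT backbone
print(config.hidden_sizes)    # [128, 256, 384]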
| 4 |
'''simple docstring'''
class Node:
    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None

    def insert(self, val):
        # `is not None` (rather than a bare truthiness test) keeps 0 usable as a key
        if self.val is not None:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
        else:
            self.val = val


def inorder(root, res):
    # Recursive in-order traversal: left subtree, node, right subtree
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)


def tree_sort(arr):
    # Build the BST
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # Traverse the BST in order.
    res = []
    inorder(root, res)
    return res


if __name__ == "__main__":
    print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
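A quick sanity check for tree_sort; assumes the definitions above and uses distinct keys, since equal keys are silently dropped by insert:

import random

data = random.sample(range(1000), 50)   # 50 distinct keys
assert tree_sort(data) == sorted(data)  # agrees with the built-in sort
assert tree_sort([]) == []              # empty input passes through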
| 4 | 1 |
'''simple docstring'''
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param(torch_layer, weight, bias=None):
    # set parameter of one layer
    assert torch_layer.weight.shape == weight.shape, f"{torch_layer} layer.weight does not match"
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"{torch_layer} layer.bias does not match"
        torch_layer.bias = nn.Parameter(bias)


def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])

    set_param(
        torch_layer.self_attention.query_key,
        torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )


def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])

    set_param(
        torch_layer.self_attention.query,
        torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.key,
        torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )


def set_block_weights_in_torch(weights, torch_block, hidden_size):
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm,
        torch.tensor(layer_norm_1_weight),
        torch.tensor(layer_norm_1_bias),
    )

    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)

    # intermediate weights
    intermediate_weights = weights[2][0][1][2]

    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]

    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm,
        torch.tensor(layer_norm_2_weight),
        torch.tensor(layer_norm_2_bias),
    )

    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense,
        torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(inter_dense_bias),
    )

    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense,
        torch.tensor(out_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(out_dense_bias),
    )


def set_model_weights_in_torch(weights, torch_model, hidden_size):
    # reformer model
    torch_model_reformer = torch_model.reformer

    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings,
        torch.tensor(word_embeddings),
    )

    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"{position_embeddings[emb_idx]} emb does not match"
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))

    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)

    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm,
        torch.tensor(layer_norm_out_weight),
        torch.tensor(layer_norm_out_bias),
    )

    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder,
        torch.tensor(output_embed_weights).transpose(0, 1).contiguous(),
        torch.tensor(output_embed_bias),
    )


def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = ReformerModelWithLMHead(config)

    with open(trax_model_pkl_path, "rb") as f:
        model_weights = pickle.load(f)["weights"]

    set_model_weights_in_torch(model_weights, model, config.hidden_size)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
snake_case__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
    """--trax_model_pkl_path""", default=None, type=str, required=True, help="""Path to the trax model pickle file."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained Reformer model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
snake_case__ = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
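The converter can also be driven programmatically instead of via the CLI; all paths below are placeholders:

convert_trax_checkpoint_to_pytorch(
    trax_model_pkl_path="/path/to/reformer_weights.pkl",  # trax pickle
    config_file="/path/to/reformer_config.json",          # ReformerConfig json
    pytorch_dump_path="/path/to/pytorch_model.bin",       # output file
)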
| 4 |
'''simple docstring'''
def heaps(arr: list) -> list:
    """Generate all permutations of `arr` with Heap's algorithm."""
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(k: int, arr: list):
        if k == 1:
            res.append(tuple(arr[:]))
            return

        generate(k - 1, arr)

        for i in range(k - 1):
            if k % 2 == 0:  # k is even: swap the i-th and last elements
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else:  # k is odd: swap the first and last elements
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
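A quick check that Heap's algorithm emits exactly n! distinct permutations (assumes heaps from above):

import math

perms = heaps([1, 2, 3, 4])
assert len(perms) == math.factorial(4)  # 24 permutations
assert len(set(perms)) == len(perms)    # all distinct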
| 4 | 1 |
'''simple docstring'''
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class TestTokenizationLED(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LEDTokenizer
    rust_tokenizer_class = LEDTokenizerFast
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return LEDTokenizer.from_pretrained("allenai/led-base-16384")

    @cached_property
    def default_tokenizer_fast(self):
        return LEDTokenizerFast.from_pretrained("allenai/led-base-16384")

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]

        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)

            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)

    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)

    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])

    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt"
            )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 5122))

    @require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, return_tensors="pt")
            targets = tokenizer(text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = targets["input_ids"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())

    @require_torch
    def test_global_attention_mask(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            src_text = ["Summary of the text.", "Another summary."]
            expected_global_attention_mask = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]

            encoded_output = tokenizer(src_text, padding=False)
            encoded_output["global_attention_mask"] = [[0] * len(x) for x in encoded_output["input_ids"]]
            outputs = tokenizer.pad(encoded_output)
            self.assertSequenceEqual(outputs["global_attention_mask"], expected_global_attention_mask)

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
| 4 |
'''simple docstring'''
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class StreamerTester(unittest.TestCase):
    def test_text_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, greedy_text)

    def test_iterator_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        streamer = TextIteratorStreamer(tokenizer)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()
        streamer_text = ""
        for new_text in streamer:
            streamer_text += new_text

        self.assertEqual(streamer_text, greedy_text)

    def test_text_streamer_skip_prompt(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        new_greedy_ids = greedy_ids[:, input_ids.shape[1] :]
        new_greedy_text = tokenizer.decode(new_greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_prompt=True)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, new_greedy_text)

    def test_text_streamer_decode_kwargs(self):
        tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
        model = AutoModelForCausalLM.from_pretrained("distilgpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = torch.ones((1, 5), device=torch_device).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_special_tokens=True)
            model.generate(input_ids, max_new_tokens=1, do_sample=False, streamer=streamer)

        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        streamer_text = cs.out[:-1]  # Remove the final "\n"
        streamer_text_tokenized = tokenizer(streamer_text, return_tensors="pt")
        self.assertEqual(streamer_text_tokenized.input_ids.shape, (1, 1))

    def test_iterator_streamer_timeout(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        streamer = TextIteratorStreamer(tokenizer, timeout=0.001)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()

        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(Empty):
            streamer_text = ""
            for new_text in streamer:
                streamer_text += new_text
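A minimal sketch of the streaming pattern these tests exercise, outside of unittest; any causal LM works and no GPU is assumed:

from threading import Thread

from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
model = AutoModelForCausalLM.from_pretrained("distilgpt2")

inputs = tokenizer("The quick brown fox", return_tensors="pt")
streamer = TextIteratorStreamer(tokenizer, skip_prompt=True)
thread = Thread(target=model.generate, kwargs={**inputs, "max_new_tokens": 20, "streamer": streamer})
thread.start()
for new_text in streamer:  # text arrives as it is generated
    print(new_text, end="", flush=True)
thread.join()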
| 4 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/table-transformer-detection": (
        "https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"
    ),
}


class TableTransformerConfig(PretrainedConfig):
    model_type = "table-transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model


class TableTransformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
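A usage sketch for the config above with the public transformers API:

from transformers import TableTransformerConfig, TableTransformerForObjectDetection

config = TableTransformerConfig(num_queries=125, num_labels=6)
model = TableTransformerForObjectDetection(config)  # randomly initialised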
| 4 |
'''simple docstring'''
import heapq
def greedy_min_vertex_cover(graph: dict) -> set[int]:
    """Greedy APX algorithm for minimum vertex cover.

    >>> graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    >>> greedy_min_vertex_cover(graph)
    {0, 1, 2, 4}
    """
    queue: list[list] = []

    # For each node and its adjacency list, push the node's rank and the node onto the queue.
    # Using the heapq module, the queue behaves like a priority queue.
    # heapq implements a min-priority queue, so -1 * len(v) is used to get max-rank behaviour.
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])

    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()

    # While the queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # Extract the vertex with max rank from the queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)

        # Remove all arcs adjacent to argmax
        for elem in queue:
            # If v has no adjacent nodes, skip it
            if elem[0] == 0:
                continue
            # If argmax is reachable from elem,
            # remove argmax from elem's adjacency list and update its rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1
        # Re-order the queue
        heapq.heapify(queue)
    return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
snake_case__ = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(F'Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}')
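A quick check (assumes greedy_min_vertex_cover from above) that the returned set covers every edge; the graph is deep-copied because the function mutates the adjacency lists it is given:

import copy

graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
cover = greedy_min_vertex_cover(copy.deepcopy(graph))
assert all(u in cover or v in cover for u in graph for v in graph[u])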
| 4 | 1 |
'''simple docstring'''
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
snake_case__ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
snake_case__ = """ def __init__(self, config):
super().__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
"""
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.transformer_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir, "models/bert/"))
        check_copies.TRANSFORMER_PATH = self.transformer_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/transformers/models/bert/modeling_bert.py"),
            os.path.join(self.transformer_dir, "models/bert/modeling_bert.py"),
        )

    def tearDown(self):
        check_copies.TRANSFORMER_PATH = "src/transformers"
        shutil.rmtree(self.transformer_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        code = black.format_str(code, mode=black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119))
        fname = os.path.join(self.transformer_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)

    def test_find_code_in_transformers(self):
        code = check_copies.find_code_in_transformers("models.bert.modeling_bert.BertLMPredictionHead")
        self.assertEqual(code, REFERENCE_CODE)
    def test_copy_consistency(self):
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE + "\n",
        )
        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE,
        )
        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            re.sub("Bert", "TestModel", REFERENCE_CODE),
        )
        # Copy consistency with a really long name
        long_class_name = "TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}",
            f"{long_class_name}LMPredictionHead",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            REFERENCE_CODE,
            overwrite_result=re.sub("Bert", "TestModel", REFERENCE_CODE),
        )
    def test_convert_to_localized_md(self):
        localized_readme = check_copies.LOCALIZED_READMES["README_zh-hans.md"]

        md_list = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'''
''' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'''
''' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'''
''' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1.'''
''' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),'''
''' released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'''
''' lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same'''
''' method has been applied to compress GPT2 into'''
''' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'''
''' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'''
''' Multilingual BERT into'''
''' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'''
''' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**'''
''' (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders'''
''' as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang'''
''' Luong, Quoc V. Le, Christopher D. Manning.'''
)
        localized_md_list = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
        converted_localized_md_list = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1.'''
''' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文'''
''' [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'''
''' lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same'''
''' method has been applied to compress GPT2 into'''
''' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'''
''' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'''
''' Multilingual BERT into'''
''' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'''
''' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自'''
''' Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather'''
''' than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,'''
''' Christopher D. Manning 发布。\n'''
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, localized_md_list, localized_readme["format_model_list"]
        )

        self.assertFalse(num_models_equal)
        self.assertEqual(converted_md_list, converted_localized_md_list)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, converted_localized_md_list, localized_readme["format_model_list"]
        )

        # Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(num_models_equal)
        link_changed_md_list = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'''
''' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'''
''' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'''
''' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.'''
)
        link_unchanged_md_list = (
'''1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and'''
''' the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
        converted_md_list_sample = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            link_changed_md_list, link_unchanged_md_list, localized_readme["format_model_list"]
        )

        # Check if the model link is synchronized.
        self.assertEqual(converted_md_list, converted_md_list_sample)
| 4 |
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'transformer.encoder.layers.{i}.self_attn.out_proj.weight', F'encoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(F'transformer.encoder.layers.{i}.self_attn.out_proj.bias', F'encoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.weight', F'encoder.layers.{i}.fc1.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.bias', F'encoder.layers.{i}.fc1.bias'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.weight', F'encoder.layers.{i}.fc2.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.bias', F'encoder.layers.{i}.fc2.bias'))
rename_keys.append(
(F'transformer.encoder.layers.{i}.norm1.weight', F'encoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((F'transformer.encoder.layers.{i}.norm1.bias', F'encoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.weight', F'encoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.bias', F'encoder.layers.{i}.final_layer_norm.bias'))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'transformer.decoder.layers.{i}.self_attn.out_proj.weight', F'decoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.self_attn.out_proj.bias', F'decoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append(
(
F'transformer.decoder.layers.{i}.multihead_attn.out_proj.weight',
F'decoder.layers.{i}.encoder_attn.out_proj.weight',
)
)
rename_keys.append(
(
F'transformer.decoder.layers.{i}.multihead_attn.out_proj.bias',
F'decoder.layers.{i}.encoder_attn.out_proj.bias',
)
)
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.weight', F'decoder.layers.{i}.fc1.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.bias', F'decoder.layers.{i}.fc1.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.weight', F'decoder.layers.{i}.fc2.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.bias', F'decoder.layers.{i}.fc2.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm1.weight', F'decoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((F'transformer.decoder.layers.{i}.norm1.bias', F'decoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm2.weight', F'decoder.layers.{i}.encoder_attn_layer_norm.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm2.bias', F'decoder.layers.{i}.encoder_attn_layer_norm.bias')
)
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.weight', F'decoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.bias', F'decoder.layers.{i}.final_layer_norm.bias'))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.encoder.norm.weight""", """encoder.layernorm.weight"""),
("""transformer.encoder.norm.bias""", """encoder.layernorm.bias"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value

    return new_state_dict
def read_in_q_k_v(state_dict):
    prefix = ""

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
def resize(image, checkpoint_url):
    width, height = image.size
    current_max_size = max(width, height)
    target_max_size = 800 if "detection" in checkpoint_url else 1000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))

    return resized_image


def normalize(image):
    image = F.to_tensor(image)
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return image
@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")

    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)

    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict)

    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "model."
    for key in state_dict.copy().keys():
        if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val

    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone="resnet18",
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        ce_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.4,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
    )

    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        id2label = {0: "table", 1: "table rotated"}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        id2label = {
            0: "table",
            1: "table column",
            2: "table row",
            3: "table column header",
            4: "table projected row header",
            5: "table spanning cell",
        }
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    image_processor = DetrImageProcessor(
        format="coco_detection", max_size=800 if "detection" in checkpoint_url else 1000
    )
    model = TableTransformerForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    # verify our conversion
    filename = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
    file_path = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename=filename)
    image = Image.open(file_path).convert("RGB")
    pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)

    outputs = model(pixel_values)

    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]]
        )
        expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]])
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]]
        )
        expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]])

    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model to HF hub
        logger.info("Pushing model to the hub...")
        model_name = (
            "microsoft/table-transformer-detection"
            if "detection" in checkpoint_url
            else "microsoft/table-transformer-structure-recognition"
        )
        model.push_to_hub(model_name)
        image_processor.push_to_hub(model_name)
if __name__ == "__main__":
snake_case__ = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""",
type=str,
choices=[
"""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""",
"""https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth""",
],
help="""URL of the Table Transformer checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
snake_case__ = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 4 | 1 |
'''simple docstring'''
def solution(n: int = 1000) -> int:
    """Return the index of the first term in the Fibonacci sequence to contain n digits."""
    f1, f2 = 1, 1
    index = 2
    while True:
        f = f1 + f2
        f1, f2 = f2, f
        index += 1
        if len(str(f)) == n:  # count the digits of the new term
            break
    return index
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
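Quick checks for solution (assumes the definition above):

assert solution(2) == 7   # F(7) = 13 is the first 2-digit Fibonacci number
assert solution(3) == 12  # F(12) = 144 is the first 3-digit Fibonacci number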
| 4 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class InputExample:
    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None


@dataclass(frozen=True)
class InputFeatures:
    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
    class HansDataset(Dataset):
        features: List[InputFeatures]

        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()

            cached_features_file = os.path.join(
                data_dir,
                "cached_{}_{}_{}_{}".format(
                    "dev" if evaluate else "train",
                    tokenizer.__class__.__name__,
                    str(max_seq_length),
                    task,
                ),
            )
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list

            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = (
                        processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
                    )
                    logger.info("Training examples: %s", len(examples))
                    self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)
                    logger.info("Saving features into cached file %s", cached_features_file)
                    torch.save(self.features, cached_features_file)

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]

        def get_labels(self):
            return self.label_list
if is_tf_available():
import tensorflow as tf
class UpperCamelCase_ :
"""simple docstring"""
_lowerCAmelCase = 42
def __init__( self : Optional[int] , _lowerCamelCase : str , _lowerCamelCase : PreTrainedTokenizer , _lowerCamelCase : str , _lowerCamelCase : Optional[int] = 128 , _lowerCamelCase : Dict=False , _lowerCamelCase : bool = False , ):
"""simple docstring"""
A_ : Optional[int] = hans_processors[task]()
A_ : Optional[int] = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
A_ ,A_ : Union[str, Any] = label_list[2], label_list[1]
A_ : Tuple = label_list
A_ : Optional[int] = processor.get_dev_examples(_lowerCamelCase ) if evaluate else processor.get_train_examples(_lowerCamelCase )
A_ : Tuple = hans_convert_examples_to_features(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='''convert examples to features''' ):
if ex_index % 10000 == 0:
logger.info('''Writing example %d of %d''' % (ex_index, len(_lowerCamelCase )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
A_ : List[Any] = tf.data.Dataset.from_generator(
_lowerCamelCase , (
{
'''example_id''': tf.intaa,
'''input_ids''': tf.intaa,
'''attention_mask''': tf.intaa,
'''token_type_ids''': tf.intaa,
},
tf.intaa,
) , (
{
'''example_id''': tf.TensorShape([] ),
'''input_ids''': tf.TensorShape([None, None] ),
'''attention_mask''': tf.TensorShape([None, None] ),
'''token_type_ids''': tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) , )
def _a ( self : Any ):
"""simple docstring"""
return self.dataset
def __len__( self : Dict ):
"""simple docstring"""
return len(self.features )
def __getitem__( self : Optional[int] , _lowerCamelCase : List[str] ):
"""simple docstring"""
return self.features[i]
        def get_labels( self ):
            """simple docstring"""
            return self.label_list
class HansProcessor(DataProcessor ):
"""simple docstring"""
    def get_train_examples( self , data_dir ):
        """simple docstring"""
        return self._create_examples(self._read_tsv(os.path.join(data_dir , '''heuristics_train_set.txt''' ) ) , '''train''' )

    def get_dev_examples( self , data_dir ):
        """simple docstring"""
        return self._create_examples(self._read_tsv(os.path.join(data_dir , '''heuristics_evaluation_set.txt''' ) ) , '''dev''' )

    def get_labels( self ):
        """simple docstring"""
        return ["contradiction", "entailment", "neutral"]

    def _create_examples( self , lines , set_type ):
        """simple docstring"""
        examples = []
        for i, line in enumerate(lines ):
            if i == 0:
                continue
            guid = '''%s-%s''' % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            label = line[7][2:] if line[7].startswith('''ex''' ) else line[7]
            pairID = line[0]
            examples.append(InputExample(guid=guid , text_a=text_a , text_b=text_b , label=label , pairID=pairID ) )
        return examples
def hans_convert_examples_to_features( examples: List[InputExample] , label_list: List[str] , max_length: int , tokenizer: PreTrainedTokenizer , ) -> List[InputFeatures]:
    label_map = {label: i for i, label in enumerate(label_list )}
    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples ) , desc='''convert examples to features''' ):
        if ex_index % 1_0_0_0_0 == 0:
            logger.info('''Writing example %d''' % (ex_index) )
        inputs = tokenizer(
            example.text_a , example.text_b , add_special_tokens=True , max_length=max_length , padding='''max_length''' , truncation=True , return_overflowing_tokens=True , )
        label = label_map[example.label] if example.label in label_map else 0
        pairID = int(example.pairID )
        features.append(InputFeatures(**inputs , label=label , pairID=pairID ) )

    for i, example in enumerate(examples[:5] ):
        logger.info('''*** Example ***''' )
        logger.info(f'guid: {example}' )
        logger.info(f'features: {features[i]}' )
    return features
hans_tasks_num_labels = {
"""hans""": 3,
}
hans_processors = {
"""hans""": HansProcessor,
}
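
# Hedged usage sketch (added for illustration, not in the original file): the
# truncated PyTorch dataset above is assumed to correspond to transformers'
# `HansDataset`, and '''./hans_data''' is a hypothetical local path.
#
#     from transformers import BertTokenizer
#
#     tokenizer = BertTokenizer.from_pretrained('''bert-base-uncased''')
#     dataset = HansDataset('''./hans_data''', tokenizer, task='''hans''', max_seq_length=128)
#     print(len(dataset), dataset.get_labels())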
| 4 | 1 |
'''simple docstring'''
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
snake_case__ = """scheduler_config.json"""
class FlaxKarrasDiffusionSchedulers(Enum ):
    """simple docstring"""
    FlaxDDIMScheduler = 1
    FlaxDDPMScheduler = 2
    FlaxPNDMScheduler = 3
    FlaxLMSDiscreteScheduler = 4
    FlaxDPMSolverMultistepScheduler = 5
@dataclass
class FlaxSchedulerOutput(BaseOutput ):
    """simple docstring"""
    prev_sample: jnp.ndarray
class FlaxSchedulerMixin:
    """simple docstring"""
    config_name = SCHEDULER_CONFIG_NAME
    ignore_for_config = ['dtype']
    _compatibles = []
    has_compatibles = True
@classmethod
    def from_pretrained( cls , pretrained_model_name_or_path: Dict[str, Any] = None , subfolder: Optional[str] = None , return_unused_kwargs=False , **kwargs , ):
        """simple docstring"""
        config, kwargs = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path , subfolder=subfolder , return_unused_kwargs=True , **kwargs , )
        scheduler, unused_kwargs = cls.from_config(config , return_unused_kwargs=True , **kwargs )
        if hasattr(scheduler , '''create_state''' ) and getattr(scheduler , '''has_state''' , False ):
            state = scheduler.create_state()

        if return_unused_kwargs:
            return scheduler, state, unused_kwargs

        return scheduler, state

    def save_pretrained( self , save_directory: Union[str, os.PathLike] , push_to_hub: bool = False , **kwargs ):
        """simple docstring"""
        self.save_config(save_directory=save_directory , push_to_hub=push_to_hub , **kwargs )

    @property
    def compatibles( self ):
        """simple docstring"""
        return self._get_compatibles()

    @classmethod
    def _get_compatibles( cls ):
        """simple docstring"""
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles ) )
        diffusers_library = importlib.import_module(__name__.split('''.''' )[0] )
        compatible_classes = [
            getattr(diffusers_library , c ) for c in compatible_classes_str if hasattr(diffusers_library , c )
        ]
        return compatible_classes
def broadcast_to_shape_from_left(x: jnp.ndarray , shape: Tuple[int] ) -> jnp.ndarray:
    assert len(shape ) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape ) - x.ndim) ) , shape )
def betas_for_alpha_bar(num_diffusion_timesteps: int , max_beta=0.999 , dtype=jnp.float32 ) -> jnp.ndarray:
    def alpha_bar(time_step: int ):
        return math.cos((time_step + 0.008) / 1.008 * math.pi / 2 ) ** 2

    betas = []
    for i in range(num_diffusion_timesteps ):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2 ) / alpha_bar(t1 ) , max_beta ) )
    return jnp.array(betas , dtype=dtype )
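
# Illustrative sanity check (an addition, not upstream code): the cosine
# schedule above yields one positive beta per diffusion timestep, each capped
# at the assumed default max_beta of 0.999.
#
#     betas = betas_for_alpha_bar(10)
#     assert betas.shape == (10,)
#     assert bool((betas > 0).all()) and bool((betas <= 0.999).all())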
@flax.struct.dataclass
class CommonSchedulerState:
"""simple docstring"""
    alphas: jnp.ndarray
    betas: jnp.ndarray
    alphas_cumprod: jnp.ndarray

    @classmethod
    def create( cls , scheduler ):
        """simple docstring"""
        config = scheduler.config
        if config.trained_betas is not None:
            betas = jnp.asarray(config.trained_betas , dtype=scheduler.dtype )
        elif config.beta_schedule == "linear":
            betas = jnp.linspace(config.beta_start , config.beta_end , config.num_train_timesteps , dtype=scheduler.dtype )
        elif config.beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            betas = (
                jnp.linspace(
                    config.beta_start**0.5 , config.beta_end**0.5 , config.num_train_timesteps , dtype=scheduler.dtype )
                ** 2
            )
        elif config.beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            betas = betas_for_alpha_bar(config.num_train_timesteps , dtype=scheduler.dtype )
        else:
            raise NotImplementedError(
                f'beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}' )

        alphas = 1.0 - betas
        alphas_cumprod = jnp.cumprod(alphas , axis=0 )
        return cls(
            alphas=alphas , betas=betas , alphas_cumprod=alphas_cumprod , )
def get_sqrt_alpha_prod(state: CommonSchedulerState , original_samples: jnp.ndarray , noise: jnp.ndarray , timesteps: jnp.ndarray ):
    alphas_cumprod = state.alphas_cumprod

    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod , original_samples.shape )

    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod , original_samples.shape )

    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod


def add_noise_common(state: CommonSchedulerState , original_samples: jnp.ndarray , noise: jnp.ndarray , timesteps: jnp.ndarray ):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state , original_samples , noise , timesteps )
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples


def get_velocity_common(state: CommonSchedulerState , sample: jnp.ndarray , noise: jnp.ndarray , timesteps: jnp.ndarray ):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state , sample , noise , timesteps )
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
| 4 |
'''simple docstring'''
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class JsonConfig(datasets.BuilderConfig ):
    """simple docstring"""
    features: Optional[datasets.Features] = None
    encoding: str = "utf-8"
    encoding_errors: Optional[str] = None
    field: Optional[str] = None
    use_threads: bool = True  # deprecated
    block_size: Optional[int] = None  # deprecated
    chunksize: int = 10 << 20  # 10MB
    newlines_in_values: Optional[bool] = None
class Json(datasets.ArrowBasedBuilder ):
    """simple docstring"""
    BUILDER_CONFIG_CLASS = JsonConfig
    def _info( self ):
        """simple docstring"""
        if self.config.block_size is not None:
            logger.warning('''The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead''' )
            self.config.chunksize = self.config.block_size
if self.config.use_threads is not True:
logger.warning(
'''The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.''' )
if self.config.newlines_in_values is not None:
raise ValueError('''The JSON loader parameter `newlines_in_values` is no longer supported''' )
return datasets.DatasetInfo(features=self.config.features )
    def _split_generators( self , dl_manager ):
        """simple docstring"""
        if not self.config.data_files:
            raise ValueError(f'At least one data file must be specified, but got data_files={self.config.data_files}' )
        data_files = dl_manager.download_and_extract(self.config.data_files )
        if isinstance(data_files , (str, list, tuple) ):
            files = data_files
            if isinstance(files , str ):
                files = [files]
            files = [dl_manager.iter_files(file ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files , str ):
                files = [files]
            files = [dl_manager.iter_files(file ) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name , gen_kwargs={'''files''': files} ) )
        return splits
    def _cast_table( self , pa_table: pa.Table ) -> pa.Table:
        """simple docstring"""
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features ) - set(pa_table.column_names ):
                type = self.config.features.arrow_schema.field(column_name ).type
                pa_table = pa_table.append_column(column_name , pa.array([None] * len(pa_table ) , type=type ) )
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table , self.config.features.arrow_schema )
        return pa_table
    def _generate_tables( self , files ):
        """simple docstring"""
        for file_idx, file in enumerate(itertools.chain.from_iterable(files ) ):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(file , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
                    dataset = json.load(f )
                # We keep only the field we are interested in
                dataset = dataset[self.config.field]
                # We accept two format: a list of dicts or a dict of lists
                if isinstance(dataset , (list, tuple) ):
                    keys = set().union(*[row.keys() for row in dataset] )
                    mapping = {col: [row.get(col ) for row in dataset] for col in keys}
                else:
                    mapping = dataset
                pa_table = pa.Table.from_pydict(mapping )
                yield file_idx, self._cast_table(pa_table )
            # If the file has one json object per line
            else:
                with open(file , '''rb''' ) as f:
                    batch_idx = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    block_size = max(self.config.chunksize // 32 , 16 << 10 )
                    encoding_errors = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else '''strict'''
                    )
                    while True:
                        batch = f.read(self.config.chunksize )
                        if not batch:
                            break
                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(f )
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            batch = batch.decode(self.config.encoding , errors=encoding_errors ).encode('''utf-8''' )
                        try:
                            while True:
                                try:
                                    pa_table = paj.read_json(
                                        io.BytesIO(batch ) , read_options=paj.ReadOptions(block_size=block_size ) )
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(e , pa.ArrowInvalid )
                                        and "straddling" not in str(e )
                                        or block_size > len(batch )
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            f'Batch of {len(batch )} bytes couldn\'t be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.' )
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            try:
                                with open(
                                    file , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
                                    dataset = json.load(f )
                            except json.JSONDecodeError:
                                logger.error(f'Failed to read file \'{file}\' with error {type(e )}: {e}' )
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(dataset , list ):  # list is the only sequence type supported in JSON
                                try:
                                    keys = set().union(*[row.keys() for row in dataset] )
                                    mapping = {col: [row.get(col ) for row in dataset] for col in keys}
                                    pa_table = pa.Table.from_pydict(mapping )
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(f'Failed to read file \'{file}\' with error {type(e )}: {e}' )
                                    raise ValueError(f'Not able to read records in the JSON file at {file}.' ) from None
                                yield file_idx, self._cast_table(pa_table )
                                break
                            else:
                                logger.error(f'Failed to read file \'{file}\' with error {type(e )}: {e}' )
                                raise ValueError(
                                    f'Not able to read records in the JSON file at {file}. '
                                    f'You should probably indicate the field of the JSON file containing your records. '
                                    f'This JSON file contain the following fields: {str(list(dataset.keys() ) )}. '
                                    f'Select the correct one and provide it as `field=\'XXX\'` to the dataset loading method. ' ) from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(pa_table )
                        batch_idx += 1
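
# Hedged usage sketch (assumes this module is the packaged '''json''' builder
# that `datasets.load_dataset` dispatches to; '''data.jsonl''' is a
# hypothetical file of one JSON object per line):
#
#     from datasets import load_dataset
#
#     ds = load_dataset('''json''', data_files='''data.jsonl''', split='''train''')
#     print(ds.features)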
| 4 | 1 |
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class DetrImageProcessingTester(unittest.TestCase ):
    """simple docstring"""
    def __init__( self , parent , batch_size=7 , num_channels=3 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_rescale=True , rescale_factor=1 / 255 , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , do_pad=True , ):
        """simple docstring"""
        size = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
    def prepare_image_processor_dict( self ):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
    def get_expected_values( self , image_inputs , batched=False ):
        """simple docstring"""
        if not batched:
            image = image_inputs[0]
            if isinstance(image , Image.Image ):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size['''shortest_edge'''] * h / w )
                expected_width = self.size['''shortest_edge''']
            elif w > h:
                expected_height = self.size['''shortest_edge''']
                expected_width = int(self.size['''shortest_edge'''] * w / h )
            else:
                expected_height = self.size['''shortest_edge''']
                expected_width = self.size['''shortest_edge''']
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            expected_height = max(expected_values , key=lambda item: item[0] )[0]
            expected_width = max(expected_values , key=lambda item: item[1] )[1]
        return expected_height, expected_width
@require_torch
@require_vision
class DetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase ):
    """simple docstring"""
    image_processing_class = DetrImageProcessor if is_vision_available() else None

    def setUp( self ):
        """simple docstring"""
        self.image_processor_tester = DetrImageProcessingTester(self )
    @property
    def image_processor_dict( self ):
        """simple docstring"""
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , '''image_mean''' ) )
        self.assertTrue(hasattr(image_processing , '''image_std''' ) )
        self.assertTrue(hasattr(image_processing , '''do_normalize''' ) )
        self.assertTrue(hasattr(image_processing , '''do_rescale''' ) )
        self.assertTrue(hasattr(image_processing , '''rescale_factor''' ) )
        self.assertTrue(hasattr(image_processing , '''do_resize''' ) )
        self.assertTrue(hasattr(image_processing , '''size''' ) )
        self.assertTrue(hasattr(image_processing , '''do_pad''' ) )
    def test_image_processor_from_dict_with_kwargs( self ):
        """simple docstring"""
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1333} )
        self.assertEqual(image_processor.do_pad , True )

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=False )
        self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} )
        self.assertEqual(image_processor.do_pad , False )
    def test_batch_feature( self ):
        """simple docstring"""
pass
def _a ( self : Dict ):
"""simple docstring"""
A_ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , Image.Image )
# Test not batched input
A_ : Any = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
A_ ,A_ : Any = self.image_processor_tester.get_expected_values(_lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A_ ,A_ : int = self.image_processor_tester.get_expected_values(_lowerCamelCase , batched=_lowerCamelCase )
A_ : Tuple = image_processing(_lowerCamelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _a ( self : Tuple ):
"""simple docstring"""
A_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A_ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , numpify=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , np.ndarray )
# Test not batched input
A_ : int = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
A_ ,A_ : Dict = self.image_processor_tester.get_expected_values(_lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A_ : Optional[Any] = image_processing(_lowerCamelCase , return_tensors='''pt''' ).pixel_values
A_ ,A_ : Optional[Any] = self.image_processor_tester.get_expected_values(_lowerCamelCase , batched=_lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _a ( self : Optional[Any] ):
"""simple docstring"""
A_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A_ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , torchify=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , torch.Tensor )
# Test not batched input
A_ : Optional[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
A_ ,A_ : Dict = self.image_processor_tester.get_expected_values(_lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A_ : Dict = image_processing(_lowerCamelCase , return_tensors='''pt''' ).pixel_values
A_ ,A_ : List[str] = self.image_processor_tester.get_expected_values(_lowerCamelCase , batched=_lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def _a ( self : List[Any] ):
"""simple docstring"""
A_ : List[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
A_ : Union[str, Any] = json.loads(f.read() )
A_ : Any = {'''image_id''': 39769, '''annotations''': target}
# encode them
A_ : Union[str, Any] = DetrImageProcessor.from_pretrained('''facebook/detr-resnet-50''' )
A_ : List[str] = image_processing(images=_lowerCamelCase , annotations=_lowerCamelCase , return_tensors='''pt''' )
# verify pixel values
A_ : Optional[Any] = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['''pixel_values'''].shape , _lowerCamelCase )
A_ : int = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , _lowerCamelCase , atol=1E-4 ) )
# verify area
A_ : Dict = torch.tensor([58_87.96_00, 1_12_50.20_61, 48_93_53.84_38, 83_71_22.75_00, 14_79_67.51_56, 16_57_32.34_38] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , _lowerCamelCase ) )
# verify boxes
A_ : int = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , _lowerCamelCase )
A_ : Dict = torch.tensor([0.55_03, 0.27_65, 0.06_04, 0.22_15] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , _lowerCamelCase , atol=1E-3 ) )
# verify image_id
A_ : Union[str, Any] = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , _lowerCamelCase ) )
# verify is_crowd
A_ : List[Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , _lowerCamelCase ) )
# verify class_labels
A_ : Tuple = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , _lowerCamelCase ) )
# verify orig_size
A_ : List[Any] = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , _lowerCamelCase ) )
# verify size
A_ : Optional[int] = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , _lowerCamelCase ) )
@slow
def _a ( self : Any ):
"""simple docstring"""
A_ : Tuple = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
A_ : Tuple = json.loads(f.read() )
A_ : Union[str, Any] = {'''file_name''': '''000000039769.png''', '''image_id''': 39769, '''segments_info''': target}
A_ : Optional[Any] = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
A_ : str = DetrImageProcessor.from_pretrained('''facebook/detr-resnet-50-panoptic''' )
A_ : Tuple = image_processing(images=_lowerCamelCase , annotations=_lowerCamelCase , masks_path=_lowerCamelCase , return_tensors='''pt''' )
# verify pixel values
A_ : Dict = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['''pixel_values'''].shape , _lowerCamelCase )
A_ : Optional[Any] = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , _lowerCamelCase , atol=1E-4 ) )
# verify area
A_ : Dict = torch.tensor([14_79_79.68_75, 16_55_27.04_69, 48_46_38.59_38, 1_12_92.93_75, 58_79.65_62, 76_34.11_47] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , _lowerCamelCase ) )
# verify boxes
A_ : Optional[Any] = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , _lowerCamelCase )
A_ : Any = torch.tensor([0.26_25, 0.54_37, 0.46_88, 0.86_25] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , _lowerCamelCase , atol=1E-3 ) )
# verify image_id
A_ : Tuple = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , _lowerCamelCase ) )
# verify is_crowd
A_ : Dict = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , _lowerCamelCase ) )
# verify class_labels
A_ : Union[str, Any] = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , _lowerCamelCase ) )
# verify masks
A_ : int = 822873
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , _lowerCamelCase )
# verify orig_size
A_ : Dict = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , _lowerCamelCase ) )
# verify size
A_ : Any = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , _lowerCamelCase ) )
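
# Illustrative standalone sketch of the API exercised by the slow tests above
# (an addition; requires the fixture image and a network download):
#
#     from PIL import Image
#     from transformers import DetrImageProcessor
#
#     processor = DetrImageProcessor.from_pretrained('''facebook/detr-resnet-50''')
#     image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
#     encoding = processor(images=image, return_tensors='''pt''')
#     print(encoding['''pixel_values'''].shape)  # torch.Size([1, 3, 800, 1066])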
| 4 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""microsoft/swin-tiny-patch4-window7-224""": (
"""https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"""
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class SwinConfig(BackboneConfigMixin, PretrainedConfig ):
    """simple docstring"""
    model_type = 'swin'

    attribute_map = {
        'num_attention_heads': 'num_heads',
        'num_hidden_layers': 'num_layers',
    }
    def __init__( self , image_size=224 , patch_size=4 , num_channels=3 , embed_dim=96 , depths=[2, 2, 6, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , mlp_ratio=4.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , initializer_range=0.02 , layer_norm_eps=1E-5 , encoder_stride=32 , out_features=None , out_indices=None , **kwargs , ):
        """simple docstring"""
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths ) - 1) )
        self.stage_names = ['''stem'''] + [f'stage{idx}' for idx in range(1 , len(depths ) + 1 )]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
class SwinOnnxConfig(OnnxConfig ):
    """simple docstring"""
    torch_onnx_minimum_version = version.parse('1.11' )
@property
    def inputs( self ):
"""simple docstring"""
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
    def atol_for_validation( self ):
"""simple docstring"""
return 1E-4
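
# Minimal usage sketch (added for illustration): the derived hidden size is
# embed_dim * 2 ** (len(depths) - 1), so the defaults give 96 * 8 = 768.
#
#     config = SwinConfig(image_size=224, embed_dim=96, depths=[2, 2, 6, 2])
#     print(config.hidden_size)  # 768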
| 4 | 1 |
'''simple docstring'''
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class UpperCamelCase_ (unittest.TestCase ):
"""simple docstring"""
    def check_results_dict_not_empty( self , results ):
        """simple docstring"""
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result['''bs'''] , model_result['''ss'''] ):
                result = model_result['''result'''][batch_size][sequence_length]
                self.assertIsNotNone(result )
def _a ( self : Dict ):
"""simple docstring"""
A_ : Dict = '''sshleifer/tiny-gpt2'''
A_ : Optional[int] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowerCamelCase , inference=_lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowerCamelCase , )
A_ : Union[str, Any] = PyTorchBenchmark(_lowerCamelCase )
A_ : int = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _a ( self : Optional[Any] ):
"""simple docstring"""
A_ : Union[str, Any] = '''sgugger/tiny-distilbert-classification'''
A_ : int = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowerCamelCase , inference=_lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowerCamelCase , only_pretrain_model=_lowerCamelCase , )
A_ : List[str] = PyTorchBenchmark(_lowerCamelCase )
A_ : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _a ( self : List[Any] ):
"""simple docstring"""
A_ : Union[str, Any] = '''sshleifer/tiny-gpt2'''
A_ : Any = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowerCamelCase , inference=_lowerCamelCase , torchscript=_lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowerCamelCase , )
A_ : List[Any] = PyTorchBenchmark(_lowerCamelCase )
A_ : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
    @unittest.skipIf(torch_device == '''cpu''' , '''Cant do half precision''' )
    def test_inference_fp16( self ):
        """simple docstring"""
        MODEL_ID = '''sshleifer/tiny-gpt2'''
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=False , inference=True , fp16=True , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , )
        benchmark = PyTorchBenchmark(benchmark_args )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
def _a ( self : Tuple ):
"""simple docstring"""
A_ : int = '''sshleifer/tiny-gpt2'''
A_ : Dict = AutoConfig.from_pretrained(_lowerCamelCase )
# set architectures equal to `None`
A_ : List[str] = None
A_ : str = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowerCamelCase , inference=_lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowerCamelCase , )
A_ : Optional[int] = PyTorchBenchmark(_lowerCamelCase , configs=[config] )
A_ : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A_ : Union[str, Any] = '''sshleifer/tiny-gpt2'''
A_ : List[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowerCamelCase , inference=_lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowerCamelCase , )
A_ : List[Any] = PyTorchBenchmark(_lowerCamelCase )
A_ : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
    @unittest.skipIf(torch_device == '''cpu''' , '''Can\'t do half precision''' )
    def test_train_no_configs_fp16( self ):
        """simple docstring"""
        MODEL_ID = '''sshleifer/tiny-gpt2'''
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=True , inference=False , sequence_lengths=[8] , batch_sizes=[1] , fp16=True , multi_process=False , )
        benchmark = PyTorchBenchmark(benchmark_args )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )
def _a ( self : Tuple ):
"""simple docstring"""
A_ : Optional[Any] = '''sshleifer/tiny-gpt2'''
A_ : int = AutoConfig.from_pretrained(_lowerCamelCase )
A_ : List[str] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowerCamelCase , inference=_lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowerCamelCase , )
A_ : str = PyTorchBenchmark(_lowerCamelCase , configs=[config] )
A_ : List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A_ : int = '''sshleifer/tinier_bart'''
A_ : str = AutoConfig.from_pretrained(_lowerCamelCase )
A_ : Union[str, Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowerCamelCase , inference=_lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowerCamelCase , )
A_ : Any = PyTorchBenchmark(_lowerCamelCase , configs=[config] )
A_ : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _a ( self : Dict ):
"""simple docstring"""
A_ : Union[str, Any] = '''sshleifer/tiny-gpt2'''
A_ : Optional[int] = AutoConfig.from_pretrained(_lowerCamelCase )
A_ : Any = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowerCamelCase , inference=_lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowerCamelCase , )
A_ : List[str] = PyTorchBenchmark(_lowerCamelCase , configs=[config] )
A_ : Optional[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _a ( self : Tuple ):
"""simple docstring"""
A_ : Optional[Any] = '''sshleifer/tinier_bart'''
A_ : List[Any] = AutoConfig.from_pretrained(_lowerCamelCase )
A_ : str = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowerCamelCase , inference=_lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowerCamelCase , )
A_ : List[Any] = PyTorchBenchmark(_lowerCamelCase , configs=[config] )
A_ : int = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _a ( self : int ):
"""simple docstring"""
A_ : str = '''sshleifer/tiny-gpt2'''
with tempfile.TemporaryDirectory() as tmp_dir:
A_ : Tuple = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowerCamelCase , inference=_lowerCamelCase , save_to_csv=_lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(_lowerCamelCase , '''inf_time.csv''' ) , train_memory_csv_file=os.path.join(_lowerCamelCase , '''train_mem.csv''' ) , inference_memory_csv_file=os.path.join(_lowerCamelCase , '''inf_mem.csv''' ) , train_time_csv_file=os.path.join(_lowerCamelCase , '''train_time.csv''' ) , env_info_csv_file=os.path.join(_lowerCamelCase , '''env.csv''' ) , multi_process=_lowerCamelCase , )
A_ : int = PyTorchBenchmark(_lowerCamelCase )
benchmark.run()
self.assertTrue(Path(os.path.join(_lowerCamelCase , '''inf_time.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(_lowerCamelCase , '''train_time.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(_lowerCamelCase , '''inf_mem.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(_lowerCamelCase , '''train_mem.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(_lowerCamelCase , '''env.csv''' ) ).exists() )
def _a ( self : Optional[int] ):
"""simple docstring"""
A_ : Any = '''sshleifer/tiny-gpt2'''
def _check_summary_is_not_empty(_lowerCamelCase : Dict ):
self.assertTrue(hasattr(_lowerCamelCase , '''sequential''' ) )
self.assertTrue(hasattr(_lowerCamelCase , '''cumulative''' ) )
self.assertTrue(hasattr(_lowerCamelCase , '''current''' ) )
self.assertTrue(hasattr(_lowerCamelCase , '''total''' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
A_ : List[str] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowerCamelCase , inference=_lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(_lowerCamelCase , '''log.txt''' ) , log_print=_lowerCamelCase , trace_memory_line_by_line=_lowerCamelCase , multi_process=_lowerCamelCase , )
A_ : str = PyTorchBenchmark(_lowerCamelCase )
A_ : Any = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
self.assertTrue(Path(os.path.join(_lowerCamelCase , '''log.txt''' ) ).exists() )
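
# Hedged standalone sketch of the benchmarked API (an addition; downloads the
# tiny model on first use):
#
#     from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
#
#     args = PyTorchBenchmarkArguments(
#         models=['''sshleifer/tiny-gpt2'''], training=False, inference=True,
#         sequence_lengths=[8], batch_sizes=[1], multi_process=False,
#     )
#     results = PyTorchBenchmark(args).run()
#     print(results.time_inference_result)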
| 4 |
'''simple docstring'''
from __future__ import annotations
def two_pointer(nums: list[int] , target: int ) -> list[int]:
    i = 0
    j = len(nums ) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1
    return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'{two_pointer([2, 7, 11, 15], 9) = }')
| 4 | 1 |
'''simple docstring'''
from maths.prime_check import is_prime
def twin_prime(number: int ) -> int:
    if not isinstance(number , int ):
        msg = f'Input value of [number={number}] must be an integer'
        raise TypeError(msg )
    if is_prime(number ) and is_prime(number + 2 ):
        return number + 2
    else:
        return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 4 |
'''simple docstring'''
def valid_coloring(neighbours: list[int] , colored_vertices: list[int] , color: int ) -> bool:
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours ) )


def util_color(graph: list[list[int]] , max_colors: int , colored_vertices: list[int] , index: int ) -> bool:
    # Base Case
    if index == len(graph ):
        return True
    # Recursive Step
    for i in range(max_colors ):
        if valid_coloring(graph[index] , colored_vertices , i ):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph , max_colors , colored_vertices , index + 1 ):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph: list[list[int]] , max_colors: int ) -> list[int]:
    colored_vertices = [-1] * len(graph )
    if util_color(graph , max_colors , colored_vertices , 0 ):
        return colored_vertices
    return []
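
if __name__ == "__main__":
    # Small demonstration (added for illustration): 3-color a triangle given as
    # an adjacency matrix; one valid assignment is [0, 1, 2].
    triangle = [
        [0, 1, 1],
        [1, 0, 1],
        [1, 1, 0],
    ]
    print(color(triangle, 3))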
| 4 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"""configuration_mega""": ["""MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MegaConfig""", """MegaOnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_mega"""] = [
"""MEGA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MegaForCausalLM""",
"""MegaForMaskedLM""",
"""MegaForMultipleChoice""",
"""MegaForQuestionAnswering""",
"""MegaForSequenceClassification""",
"""MegaForTokenClassification""",
"""MegaModel""",
"""MegaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
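
# Illustrative note (assuming the standard `_LazyModule` behaviour): after the
# `sys.modules` swap above, submodules are only imported on first attribute
# access, e.g.
#
#     from transformers.models.mega import MegaConfig  # triggers the configuration_mega import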
| 4 |
'''simple docstring'''
from __future__ import annotations
from PIL import Image
# Define glider example
GLIDER = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def new_generation(cells: list[list[int]] ) -> list[list[int]]:
    next_generation = []
    for i in range(len(cells ) ):
        next_generation_row = []
        for j in range(len(cells[i] ) ):
            # Get the number of live neighbours
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i] ) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i] ) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells ) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells ) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells ) - 1 and j < len(cells[i] ) - 1:
                neighbour_count += cells[i + 1][j + 1]

            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            #    Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (
                (alive and 2 <= neighbour_count <= 3)
                or not alive
                and neighbour_count == 3
            ):
                next_generation_row.append(1 )
            else:
                next_generation_row.append(0 )

        next_generation.append(next_generation_row )
    return next_generation


def generate_images(cells: list[list[int]] , frames: int ) -> list[Image.Image]:
    images = []
    for _ in range(frames ):
        # Create output image
        img = Image.new('''RGB''' , (len(cells[0] ), len(cells )) )
        pixels = img.load()

        # Save cells to image
        for x in range(len(cells ) ):
            for y in range(len(cells[0] ) ):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)

        # Save image
        images.append(img )
        cells = new_generation(cells )
    return images
if __name__ == "__main__":
    images = generate_images(GLIDER, 16)
images[0].save("""out.gif""", save_all=True, append_images=images[1:])
| 4 | 1 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class OwlViTProcessorTest(unittest.TestCase ):
    """simple docstring"""
    def setUp( self ):
        """simple docstring"""
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ['''''', '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>''']
        # fmt: on
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''', '''''']
        self.special_tokens_map = {'''unk_token''': '''<unk>'''}

        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '''\n''' )
        with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(merges ) )

        image_processor_map = {
            '''do_resize''': True,
            '''size''': 20,
            '''do_center_crop''': True,
            '''crop_size''': 18,
            '''do_normalize''': True,
            '''image_mean''': [0.48145466, 0.4578275, 0.40821073],
            '''image_std''': [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname , IMAGE_PROCESSOR_NAME )
        with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
            json.dump(image_processor_map , fp )
    def get_tokenizer( self , **kwargs ):
        """simple docstring"""
        return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token='''!''' , **kwargs )

    def get_rust_tokenizer( self , **kwargs ):
        """simple docstring"""
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token='''!''' , **kwargs )

    def get_image_processor( self , **kwargs ):
        """simple docstring"""
        return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **kwargs )
    def tearDown( self ):
        """simple docstring"""
shutil.rmtree(self.tmpdirname )
    def prepare_image_inputs( self ):
        """simple docstring"""
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A_ : Dict = self.get_tokenizer()
A_ : List[str] = self.get_rust_tokenizer()
A_ : Optional[int] = self.get_image_processor()
A_ : Tuple = OwlViTProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
processor_slow.save_pretrained(self.tmpdirname )
A_ : str = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=_lowerCamelCase )
A_ : Dict = OwlViTProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
processor_fast.save_pretrained(self.tmpdirname )
A_ : Optional[int] = OwlViTProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , _lowerCamelCase )
self.assertIsInstance(processor_fast.tokenizer , _lowerCamelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , _lowerCamelCase )
self.assertIsInstance(processor_fast.image_processor , _lowerCamelCase )
def _a ( self : str ):
"""simple docstring"""
A_ : Tuple = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
A_ : Tuple = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
A_ : List[str] = self.get_image_processor(do_normalize=_lowerCamelCase )
A_ : Dict = OwlViTProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_lowerCamelCase )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _lowerCamelCase )
def _a ( self : Dict ):
"""simple docstring"""
A_ : Tuple = self.get_image_processor()
A_ : Any = self.get_tokenizer()
A_ : Optional[Any] = OwlViTProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
A_ : Optional[int] = self.prepare_image_inputs()
A_ : List[Any] = image_processor(_lowerCamelCase , return_tensors='''np''' )
A_ : Dict = processor(images=_lowerCamelCase , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _a ( self : Dict ):
"""simple docstring"""
A_ : str = self.get_image_processor()
A_ : Optional[int] = self.get_tokenizer()
A_ : List[Any] = OwlViTProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
A_ : Union[str, Any] = '''lower newer'''
A_ : Optional[int] = processor(text=_lowerCamelCase , return_tensors='''np''' )
A_ : int = tokenizer(_lowerCamelCase , return_tensors='''np''' )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() )
def _a ( self : Optional[Any] ):
"""simple docstring"""
A_ : Tuple = self.get_image_processor()
A_ : Optional[Any] = self.get_tokenizer()
A_ : Optional[Any] = OwlViTProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
A_ : str = '''lower newer'''
A_ : List[Any] = self.prepare_image_inputs()
A_ : Dict = processor(text=_lowerCamelCase , images=_lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(_lowerCamelCase ):
processor()
def _a ( self : List[str] ):
"""simple docstring"""
A_ : Tuple = '''google/owlvit-base-patch32'''
A_ : Tuple = OwlViTProcessor.from_pretrained(_lowerCamelCase )
A_ : int = ['''cat''', '''nasa badge''']
A_ : int = processor(text=_lowerCamelCase )
A_ : Optional[Any] = 16
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask'''] )
self.assertEqual(inputs['''input_ids'''].shape , (2, seq_length) )
# test if it raises when no input is passed
with pytest.raises(_lowerCamelCase ):
processor()
def _a ( self : List[str] ):
"""simple docstring"""
A_ : int = '''google/owlvit-base-patch32'''
A_ : Any = OwlViTProcessor.from_pretrained(_lowerCamelCase )
A_ : Tuple = [['''cat''', '''nasa badge'''], ['''person''']]
A_ : int = processor(text=_lowerCamelCase )
A_ : Optional[int] = 16
A_ : int = len(_lowerCamelCase )
A_ : str = max([len(_lowerCamelCase ) for texts in input_texts] )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask'''] )
self.assertEqual(inputs['''input_ids'''].shape , (batch_size * num_max_text_queries, seq_length) )
# test if it raises when no input is passed
with pytest.raises(_lowerCamelCase ):
processor()
def _a ( self : str ):
"""simple docstring"""
A_ : List[Any] = '''google/owlvit-base-patch32'''
A_ : List[str] = OwlViTProcessor.from_pretrained(_lowerCamelCase )
A_ : int = ['''cat''', '''nasa badge''']
A_ : Optional[int] = processor(text=_lowerCamelCase )
A_ : str = 16
A_ : int = inputs['''input_ids''']
A_ : List[str] = [
[49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask'''] )
self.assertEqual(inputs['''input_ids'''].shape , (2, seq_length) )
self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] )
self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] )
def _a ( self : Any ):
"""simple docstring"""
A_ : Dict = self.get_image_processor()
A_ : Tuple = self.get_tokenizer()
A_ : Tuple = OwlViTProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
A_ : int = self.prepare_image_inputs()
A_ : Optional[Any] = self.prepare_image_inputs()
A_ : Optional[int] = processor(images=_lowerCamelCase , query_images=_lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , ['''query_pixel_values''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(_lowerCamelCase ):
processor()
def _a ( self : Any ):
"""simple docstring"""
A_ : Optional[int] = self.get_image_processor()
A_ : Tuple = self.get_tokenizer()
A_ : str = OwlViTProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
A_ : Union[str, Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
A_ : Any = processor.batch_decode(_lowerCamelCase )
A_ : List[str] = tokenizer.batch_decode(_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
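
# Hedged end-to-end sketch of the processor exercised above (an addition;
# downloads the checkpoint):
#
#     import numpy as np
#     from PIL import Image
#     from transformers import OwlViTProcessor
#
#     processor = OwlViTProcessor.from_pretrained('''google/owlvit-base-patch32''')
#     image = Image.fromarray(np.zeros((30, 40, 3), dtype=np.uint8))
#     inputs = processor(text=[['''cat''', '''nasa badge''']], images=image, return_tensors='''pt''')
#     print(inputs['''input_ids'''].shape)  # torch.Size([2, 16])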
| 4 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class AlignProcessorTest(unittest.TestCase ):
    """simple docstring"""
    def setUp( self ):
        """simple docstring"""
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            '''[UNK]''',
            '''[CLS]''',
            '''[SEP]''',
            '''[PAD]''',
            '''[MASK]''',
            '''want''',
            '''##want''',
            '''##ed''',
            '''wa''',
            '''un''',
            '''runn''',
            '''##ing''',
            ''',''',
            '''low''',
            '''lowest''',
        ]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )

        image_processor_map = {
            '''do_resize''': True,
            '''size''': 20,
            '''do_center_crop''': True,
            '''crop_size''': 18,
            '''do_normalize''': True,
            '''image_mean''': [0.48145466, 0.4578275, 0.40821073],
            '''image_std''': [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname , IMAGE_PROCESSOR_NAME )
        with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
            json.dump(image_processor_map , fp )
    def get_tokenizer( self , **kwargs ):
        """simple docstring"""
        return BertTokenizer.from_pretrained(self.tmpdirname , **kwargs )

    def get_rust_tokenizer( self , **kwargs ):
        """simple docstring"""
        return BertTokenizerFast.from_pretrained(self.tmpdirname , **kwargs )

    def get_image_processor( self , **kwargs ):
        """simple docstring"""
        return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **kwargs )
    def tearDown( self ):
        """simple docstring"""
shutil.rmtree(self.tmpdirname )
def _a ( self : int ):
"""simple docstring"""
        A_ : Union[str, Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
A_ : Any = [Image.fromarray(np.moveaxis(_lowerCamelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _a ( self : int ):
"""simple docstring"""
A_ : Tuple = self.get_tokenizer()
A_ : Tuple = self.get_rust_tokenizer()
A_ : Dict = self.get_image_processor()
A_ : List[Any] = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
processor_slow.save_pretrained(self.tmpdirname )
A_ : str = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=_lowerCamelCase )
A_ : Any = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
processor_fast.save_pretrained(self.tmpdirname )
A_ : List[Any] = AlignProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , _lowerCamelCase )
self.assertIsInstance(processor_fast.tokenizer , _lowerCamelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , _lowerCamelCase )
self.assertIsInstance(processor_fast.image_processor , _lowerCamelCase )
def _a ( self : List[Any] ):
"""simple docstring"""
A_ : List[str] = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
A_ : Optional[int] = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
A_ : Tuple = self.get_image_processor(do_normalize=_lowerCamelCase , padding_value=1.0 )
A_ : List[str] = AlignProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_lowerCamelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _lowerCamelCase )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A_ : Dict = self.get_image_processor()
A_ : Any = self.get_tokenizer()
A_ : List[str] = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
A_ : Any = self.prepare_image_inputs()
A_ : List[Any] = image_processor(_lowerCamelCase , return_tensors='''np''' )
A_ : str = processor(images=_lowerCamelCase , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _a ( self : Dict ):
"""simple docstring"""
A_ : str = self.get_image_processor()
A_ : List[str] = self.get_tokenizer()
A_ : Optional[int] = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
A_ : int = '''lower newer'''
A_ : str = processor(text=_lowerCamelCase )
A_ : Dict = tokenizer(_lowerCamelCase , padding='''max_length''' , max_length=64 )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _a ( self : str ):
"""simple docstring"""
A_ : Optional[int] = self.get_image_processor()
A_ : Optional[Any] = self.get_tokenizer()
A_ : List[str] = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
A_ : List[Any] = '''lower newer'''
A_ : Optional[int] = self.prepare_image_inputs()
A_ : List[Any] = processor(text=_lowerCamelCase , images=_lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(_lowerCamelCase ):
processor()
def _a ( self : List[str] ):
"""simple docstring"""
A_ : Optional[Any] = self.get_image_processor()
A_ : Optional[int] = self.get_tokenizer()
A_ : List[Any] = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
A_ : str = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
A_ : str = processor.batch_decode(_lowerCamelCase )
A_ : Union[str, Any] = tokenizer.batch_decode(_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
def _a ( self : Tuple ):
"""simple docstring"""
A_ : str = self.get_image_processor()
A_ : Tuple = self.get_tokenizer()
A_ : Any = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
A_ : str = '''lower newer'''
A_ : List[str] = self.prepare_image_inputs()
A_ : Tuple = processor(text=_lowerCamelCase , images=_lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 4 | 1 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class UpperCamelCase_ (metaclass=a__ ):
"""simple docstring"""
_lowerCAmelCase = ['keras_nlp']
def __init__( self : Optional[Any] , *_lowerCamelCase : Dict , **_lowerCamelCase : List[str] ):
"""simple docstring"""
requires_backends(self , ['''keras_nlp'''] )
| 4 |
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
snake_case__ = logging.get_logger(__name__)
snake_case__ = """▁"""
snake_case__ = {
"""vocab_file""": """vocab.json""",
"""spm_file""": """sentencepiece.bpe.model""",
}
snake_case__ = {
"""vocab_file""": {
"""facebook/s2t-small-librispeech-asr""": (
"""https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json"""
),
},
"""spm_file""": {
"""facebook/s2t-small-librispeech-asr""": (
"""https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model"""
)
},
}
snake_case__ = {
"""facebook/s2t-small-librispeech-asr""": 10_24,
}
snake_case__ = ["""pt""", """fr""", """ru""", """nl""", """ro""", """it""", """es""", """de"""]
snake_case__ = {"""mustc""": MUSTC_LANGS}
class UpperCamelCase_ (a__ ):
"""simple docstring"""
_lowerCAmelCase = VOCAB_FILES_NAMES
_lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCAmelCase = MAX_MODEL_INPUT_SIZES
_lowerCAmelCase = ['input_ids', 'attention_mask']
_lowerCAmelCase = []
def __init__( self : Optional[int] , _lowerCamelCase : List[str] , _lowerCamelCase : List[str] , _lowerCamelCase : str="<s>" , _lowerCamelCase : Union[str, Any]="</s>" , _lowerCamelCase : Dict="<pad>" , _lowerCamelCase : str="<unk>" , _lowerCamelCase : Union[str, Any]=False , _lowerCamelCase : int=False , _lowerCamelCase : Any=None , _lowerCamelCase : Any=None , _lowerCamelCase : Optional[Dict[str, Any]] = None , **_lowerCamelCase : Optional[int] , ):
"""simple docstring"""
A_ : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_lowerCamelCase , eos_token=_lowerCamelCase , unk_token=_lowerCamelCase , pad_token=_lowerCamelCase , do_upper_case=_lowerCamelCase , do_lower_case=_lowerCamelCase , tgt_lang=_lowerCamelCase , lang_codes=_lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCamelCase , )
A_ : Optional[int] = do_upper_case
A_ : Tuple = do_lower_case
A_ : Tuple = load_json(_lowerCamelCase )
A_ : Tuple = {v: k for k, v in self.encoder.items()}
A_ : List[Any] = spm_file
A_ : List[str] = load_spm(_lowerCamelCase , self.sp_model_kwargs )
if lang_codes is not None:
A_ : Any = lang_codes
A_ : Optional[Any] = LANGUAGES[lang_codes]
A_ : Optional[Any] = [f'<lang:{lang}>' for lang in self.langs]
A_ : Union[str, Any] = {lang: self.sp_model.PieceToId(f'<lang:{lang}>' ) for lang in self.langs}
A_ : Optional[int] = self.lang_tokens
A_ : int = tgt_lang if tgt_lang is not None else self.langs[0]
self.set_tgt_lang_special_tokens(self._tgt_lang )
else:
A_ : Dict = {}
@property
def _a ( self : Tuple ):
"""simple docstring"""
return len(self.encoder )
@property
def _a ( self : int ):
"""simple docstring"""
return self._tgt_lang
@tgt_lang.setter
def _a ( self : List[str] , _lowerCamelCase : Any ):
"""simple docstring"""
A_ : int = new_tgt_lang
self.set_tgt_lang_special_tokens(_lowerCamelCase )
def _a ( self : Tuple , _lowerCamelCase : str ):
"""simple docstring"""
A_ : List[str] = self.lang_code_to_id[tgt_lang]
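        # The target-language token becomes the prefix that
        # build_inputs_with_special_tokens prepends to every sequence.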
A_ : Optional[Any] = [lang_code_id]
def _a ( self : Optional[Any] , _lowerCamelCase : str ):
"""simple docstring"""
return self.sp_model.encode(_lowerCamelCase , out_type=_lowerCamelCase )
def _a ( self : List[Any] , _lowerCamelCase : int ):
"""simple docstring"""
return self.encoder.get(_lowerCamelCase , self.encoder[self.unk_token] )
def _a ( self : int , _lowerCamelCase : int ):
"""simple docstring"""
return self.decoder.get(_lowerCamelCase , self.unk_token )
def _a ( self : int , _lowerCamelCase : List[str] ):
"""simple docstring"""
A_ : List[Any] = []
A_ : Any = ''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
A_ : Union[str, Any] = self.sp_model.decode(_lowerCamelCase )
out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
A_ : Optional[Any] = []
else:
current_sub_tokens.append(_lowerCamelCase )
A_ : Tuple = self.sp_model.decode(_lowerCamelCase )
out_string += decoded.upper() if self.do_upper_case else decoded
return out_string.strip()
def _a ( self : int , _lowerCamelCase : Dict , _lowerCamelCase : Any=None ):
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id]
def _a ( self : List[Any] , _lowerCamelCase : List[int] , _lowerCamelCase : Optional[List[int]] = None , _lowerCamelCase : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowerCamelCase , token_ids_a=_lowerCamelCase , already_has_special_tokens=_lowerCamelCase )
A_ : Tuple = [1] * len(self.prefix_tokens )
A_ : Tuple = [1]
if token_ids_a is None:
return prefix_ones + ([0] * len(_lowerCamelCase )) + suffix_ones
return prefix_ones + ([0] * len(_lowerCamelCase )) + ([0] * len(_lowerCamelCase )) + suffix_ones
def _a ( self : Dict ):
"""simple docstring"""
A_ : Union[str, Any] = self.encoder.copy()
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Union[str, Any] ):
"""simple docstring"""
A_ : Dict = self.__dict__.copy()
A_ : List[Any] = None
return state
def __setstate__( self : List[str] , _lowerCamelCase : Dict ):
"""simple docstring"""
A_ : Dict = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
A_ : Optional[int] = {}
A_ : int = load_spm(self.spm_file , self.sp_model_kwargs )
def _a ( self : Optional[Any] , _lowerCamelCase : str , _lowerCamelCase : Optional[str] = None ):
"""simple docstring"""
A_ : Dict = Path(_lowerCamelCase )
assert save_dir.is_dir(), f'{save_directory} should be a directory'
A_ : Optional[int] = save_dir / (
(filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''vocab_file''']
)
A_ : Optional[int] = save_dir / (
(filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''spm_file''']
)
save_json(self.encoder , _lowerCamelCase )
if os.path.abspath(self.spm_file ) != os.path.abspath(_lowerCamelCase ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , _lowerCamelCase )
elif not os.path.isfile(self.spm_file ):
with open(_lowerCamelCase , '''wb''' ) as fi:
A_ : List[str] = self.sp_model.serialized_model_proto()
fi.write(_lowerCamelCase )
return (str(_lowerCamelCase ), str(_lowerCamelCase ))
def snake_case__ ( lowerCamelCase__ : str , lowerCamelCase__ : Dict[str, Any] ) -> sentencepiece.SentencePieceProcessor:
A_ : Tuple = sentencepiece.SentencePieceProcessor(**lowerCamelCase__ )
spm.Load(str(lowerCamelCase__ ) )
return spm
def snake_case__ ( lowerCamelCase__ : str ) -> Union[Dict, List]:
with open(lowerCamelCase__ , '''r''' ) as f:
return json.load(lowerCamelCase__ )
def snake_case__ ( lowerCamelCase__ : Any , lowerCamelCase__ : str ) -> None:
with open(lowerCamelCase__ , '''w''' ) as f:
json.dump(lowerCamelCase__ , lowerCamelCase__ , indent=2 )
| 4 | 1 |
'''simple docstring'''
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class UpperCamelCase_ (a__ ):
"""simple docstring"""
_lowerCAmelCase = 42
_lowerCAmelCase = None
def snake_case__ ( lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Tuple=0.999 , lowerCamelCase__ : List[Any]="cosine" , ) -> Optional[int]:
if alpha_transform_type == "cosine":
def alpha_bar_fn(lowerCamelCase__ : List[Any] ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(lowerCamelCase__ : Optional[int] ):
return math.exp(t * -12.0 )
else:
        raise ValueError(f'Unsupported alpha_transform_type: {alpha_transform_type}' )
A_ : List[Any] = []
for i in range(lowerCamelCase__ ):
A_ : Tuple = i / num_diffusion_timesteps
A_ : Union[str, Any] = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(lowerCamelCase__ ) / alpha_bar_fn(lowerCamelCase__ ) , lowerCamelCase__ ) )
    return torch.tensor(lowerCamelCase__ , dtype=torch.float32 )
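# A minimal standalone sketch of the cosine schedule computed above (the
# helper name and its arguments are illustrative, not from the original
# file): beta_i = 1 - alpha_bar((i + 1) / T) / alpha_bar(i / T), clipped at
# max_beta, following Nichol & Dhariwal (2021).
def _cosine_betas_sketch(num_steps: int, max_beta: float = 0.999) -> torch.Tensor:
    def alpha_bar(t: float) -> float:
        return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    betas = [
        min(1 - alpha_bar((i + 1) / num_steps) / alpha_bar(i / num_steps), max_beta)
        for i in range(num_steps)
    ]
    return torch.tensor(betas, dtype=torch.float32)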
class UpperCamelCase_ (a__, a__ ):
"""simple docstring"""
_lowerCAmelCase = 1
@register_to_config
def __init__( self : Any , _lowerCamelCase : int = 1000 , _lowerCamelCase : float = 0.00_01 , _lowerCamelCase : float = 0.02 , _lowerCamelCase : str = "linear" , _lowerCamelCase : Optional[Union[np.ndarray, List[float]]] = None , _lowerCamelCase : bool = True , _lowerCamelCase : bool = True , _lowerCamelCase : int = 0 , _lowerCamelCase : str = "epsilon" , _lowerCamelCase : float = 1.0 , **_lowerCamelCase : Tuple , ):
"""simple docstring"""
if kwargs.get('''set_alpha_to_one''' , _lowerCamelCase ) is not None:
A_ : Optional[Any] = (
'''The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead.'''
)
deprecate('''set_alpha_to_one''' , '''1.0.0''' , _lowerCamelCase , standard_warn=_lowerCamelCase )
A_ : Union[str, Any] = kwargs['''set_alpha_to_one''']
if trained_betas is not None:
            A_ : List[Any] = torch.tensor(_lowerCamelCase , dtype=torch.float32 )
elif beta_schedule == "linear":
            A_ : List[Any] = torch.linspace(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , dtype=torch.float32 )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
A_ : int = (
                torch.linspace(beta_start**0.5 , beta_end**0.5 , _lowerCamelCase , dtype=torch.float32 ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
A_ : List[str] = betas_for_alpha_bar(_lowerCamelCase )
else:
            raise NotImplementedError(f'{beta_schedule} is not implemented for {self.__class__}' )
A_ : Tuple = 1.0 - self.betas
A_ : Optional[int] = torch.cumprod(self.alphas , dim=0 )
        # At every step in inverted DDIM, we look at the next alphas_cumprod.
        # For the final step there is no next alphas_cumprod, and the index would be out of bounds;
        # `set_alpha_to_zero` decides whether we simply set this parameter to zero
        # (in which case self.step() just outputs the predicted noise)
        # or use the final alpha of the "non-previous" timestep.
A_ : List[str] = torch.tensor(0.0 ) if set_alpha_to_zero else self.alphas_cumprod[-1]
# standard deviation of the initial noise distribution
A_ : Union[str, Any] = 1.0
# setable values
A_ : Optional[int] = None
        A_ : List[Any] = torch.from_numpy(np.arange(0 , _lowerCamelCase ).copy().astype(np.int64 ) )
def _a ( self : Any , _lowerCamelCase : torch.FloatTensor , _lowerCamelCase : Optional[int] = None ):
"""simple docstring"""
return sample
def _a ( self : List[str] , _lowerCamelCase : int , _lowerCamelCase : Union[str, torch.device] = None ):
"""simple docstring"""
if num_inference_steps > self.config.num_train_timesteps:
raise ValueError(
f'`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:'
f' {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle'
f' maximal {self.config.num_train_timesteps} timesteps.' )
A_ : List[str] = num_inference_steps
A_ : Any = self.config.num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
        A_ : Union[str, Any] = (np.arange(0 , _lowerCamelCase ) * step_ratio).round().copy().astype(np.int64 )
A_ : List[Any] = torch.from_numpy(_lowerCamelCase ).to(_lowerCamelCase )
self.timesteps += self.config.steps_offset
def _a ( self : Tuple , _lowerCamelCase : torch.FloatTensor , _lowerCamelCase : int , _lowerCamelCase : torch.FloatTensor , _lowerCamelCase : float = 0.0 , _lowerCamelCase : bool = False , _lowerCamelCase : Optional[torch.FloatTensor] = None , _lowerCamelCase : bool = True , ):
"""simple docstring"""
A_ : Union[str, Any] = timestep + self.config.num_train_timesteps // self.num_inference_steps
# 2. compute alphas, betas
# change original implementation to exactly match noise levels for analogous forward process
A_ : int = self.alphas_cumprod[timestep]
A_ : Optional[Any] = (
self.alphas_cumprod[prev_timestep]
if prev_timestep < self.config.num_train_timesteps
else self.final_alpha_cumprod
)
A_ : Tuple = 1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
if self.config.prediction_type == "epsilon":
A_ : Dict = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
A_ : Dict = model_output
elif self.config.prediction_type == "sample":
A_ : Tuple = model_output
A_ : List[str] = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
elif self.config.prediction_type == "v_prediction":
A_ : Dict = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
A_ : Union[str, Any] = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
else:
raise ValueError(
f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or'
''' `v_prediction`''' )
# 4. Clip or threshold "predicted x_0"
if self.config.clip_sample:
A_ : List[Any] = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
# 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
A_ : List[str] = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon
# 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
A_ : Optional[int] = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
if not return_dict:
return (prev_sample, pred_original_sample)
return DDIMSchedulerOutput(prev_sample=_lowerCamelCase , pred_original_sample=_lowerCamelCase )
def __len__( self : Any ):
"""simple docstring"""
return self.config.num_train_timesteps
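# Standalone sketch of the deterministic update performed in step() above,
# restricted to the "epsilon" prediction branch (function and argument names
# are illustrative assumptions):
#   x0_hat = (x_t - sqrt(1 - a_t) * eps) / sqrt(a_t)
#   x_next = sqrt(a_next) * x0_hat + sqrt(1 - a_next) * eps
def _ddim_inverse_step_sketch(
    sample: torch.Tensor, eps: torch.Tensor, alpha_prod_t: float, alpha_prod_t_next: float
) -> torch.Tensor:
    pred_original_sample = (sample - (1 - alpha_prod_t) ** 0.5 * eps) / alpha_prod_t**0.5
    return alpha_prod_t_next**0.5 * pred_original_sample + (1 - alpha_prod_t_next) ** 0.5 * eps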
| 4 |
'''simple docstring'''
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
snake_case__ = sys.version_info >= (3, 10)
def snake_case__ ( lowerCamelCase__ : Union[str, Any]=None , lowerCamelCase__ : str=None ) -> List[Any]:
return field(default_factory=lambda: default , metadata=lowerCamelCase__ )
@dataclass
class UpperCamelCase_ :
"""simple docstring"""
_lowerCAmelCase = 42
_lowerCAmelCase = 42
_lowerCAmelCase = 42
_lowerCAmelCase = 42
@dataclass
class UpperCamelCase_ :
"""simple docstring"""
_lowerCAmelCase = 4_2
_lowerCAmelCase = field(default='toto', metadata={'help': 'help message'} )
@dataclass
class UpperCamelCase_ :
"""simple docstring"""
_lowerCAmelCase = False
_lowerCAmelCase = True
_lowerCAmelCase = None
class UpperCamelCase_ (a__ ):
"""simple docstring"""
_lowerCAmelCase = 'titi'
_lowerCAmelCase = 'toto'
class UpperCamelCase_ (a__ ):
"""simple docstring"""
_lowerCAmelCase = 'titi'
_lowerCAmelCase = 'toto'
_lowerCAmelCase = 4_2
@dataclass
class UpperCamelCase_ :
"""simple docstring"""
_lowerCAmelCase = "toto"
def _a ( self : Optional[Any] ):
"""simple docstring"""
A_ : Optional[int] = BasicEnum(self.foo )
@dataclass
class UpperCamelCase_ :
"""simple docstring"""
_lowerCAmelCase = "toto"
def _a ( self : Tuple ):
"""simple docstring"""
A_ : Optional[Any] = MixedTypeEnum(self.foo )
@dataclass
class UpperCamelCase_ :
"""simple docstring"""
_lowerCAmelCase = None
_lowerCAmelCase = field(default=a__, metadata={'help': 'help message'} )
_lowerCAmelCase = None
_lowerCAmelCase = list_field(default=[] )
_lowerCAmelCase = list_field(default=[] )
@dataclass
class UpperCamelCase_ :
"""simple docstring"""
_lowerCAmelCase = list_field(default=[] )
_lowerCAmelCase = list_field(default=[1, 2, 3] )
_lowerCAmelCase = list_field(default=['Hallo', 'Bonjour', 'Hello'] )
_lowerCAmelCase = list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class UpperCamelCase_ :
"""simple docstring"""
_lowerCAmelCase = field()
_lowerCAmelCase = field()
_lowerCAmelCase = field()
def _a ( self : Tuple ):
"""simple docstring"""
A_ : Tuple = BasicEnum(self.required_enum )
@dataclass
class UpperCamelCase_ :
"""simple docstring"""
_lowerCAmelCase = 42
_lowerCAmelCase = field()
_lowerCAmelCase = None
_lowerCAmelCase = field(default='toto', metadata={'help': 'help message'} )
_lowerCAmelCase = list_field(default=['Hallo', 'Bonjour', 'Hello'] )
if is_python_no_less_than_3_10:
@dataclass
class UpperCamelCase_ :
"""simple docstring"""
_lowerCAmelCase = False
_lowerCAmelCase = True
_lowerCAmelCase = None
@dataclass
class UpperCamelCase_ :
"""simple docstring"""
_lowerCAmelCase = None
_lowerCAmelCase = field(default=a__, metadata={'help': 'help message'} )
_lowerCAmelCase = None
_lowerCAmelCase = list_field(default=[] )
_lowerCAmelCase = list_field(default=[] )
class UpperCamelCase_ (unittest.TestCase ):
"""simple docstring"""
def _a ( self : List[str] , _lowerCamelCase : argparse.ArgumentParser , _lowerCamelCase : argparse.ArgumentParser ):
"""simple docstring"""
self.assertEqual(len(a._actions ) , len(b._actions ) )
for x, y in zip(a._actions , b._actions ):
A_ : Union[str, Any] = {k: v for k, v in vars(_lowerCamelCase ).items() if k != '''container'''}
A_ : Optional[Any] = {k: v for k, v in vars(_lowerCamelCase ).items() if k != '''container'''}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get('''choices''' , _lowerCamelCase ) and yy.get('''choices''' , _lowerCamelCase ):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx['''type'''](_lowerCamelCase ) , yy['''type'''](_lowerCamelCase ) )
del xx["type"], yy["type"]
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
def _a ( self : Optional[int] ):
"""simple docstring"""
A_ : Union[str, Any] = HfArgumentParser(_lowerCamelCase )
A_ : Optional[Any] = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=_lowerCamelCase , required=_lowerCamelCase )
expected.add_argument('''--bar''' , type=_lowerCamelCase , required=_lowerCamelCase )
expected.add_argument('''--baz''' , type=_lowerCamelCase , required=_lowerCamelCase )
expected.add_argument('''--flag''' , type=_lowerCamelCase , default=_lowerCamelCase , const=_lowerCamelCase , nargs='''?''' )
self.argparsersEqual(_lowerCamelCase , _lowerCamelCase )
A_ : Union[str, Any] = ['''--foo''', '''1''', '''--baz''', '''quux''', '''--bar''', '''0.5''']
((A_) ,) : List[str] = parser.parse_args_into_dataclasses(_lowerCamelCase , look_for_args_file=_lowerCamelCase )
self.assertFalse(example.flag )
def _a ( self : Dict ):
"""simple docstring"""
A_ : int = HfArgumentParser(_lowerCamelCase )
A_ : int = argparse.ArgumentParser()
expected.add_argument('''--foo''' , default=42 , type=_lowerCamelCase )
expected.add_argument('''--baz''' , default='''toto''' , type=_lowerCamelCase , help='''help message''' )
self.argparsersEqual(_lowerCamelCase , _lowerCamelCase )
def _a ( self : Dict ):
"""simple docstring"""
A_ : Any = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=_lowerCamelCase , default=_lowerCamelCase , const=_lowerCamelCase , nargs='''?''' )
expected.add_argument('''--baz''' , type=_lowerCamelCase , default=_lowerCamelCase , const=_lowerCamelCase , nargs='''?''' )
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument('''--no_baz''' , action='''store_false''' , default=_lowerCamelCase , dest='''baz''' )
expected.add_argument('''--opt''' , type=_lowerCamelCase , default=_lowerCamelCase )
A_ : Dict = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(_lowerCamelCase )
for dataclass_type in dataclass_types:
A_ : Any = HfArgumentParser(_lowerCamelCase )
self.argparsersEqual(_lowerCamelCase , _lowerCamelCase )
A_ : List[Any] = parser.parse_args([] )
self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , baz=_lowerCamelCase , opt=_lowerCamelCase ) )
A_ : Optional[int] = parser.parse_args(['''--foo''', '''--no_baz'''] )
self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , baz=_lowerCamelCase , opt=_lowerCamelCase ) )
A_ : Union[str, Any] = parser.parse_args(['''--foo''', '''--baz'''] )
self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , baz=_lowerCamelCase , opt=_lowerCamelCase ) )
A_ : List[str] = parser.parse_args(['''--foo''', '''True''', '''--baz''', '''True''', '''--opt''', '''True'''] )
self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , baz=_lowerCamelCase , opt=_lowerCamelCase ) )
A_ : List[Any] = parser.parse_args(['''--foo''', '''False''', '''--baz''', '''False''', '''--opt''', '''False'''] )
self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , baz=_lowerCamelCase , opt=_lowerCamelCase ) )
def _a ( self : List[Any] ):
"""simple docstring"""
A_ : str = HfArgumentParser(_lowerCamelCase )
A_ : Optional[int] = argparse.ArgumentParser()
expected.add_argument(
'''--foo''' , default='''toto''' , choices=['''titi''', '''toto''', 42] , type=make_choice_type_function(['''titi''', '''toto''', 42] ) , )
self.argparsersEqual(_lowerCamelCase , _lowerCamelCase )
A_ : str = parser.parse_args([] )
self.assertEqual(args.foo , '''toto''' )
A_ : List[Any] = parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
A_ : int = parser.parse_args(['''--foo''', '''titi'''] )
self.assertEqual(args.foo , '''titi''' )
A_ : Dict = parser.parse_args_into_dataclasses(['''--foo''', '''titi'''] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
A_ : Tuple = parser.parse_args(['''--foo''', '''42'''] )
self.assertEqual(args.foo , 42 )
A_ : List[str] = parser.parse_args_into_dataclasses(['''--foo''', '''42'''] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
def _a ( self : Optional[int] ):
"""simple docstring"""
@dataclass
class UpperCamelCase_ :
"""simple docstring"""
_lowerCAmelCase = "toto"
A_ : List[str] = HfArgumentParser(_lowerCamelCase )
A_ : Tuple = argparse.ArgumentParser()
expected.add_argument(
'''--foo''' , default='''toto''' , choices=('''titi''', '''toto''', 42) , type=make_choice_type_function(['''titi''', '''toto''', 42] ) , )
self.argparsersEqual(_lowerCamelCase , _lowerCamelCase )
A_ : Tuple = parser.parse_args([] )
self.assertEqual(args.foo , '''toto''' )
A_ : List[str] = parser.parse_args(['''--foo''', '''titi'''] )
self.assertEqual(args.foo , '''titi''' )
A_ : int = parser.parse_args(['''--foo''', '''42'''] )
self.assertEqual(args.foo , 42 )
def _a ( self : Dict ):
"""simple docstring"""
A_ : int = HfArgumentParser(_lowerCamelCase )
A_ : List[Any] = argparse.ArgumentParser()
expected.add_argument('''--foo_int''' , nargs='''+''' , default=[] , type=_lowerCamelCase )
expected.add_argument('''--bar_int''' , nargs='''+''' , default=[1, 2, 3] , type=_lowerCamelCase )
expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=_lowerCamelCase )
expected.add_argument('''--foo_float''' , nargs='''+''' , default=[0.1, 0.2, 0.3] , type=_lowerCamelCase )
self.argparsersEqual(_lowerCamelCase , _lowerCamelCase )
A_ : Optional[int] = parser.parse_args([] )
self.assertEqual(
_lowerCamelCase , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['''Hallo''', '''Bonjour''', '''Hello'''] , foo_float=[0.1, 0.2, 0.3] ) , )
A_ : str = parser.parse_args('''--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'''.split() )
self.assertEqual(_lowerCamelCase , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['''a''', '''b''', '''c'''] , foo_float=[0.1, 0.7] ) )
def _a ( self : Dict ):
"""simple docstring"""
A_ : Optional[Any] = argparse.ArgumentParser()
expected.add_argument('''--foo''' , default=_lowerCamelCase , type=_lowerCamelCase )
expected.add_argument('''--bar''' , default=_lowerCamelCase , type=_lowerCamelCase , help='''help message''' )
expected.add_argument('''--baz''' , default=_lowerCamelCase , type=_lowerCamelCase )
expected.add_argument('''--ces''' , nargs='''+''' , default=[] , type=_lowerCamelCase )
expected.add_argument('''--des''' , nargs='''+''' , default=[] , type=_lowerCamelCase )
A_ : Tuple = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(_lowerCamelCase )
for dataclass_type in dataclass_types:
A_ : int = HfArgumentParser(_lowerCamelCase )
self.argparsersEqual(_lowerCamelCase , _lowerCamelCase )
A_ : List[Any] = parser.parse_args([] )
self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , bar=_lowerCamelCase , baz=_lowerCamelCase , ces=[] , des=[] ) )
A_ : Optional[Any] = parser.parse_args('''--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'''.split() )
self.assertEqual(_lowerCamelCase , Namespace(foo=12 , bar=3.14 , baz='''42''' , ces=['''a''', '''b''', '''c'''] , des=[1, 2, 3] ) )
def _a ( self : List[Any] ):
"""simple docstring"""
A_ : List[Any] = HfArgumentParser(_lowerCamelCase )
A_ : Dict = argparse.ArgumentParser()
expected.add_argument('''--required_list''' , nargs='''+''' , type=_lowerCamelCase , required=_lowerCamelCase )
expected.add_argument('''--required_str''' , type=_lowerCamelCase , required=_lowerCamelCase )
expected.add_argument(
'''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=_lowerCamelCase , )
self.argparsersEqual(_lowerCamelCase , _lowerCamelCase )
def _a ( self : Optional[Any] ):
"""simple docstring"""
A_ : Union[str, Any] = HfArgumentParser(_lowerCamelCase )
A_ : List[Any] = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=_lowerCamelCase , required=_lowerCamelCase )
expected.add_argument(
'''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=_lowerCamelCase , )
expected.add_argument('''--opt''' , type=_lowerCamelCase , default=_lowerCamelCase )
expected.add_argument('''--baz''' , default='''toto''' , type=_lowerCamelCase , help='''help message''' )
expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=_lowerCamelCase )
self.argparsersEqual(_lowerCamelCase , _lowerCamelCase )
def _a ( self : Tuple ):
"""simple docstring"""
A_ : List[Any] = HfArgumentParser(_lowerCamelCase )
A_ : Union[str, Any] = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
A_ : Optional[int] = parser.parse_dict(_lowerCamelCase )[0]
A_ : str = BasicExample(**_lowerCamelCase )
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
def _a ( self : List[str] ):
"""simple docstring"""
A_ : Any = HfArgumentParser(_lowerCamelCase )
A_ : List[str] = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
'''extra''': 42,
}
self.assertRaises(_lowerCamelCase , parser.parse_dict , _lowerCamelCase , allow_extra_keys=_lowerCamelCase )
def _a ( self : Optional[Any] ):
"""simple docstring"""
A_ : Union[str, Any] = HfArgumentParser(_lowerCamelCase )
A_ : List[str] = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
A_ : Tuple = os.path.join(_lowerCamelCase , '''temp_json''' )
os.mkdir(_lowerCamelCase )
with open(temp_local_path + '''.json''' , '''w+''' ) as f:
json.dump(_lowerCamelCase , _lowerCamelCase )
A_ : List[str] = parser.parse_yaml_file(Path(temp_local_path + '''.json''' ) )[0]
A_ : Optional[Any] = BasicExample(**_lowerCamelCase )
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
def _a ( self : int ):
"""simple docstring"""
A_ : int = HfArgumentParser(_lowerCamelCase )
A_ : Tuple = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
A_ : int = os.path.join(_lowerCamelCase , '''temp_yaml''' )
os.mkdir(_lowerCamelCase )
with open(temp_local_path + '''.yaml''' , '''w+''' ) as f:
yaml.dump(_lowerCamelCase , _lowerCamelCase )
A_ : Optional[Any] = parser.parse_yaml_file(Path(temp_local_path + '''.yaml''' ) )[0]
A_ : int = BasicExample(**_lowerCamelCase )
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A_ : Dict = HfArgumentParser(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
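# Illustrative sketch of the core API these tests exercise (the dataclass and
# its field are assumptions, not taken from the test file): HfArgumentParser
# maps dataclass fields to argparse arguments and parses CLI strings back into
# dataclass instances.
@dataclass
class _SketchArguments:
    learning_rate: float = 5e-5

def _sketch_parse() -> None:
    parser = HfArgumentParser(_SketchArguments)
    (parsed,) = parser.parse_args_into_dataclasses(
        ["--learning_rate", "3e-5"], look_for_args_file=False
    )
    assert parsed.learning_rate == 3e-5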
| 4 | 1 |
'''simple docstring'''
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class UpperCamelCase_ (a__ ):
"""simple docstring"""
@staticmethod
@abstractmethod
def _a ( _lowerCamelCase : ArgumentParser ):
"""simple docstring"""
raise NotImplementedError()
@abstractmethod
def _a ( self : Union[str, Any] ):
"""simple docstring"""
raise NotImplementedError()
| 4 |
'''simple docstring'''
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
snake_case__ = get_tests_dir("""fixtures""")
class UpperCamelCase_ (unittest.TestCase ):
"""simple docstring"""
def _a ( self : List[str] ):
"""simple docstring"""
A_ : List[Any] = mock.Mock()
A_ : List[str] = 500
A_ : Tuple = {}
A_ : int = HTTPError
A_ : Optional[Any] = {}
# Download this model to make sure it's in the cache.
A_ : Tuple = ViTImageProcessor.from_pretrained('''hf-internal-testing/tiny-random-vit''' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('''requests.Session.request''' , return_value=_lowerCamelCase ) as mock_head:
A_ : List[Any] = ViTImageProcessor.from_pretrained('''hf-internal-testing/tiny-random-vit''' )
            # This check ensures we did call the fake head request
mock_head.assert_called()
def _a ( self : Tuple ):
"""simple docstring"""
A_ : Tuple = ViTImageProcessor.from_pretrained(
'''https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json''' )
def _a ( self : Dict ):
"""simple docstring"""
with self.assertRaises(_lowerCamelCase ):
# config is in subfolder, the following should not work without specifying the subfolder
A_ : Any = AutoImageProcessor.from_pretrained('''hf-internal-testing/stable-diffusion-all-variants''' )
A_ : Tuple = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/stable-diffusion-all-variants''' , subfolder='''feature_extractor''' )
self.assertIsNotNone(_lowerCamelCase )
@is_staging_test
class UpperCamelCase_ (unittest.TestCase ):
"""simple docstring"""
@classmethod
def _a ( cls : Tuple ):
"""simple docstring"""
A_ : int = TOKEN
HfFolder.save_token(_lowerCamelCase )
@classmethod
def _a ( cls : str ):
"""simple docstring"""
try:
delete_repo(token=cls._token , repo_id='''test-image-processor''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-image-processor-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-image-processor''' )
except HTTPError:
pass
def _a ( self : List[Any] ):
"""simple docstring"""
A_ : Dict = ViTImageProcessor.from_pretrained(_lowerCamelCase )
image_processor.push_to_hub('''test-image-processor''' , use_auth_token=self._token )
A_ : Optional[int] = ViTImageProcessor.from_pretrained(f'{USER}/test-image-processor' )
for k, v in image_processor.__dict__.items():
self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id='''test-image-processor''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
_lowerCamelCase , repo_id='''test-image-processor''' , push_to_hub=_lowerCamelCase , use_auth_token=self._token )
A_ : List[Any] = ViTImageProcessor.from_pretrained(f'{USER}/test-image-processor' )
for k, v in image_processor.__dict__.items():
self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) )
def _a ( self : Optional[Any] ):
"""simple docstring"""
A_ : int = ViTImageProcessor.from_pretrained(_lowerCamelCase )
image_processor.push_to_hub('''valid_org/test-image-processor''' , use_auth_token=self._token )
A_ : List[str] = ViTImageProcessor.from_pretrained('''valid_org/test-image-processor''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-image-processor''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
_lowerCamelCase , repo_id='''valid_org/test-image-processor-org''' , push_to_hub=_lowerCamelCase , use_auth_token=self._token )
A_ : Any = ViTImageProcessor.from_pretrained('''valid_org/test-image-processor-org''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) )
def _a ( self : Optional[Any] ):
"""simple docstring"""
CustomImageProcessor.register_for_auto_class()
A_ : Any = CustomImageProcessor.from_pretrained(_lowerCamelCase )
image_processor.push_to_hub('''test-dynamic-image-processor''' , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
image_processor.auto_map , {'''AutoImageProcessor''': '''custom_image_processing.CustomImageProcessor'''} , )
A_ : str = AutoImageProcessor.from_pretrained(
f'{USER}/test-dynamic-image-processor' , trust_remote_code=_lowerCamelCase )
# Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
self.assertEqual(new_image_processor.__class__.__name__ , '''CustomImageProcessor''' )
| 4 | 1 |
'''simple docstring'''
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def snake_case__ ( lowerCamelCase__ : int , lowerCamelCase__ : int , lowerCamelCase__ : float = 1 / sqrt(2 ) ) -> IIRFilter:
A_ : Dict = tau * frequency / samplerate
A_ : Union[str, Any] = sin(lowerCamelCase__ )
A_ : str = cos(lowerCamelCase__ )
A_ : Optional[int] = _sin / (2 * q_factor)
A_ : Dict = (1 - _cos) / 2
A_ : Optional[int] = 1 - _cos
A_ : List[Any] = 1 + alpha
A_ : str = -2 * _cos
A_ : Optional[int] = 1 - alpha
A_ : Union[str, Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def snake_case__ ( lowerCamelCase__ : int , lowerCamelCase__ : int , lowerCamelCase__ : float = 1 / sqrt(2 ) ) -> IIRFilter:
A_ : Union[str, Any] = tau * frequency / samplerate
A_ : str = sin(lowerCamelCase__ )
A_ : str = cos(lowerCamelCase__ )
A_ : Union[str, Any] = _sin / (2 * q_factor)
A_ : str = (1 + _cos) / 2
A_ : Optional[int] = -1 - _cos
A_ : Optional[Any] = 1 + alpha
A_ : int = -2 * _cos
A_ : Tuple = 1 - alpha
A_ : Optional[Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def snake_case__ ( lowerCamelCase__ : int , lowerCamelCase__ : int , lowerCamelCase__ : float = 1 / sqrt(2 ) ) -> IIRFilter:
A_ : Optional[Any] = tau * frequency / samplerate
A_ : List[str] = sin(lowerCamelCase__ )
A_ : str = cos(lowerCamelCase__ )
A_ : Optional[Any] = _sin / (2 * q_factor)
A_ : List[str] = _sin / 2
A_ : Optional[int] = 0
A_ : List[str] = -ba
A_ : Optional[Any] = 1 + alpha
A_ : Optional[int] = -2 * _cos
A_ : Tuple = 1 - alpha
A_ : Dict = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def snake_case__ ( lowerCamelCase__ : int , lowerCamelCase__ : int , lowerCamelCase__ : float = 1 / sqrt(2 ) ) -> IIRFilter:
A_ : Dict = tau * frequency / samplerate
A_ : List[Any] = sin(lowerCamelCase__ )
A_ : str = cos(lowerCamelCase__ )
A_ : Optional[int] = _sin / (2 * q_factor)
A_ : Optional[Any] = 1 - alpha
A_ : List[str] = -2 * _cos
A_ : Tuple = 1 + alpha
A_ : Any = IIRFilter(2 )
filt.set_coefficients([ba, ba, ba] , [ba, ba, ba] )
return filt
def snake_case__ ( lowerCamelCase__ : int , lowerCamelCase__ : int , lowerCamelCase__ : float , lowerCamelCase__ : float = 1 / sqrt(2 ) , ) -> IIRFilter:
A_ : str = tau * frequency / samplerate
A_ : str = sin(lowerCamelCase__ )
A_ : Union[str, Any] = cos(lowerCamelCase__ )
A_ : Any = _sin / (2 * q_factor)
A_ : Tuple = 1_0 ** (gain_db / 4_0)
A_ : List[str] = 1 + alpha * big_a
A_ : str = -2 * _cos
A_ : Optional[int] = 1 - alpha * big_a
A_ : List[Any] = 1 + alpha / big_a
A_ : Dict = -2 * _cos
A_ : Optional[Any] = 1 - alpha / big_a
A_ : List[Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def snake_case__ ( lowerCamelCase__ : int , lowerCamelCase__ : int , lowerCamelCase__ : float , lowerCamelCase__ : float = 1 / sqrt(2 ) , ) -> IIRFilter:
A_ : Any = tau * frequency / samplerate
A_ : Any = sin(lowerCamelCase__ )
A_ : Any = cos(lowerCamelCase__ )
A_ : str = _sin / (2 * q_factor)
A_ : Tuple = 1_0 ** (gain_db / 4_0)
A_ : List[Any] = (big_a + 1) - (big_a - 1) * _cos
A_ : int = (big_a + 1) + (big_a - 1) * _cos
A_ : Dict = (big_a - 1) - (big_a + 1) * _cos
A_ : str = (big_a - 1) + (big_a + 1) * _cos
A_ : List[str] = 2 * sqrt(lowerCamelCase__ ) * alpha
A_ : Optional[Any] = big_a * (pmc + aaa)
A_ : Any = 2 * big_a * mpc
A_ : int = big_a * (pmc - aaa)
A_ : Optional[int] = ppmc + aaa
A_ : int = -2 * pmpc
A_ : Tuple = ppmc - aaa
A_ : List[Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def snake_case__ ( lowerCamelCase__ : int , lowerCamelCase__ : int , lowerCamelCase__ : float , lowerCamelCase__ : float = 1 / sqrt(2 ) , ) -> IIRFilter:
A_ : Any = tau * frequency / samplerate
A_ : int = sin(lowerCamelCase__ )
A_ : str = cos(lowerCamelCase__ )
A_ : Tuple = _sin / (2 * q_factor)
A_ : Any = 1_0 ** (gain_db / 4_0)
A_ : List[str] = (big_a + 1) - (big_a - 1) * _cos
A_ : Any = (big_a + 1) + (big_a - 1) * _cos
A_ : Tuple = (big_a - 1) - (big_a + 1) * _cos
A_ : Optional[int] = (big_a - 1) + (big_a + 1) * _cos
A_ : Any = 2 * sqrt(lowerCamelCase__ ) * alpha
A_ : Optional[int] = big_a * (ppmc + aaa)
A_ : Optional[Any] = -2 * big_a * pmpc
A_ : Union[str, Any] = big_a * (ppmc - aaa)
A_ : Tuple = pmc + aaa
A_ : List[str] = 2 * mpc
A_ : Optional[Any] = pmc - aaa
A_ : Optional[Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
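# A self-contained sketch (not part of the original module) of the same RBJ
# Audio EQ Cookbook low-pass design used by the first filter above, but
# returning coefficients normalized by a0 so they can be fed straight into the
# difference equation
#   y[n] = b0*x[n] + b1*x[n-1] + b2*x[n-2] - a1*y[n-1] - a2*y[n-2].
# The function name is an illustrative assumption.
def _lowpass_coefficients_sketch(
    frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)
) -> tuple[list[float], list[float]]:
    omega = tau * frequency / samplerate
    alpha = sin(omega) / (2 * q_factor)
    b1 = 1 - cos(omega)
    b0 = b2 = b1 / 2
    a0, a1, a2 = 1 + alpha, -2 * cos(omega), 1 - alpha
    return [b / a0 for b in (b0, b1, b2)], [1.0, a1 / a0, a2 / a0]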
| 4 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class UpperCamelCase_ (a__ ):
"""simple docstring"""
_lowerCAmelCase = (
        'This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image. '
        'It takes two arguments named `image` which should be the original image, and `label` which should be a text '
        'describing the elements that should be identified in the segmentation mask. The tool returns the mask.'
)
_lowerCAmelCase = 'CIDAS/clipseg-rd64-refined'
_lowerCAmelCase = 'image_segmenter'
_lowerCAmelCase = CLIPSegForImageSegmentation
_lowerCAmelCase = ['image', 'text']
_lowerCAmelCase = ['image']
def __init__( self : Optional[int] , *_lowerCamelCase : Optional[int] , **_lowerCamelCase : Union[str, Any] ):
"""simple docstring"""
requires_backends(self , ['''vision'''] )
super().__init__(*_lowerCamelCase , **_lowerCamelCase )
def _a ( self : List[str] , _lowerCamelCase : "Image" , _lowerCamelCase : str ):
"""simple docstring"""
return self.pre_processor(text=[label] , images=[image] , padding=_lowerCamelCase , return_tensors='''pt''' )
def _a ( self : Union[str, Any] , _lowerCamelCase : Optional[int] ):
"""simple docstring"""
with torch.no_grad():
A_ : Optional[int] = self.model(**_lowerCamelCase ).logits
return logits
def _a ( self : List[str] , _lowerCamelCase : Optional[int] ):
"""simple docstring"""
A_ : int = outputs.cpu().detach().numpy()
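        # Binarize the raw logits into a {0, 1} mask before scaling to 8-bit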
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8 ) )
| 4 | 1 |
'''simple docstring'''
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
snake_case__ = numpy.array([0, 0])
snake_case__ = numpy.array([0.5, 0.8_6_6_0_2_5_4])
snake_case__ = numpy.array([1, 0])
snake_case__ = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def snake_case__ ( lowerCamelCase__ : list[numpy.ndarray] , lowerCamelCase__ : int ) -> list[numpy.ndarray]:
A_ : Any = initial_vectors
for _ in range(lowerCamelCase__ ):
A_ : List[Any] = iteration_step(lowerCamelCase__ )
return vectors
def snake_case__ ( lowerCamelCase__ : list[numpy.ndarray] ) -> list[numpy.ndarray]:
A_ : str = []
for i, start_vector in enumerate(vectors[:-1] ):
A_ : Optional[Any] = vectors[i + 1]
new_vectors.append(lowerCamelCase__ )
A_ : str = end_vector - start_vector
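        # Koch rule: replace each edge with four edges by walking one third
        # along, turning 60 degrees outwards for the tip, then returning to
        # the original line at the two-thirds point.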
new_vectors.append(start_vector + difference_vector / 3 )
new_vectors.append(
start_vector + difference_vector / 3 + rotate(difference_vector / 3 , 6_0 ) )
new_vectors.append(start_vector + difference_vector * 2 / 3 )
new_vectors.append(vectors[-1] )
return new_vectors
def snake_case__ ( lowerCamelCase__ : numpy.ndarray , lowerCamelCase__ : float ) -> numpy.ndarray:
A_ : str = numpy.radians(lowerCamelCase__ )
A_ ,A_ : Tuple = numpy.cos(lowerCamelCase__ ), numpy.sin(lowerCamelCase__ )
A_ : Any = numpy.array(((c, -s), (s, c)) )
return numpy.dot(lowerCamelCase__ , lowerCamelCase__ )
def snake_case__ ( lowerCamelCase__ : list[numpy.ndarray] ) -> None:
A_ : Optional[Any] = plt.gca()
axes.set_aspect('''equal''' )
# matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
# y-coordinates as inputs, which are constructed from the vector-list using
# zip()
A_ ,A_ : Union[str, Any] = zip(*lowerCamelCase__ )
plt.plot(lowerCamelCase__ , lowerCamelCase__ )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
snake_case__ = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
| 4 |
'''simple docstring'''
from collections.abc import Sequence
def snake_case__ ( lowerCamelCase__ : Sequence[float] , lowerCamelCase__ : bool = False ) -> float:
if not arr:
return 0
A_ : Union[str, Any] = 0 if allow_empty_subarrays else float('''-inf''' )
A_ : str = 0.0
for num in arr:
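        # Kadane's recurrence: either extend the running subarray with num or
        # restart at num (or at the empty subarray when allowed).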
A_ : Any = max(0 if allow_empty_subarrays else num , curr_sum + num )
A_ : Tuple = max(lowerCamelCase__ , lowerCamelCase__ )
return max_sum
if __name__ == "__main__":
from doctest import testmod
testmod()
snake_case__ = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(F'{max_subarray_sum(nums) = }')
| 4 | 1 |
'''simple docstring'''
from __future__ import annotations
import os
from collections.abc import Mapping
snake_case__ = tuple[int, int]
class UpperCamelCase_ :
"""simple docstring"""
def __init__( self : Tuple , _lowerCamelCase : set[int] , _lowerCamelCase : Mapping[EdgeT, int] ):
"""simple docstring"""
A_ : set[int] = vertices
A_ : dict[EdgeT, int] = {
(min(_lowerCamelCase ), max(_lowerCamelCase )): weight for edge, weight in edges.items()
}
def _a ( self : Any , _lowerCamelCase : EdgeT , _lowerCamelCase : int ):
"""simple docstring"""
self.vertices.add(edge[0] )
self.vertices.add(edge[1] )
A_ : List[Any] = weight
def _a ( self : Dict ):
"""simple docstring"""
A_ : Graph = Graph({min(self.vertices )} , {} )
A_ : EdgeT
A_ : int
A_ : EdgeT
A_ : int
while len(subgraph.vertices ) < len(self.vertices ):
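            # Greedy step: scan all edges crossing the cut (exactly one
            # endpoint already in the tree, detected by the XOR test below)
            # and add the lightest one.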
A_ : Any = max(self.edges.values() ) + 1
for edge, weight in self.edges.items():
if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
if weight < min_weight:
A_ : Optional[Any] = edge
A_ : Union[str, Any] = weight
subgraph.add_edge(_lowerCamelCase , _lowerCamelCase )
return subgraph
def snake_case__ ( lowerCamelCase__ : str = "p107_network.txt" ) -> int:
A_ : str = os.path.abspath(os.path.dirname(lowerCamelCase__ ) )
A_ : str = os.path.join(lowerCamelCase__ , lowerCamelCase__ )
A_ : dict[EdgeT, int] = {}
A_ : list[str]
A_ : int
A_ : int
with open(lowerCamelCase__ ) as f:
A_ : Any = f.read().strip().split('''\n''' )
A_ : Tuple = [line.split(''',''' ) for line in data]
    for edge_a in range(1 , len(lowerCamelCase__ ) ):
        for edge_b in range(edge_a ):
            if adjacency_matrix[edge_a][edge_b] != "-":
                A_ : Union[str, Any] = int(adjacency_matrix[edge_a][edge_b] )
A_ : Graph = Graph(set(range(len(lowerCamelCase__ ) ) ) , lowerCamelCase__ )
A_ : Graph = graph.prims_algorithm()
A_ : int = sum(graph.edges.values() )
A_ : int = sum(subgraph.edges.values() )
return initial_total - optimal_total
if __name__ == "__main__":
print(F'{solution() = }')
| 4 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case__ = logging.get_logger(__name__)
snake_case__ = {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"""
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class UpperCamelCase_ (a__ ):
"""simple docstring"""
_lowerCAmelCase = 'speech_to_text_2'
_lowerCAmelCase = ['past_key_values']
_lowerCAmelCase = {'num_attention_heads': 'decoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self : Optional[Any] , _lowerCamelCase : Optional[Any]=10000 , _lowerCamelCase : List[Any]=6 , _lowerCamelCase : int=2048 , _lowerCamelCase : Dict=4 , _lowerCamelCase : str=0.0 , _lowerCamelCase : int=True , _lowerCamelCase : int="relu" , _lowerCamelCase : Any=256 , _lowerCamelCase : List[Any]=0.1 , _lowerCamelCase : Tuple=0.0 , _lowerCamelCase : Union[str, Any]=0.0 , _lowerCamelCase : Optional[Any]=0.02 , _lowerCamelCase : int=2 , _lowerCamelCase : List[str]=True , _lowerCamelCase : str=1 , _lowerCamelCase : List[Any]=0 , _lowerCamelCase : Optional[int]=2 , _lowerCamelCase : Tuple=1024 , **_lowerCamelCase : int , ):
"""simple docstring"""
A_ : Optional[int] = vocab_size
A_ : Tuple = d_model
A_ : List[str] = decoder_ffn_dim
A_ : str = decoder_layers
A_ : Any = decoder_attention_heads
A_ : int = dropout
A_ : str = attention_dropout
A_ : Optional[int] = activation_dropout
A_ : str = activation_function
A_ : List[Any] = init_std
A_ : Union[str, Any] = decoder_layerdrop
A_ : Any = use_cache
A_ : Optional[Any] = decoder_layers
A_ : Optional[int] = scale_embedding # scale factor will be sqrt(d_model) if True
A_ : Optional[Any] = max_target_positions
super().__init__(
pad_token_id=_lowerCamelCase , bos_token_id=_lowerCamelCase , eos_token_id=_lowerCamelCase , decoder_start_token_id=_lowerCamelCase , **_lowerCamelCase , )
| 4 | 1 |
'''simple docstring'''
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
snake_case__ = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
parser.add_argument(
"""--original_config_file""",
type=str,
required=True,
help="""The YAML config file corresponding to the original architecture.""",
)
parser.add_argument(
"""--num_in_channels""",
default=None,
type=int,
help="""The number of input channels. If `None` number of input channels will be automatically inferred.""",
)
parser.add_argument(
"""--image_size""",
default=5_12,
type=int,
help=(
"""The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"""
""" Base. Use 768 for Stable Diffusion v2."""
),
)
parser.add_argument(
"""--extract_ema""",
action="""store_true""",
help=(
"""Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"""
""" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"""
""" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."""
),
)
parser.add_argument(
"""--upcast_attention""",
action="""store_true""",
help=(
"""Whether the attention computation should always be upcasted. This is necessary when running stable"""
""" diffusion 2.1."""
),
)
parser.add_argument(
"""--from_safetensors""",
action="""store_true""",
help="""If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.""",
)
parser.add_argument(
"""--to_safetensors""",
action="""store_true""",
help="""Whether to store pipeline in safetensors format or not.""",
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
def snake_case__ ( lowerCamelCase__ : Optional[int] ) -> List[Any]:
if string == "True":
return True
elif string == "False":
return False
else:
raise ValueError(f'could not parse string as bool {string}' )
parser.add_argument(
"""--use_linear_projection""", help="""Override for use linear projection""", required=False, type=parse_bool
)
parser.add_argument("""--cross_attention_dim""", help="""Override for cross attention_dim""", required=False, type=int)
snake_case__ = parser.parse_args()
snake_case__ = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 4 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
snake_case__ = logging.get_logger(__name__)
snake_case__ = {
"""microsoft/table-transformer-detection""": (
"""https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"""
),
}
class UpperCamelCase_ (a__ ):
"""simple docstring"""
_lowerCAmelCase = 'table-transformer'
_lowerCAmelCase = ['past_key_values']
_lowerCAmelCase = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self : Any , _lowerCamelCase : Union[str, Any]=True , _lowerCamelCase : Dict=None , _lowerCamelCase : int=3 , _lowerCamelCase : Any=100 , _lowerCamelCase : List[Any]=6 , _lowerCamelCase : Tuple=2048 , _lowerCamelCase : Any=8 , _lowerCamelCase : Dict=6 , _lowerCamelCase : Tuple=2048 , _lowerCamelCase : int=8 , _lowerCamelCase : Optional[int]=0.0 , _lowerCamelCase : List[Any]=0.0 , _lowerCamelCase : List[Any]=True , _lowerCamelCase : Optional[int]="relu" , _lowerCamelCase : Union[str, Any]=256 , _lowerCamelCase : Any=0.1 , _lowerCamelCase : Tuple=0.0 , _lowerCamelCase : Optional[int]=0.0 , _lowerCamelCase : str=0.02 , _lowerCamelCase : Tuple=1.0 , _lowerCamelCase : Dict=False , _lowerCamelCase : str="sine" , _lowerCamelCase : str="resnet50" , _lowerCamelCase : Any=True , _lowerCamelCase : List[str]=False , _lowerCamelCase : Any=1 , _lowerCamelCase : int=5 , _lowerCamelCase : Tuple=2 , _lowerCamelCase : Optional[int]=1 , _lowerCamelCase : Any=1 , _lowerCamelCase : Dict=5 , _lowerCamelCase : str=2 , _lowerCamelCase : Union[str, Any]=0.1 , **_lowerCamelCase : int , ):
"""simple docstring"""
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
A_ : int = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
elif isinstance(_lowerCamelCase , _lowerCamelCase ):
A_ : str = backbone_config.get('''model_type''' )
A_ : Optional[int] = CONFIG_MAPPING[backbone_model_type]
A_ : List[str] = config_class.from_dict(_lowerCamelCase )
# set timm attributes to None
A_ ,A_ ,A_ : Union[str, Any] = None, None, None
A_ : Optional[Any] = use_timm_backbone
A_ : Optional[int] = backbone_config
A_ : Optional[Any] = num_channels
A_ : Dict = num_queries
A_ : str = d_model
A_ : List[str] = encoder_ffn_dim
A_ : int = encoder_layers
A_ : Optional[Any] = encoder_attention_heads
A_ : List[str] = decoder_ffn_dim
A_ : Any = decoder_layers
A_ : List[str] = decoder_attention_heads
A_ : Tuple = dropout
A_ : Optional[Any] = attention_dropout
A_ : Any = activation_dropout
A_ : List[Any] = activation_function
A_ : Dict = init_std
A_ : Any = init_xavier_std
A_ : List[Any] = encoder_layerdrop
A_ : int = decoder_layerdrop
A_ : Any = encoder_layers
A_ : List[str] = auxiliary_loss
A_ : List[Any] = position_embedding_type
A_ : Optional[Any] = backbone
A_ : Tuple = use_pretrained_backbone
A_ : List[Any] = dilation
# Hungarian matcher
A_ : List[str] = class_cost
A_ : str = bbox_cost
A_ : Union[str, Any] = giou_cost
# Loss coefficients
A_ : Any = mask_loss_coefficient
A_ : Optional[int] = dice_loss_coefficient
A_ : Dict = bbox_loss_coefficient
A_ : int = giou_loss_coefficient
A_ : int = eos_coefficient
super().__init__(is_encoder_decoder=_lowerCamelCase , **_lowerCamelCase )
@property
def _a ( self : List[Any] ):
"""simple docstring"""
return self.encoder_attention_heads
@property
def _a ( self : Any ):
"""simple docstring"""
return self.d_model
class UpperCamelCase_ (a__ ):
"""simple docstring"""
_lowerCAmelCase = version.parse('1.11' )
@property
def _a ( self : Tuple ):
"""simple docstring"""
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''pixel_mask''', {0: '''batch'''}),
] )
@property
def _a ( self : Optional[int] ):
"""simple docstring"""
return 1E-5
@property
def _a ( self : str ):
"""simple docstring"""
return 12
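# Minimal usage sketch. Assumption: the config class above mirrors transformers'
# `TableTransformerConfig`, so the upstream class is used here for a runnable example
# (requires `pip install transformers`).
from transformers import TableTransformerConfig

_tt_cfg = TableTransformerConfig(d_model=256, encoder_attention_heads=8)
# `hidden_size` and `num_attention_heads` resolve through the attribute map shown above.
assert _tt_cfg.hidden_size == _tt_cfg.d_model
assert _tt_cfg.num_attention_heads == _tt_cfg.encoder_attention_heads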
| 4 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case__ = logging.get_logger(__name__)
snake_case__ = {
"""RWKV/rwkv-4-169m-pile""": """https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-430m-pile""": """https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-1b5-pile""": """https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-3b-pile""": """https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-7b-pile""": """https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-14b-pile""": """https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json""",
"""RWKV/rwkv-raven-1b5""": """https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json""",
"""RWKV/rwkv-raven-3b""": """https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json""",
"""RWKV/rwkv-raven-7b""": """https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json""",
"""RWKV/rwkv-raven-14b""": """https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json""",
}
class UpperCamelCase_ (a__ ):
"""simple docstring"""
_lowerCAmelCase = 'rwkv'
_lowerCAmelCase = {'max_position_embeddings': 'context_length'}
def __init__( self : Tuple , _lowerCamelCase : str=50277 , _lowerCamelCase : Tuple=1024 , _lowerCamelCase : Optional[int]=4096 , _lowerCamelCase : Tuple=32 , _lowerCamelCase : List[str]=None , _lowerCamelCase : str=None , _lowerCamelCase : Tuple=1E-5 , _lowerCamelCase : Optional[Any]=0 , _lowerCamelCase : Dict=0 , _lowerCamelCase : Dict=6 , _lowerCamelCase : List[Any]=False , _lowerCamelCase : List[Any]=True , **_lowerCamelCase : List[str] , ):
"""simple docstring"""
A_ : Optional[int] = vocab_size
A_ : List[Any] = context_length
A_ : Optional[int] = hidden_size
A_ : Union[str, Any] = num_hidden_layers
A_ : Optional[int] = attention_hidden_size if attention_hidden_size is not None else hidden_size
A_ : Union[str, Any] = intermediate_size if intermediate_size is not None else 4 * hidden_size
A_ : str = layer_norm_epsilon
A_ : Dict = rescale_every
A_ : Union[str, Any] = use_cache
A_ : Dict = bos_token_id
A_ : Dict = eos_token_id
super().__init__(
tie_word_embeddings=_lowerCamelCase , bos_token_id=_lowerCamelCase , eos_token_id=_lowerCamelCase , **_lowerCamelCase )
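# Minimal usage sketch. Assumption: the class above mirrors transformers' `RwkvConfig`,
# so the upstream class is used here for a runnable example (requires `pip install transformers`).
from transformers import RwkvConfig

_rwkv_cfg = RwkvConfig(vocab_size=50277, context_length=1024, hidden_size=768)
# When left as `None`, these two fields default from `hidden_size`, as in the __init__ above.
assert _rwkv_cfg.attention_hidden_size == 768
assert _rwkv_cfg.intermediate_size == 4 * 768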
| 4 |
'''simple docstring'''
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class UpperCamelCase_ :
"""simple docstring"""
def __init__( self : Optional[Any] , _lowerCamelCase : int , _lowerCamelCase : List[str]=3 , _lowerCamelCase : Any=32 , _lowerCamelCase : Union[str, Any]=3 , _lowerCamelCase : int=10 , _lowerCamelCase : Union[str, Any]=[8, 16, 32, 64] , _lowerCamelCase : Dict=[1, 1, 2, 1] , _lowerCamelCase : Union[str, Any]=True , _lowerCamelCase : Optional[int]=True , _lowerCamelCase : Any="relu" , _lowerCamelCase : Optional[Any]=3 , _lowerCamelCase : Optional[Any]=None , _lowerCamelCase : Dict=["stage2", "stage3", "stage4"] , _lowerCamelCase : Union[str, Any]=[2, 3, 4] , _lowerCamelCase : Tuple=1 , ):
"""simple docstring"""
A_ : List[str] = parent
A_ : List[str] = batch_size
A_ : Union[str, Any] = image_size
A_ : Tuple = num_channels
A_ : Any = embeddings_size
A_ : int = hidden_sizes
A_ : Optional[Any] = depths
A_ : List[Any] = is_training
A_ : Optional[int] = use_labels
A_ : int = hidden_act
A_ : Tuple = num_labels
A_ : Union[str, Any] = scope
A_ : List[Any] = len(_lowerCamelCase )
A_ : Union[str, Any] = out_features
A_ : List[Any] = out_indices
A_ : Dict = num_groups
def _a ( self : Optional[int] ):
"""simple docstring"""
A_ : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A_ : Union[str, Any] = None
if self.use_labels:
A_ : Any = ids_tensor([self.batch_size] , self.num_labels )
A_ : Any = self.get_config()
return config, pixel_values, labels
def _a ( self : Union[str, Any] ):
"""simple docstring"""
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def _a ( self : List[Any] , _lowerCamelCase : List[str] , _lowerCamelCase : List[str] , _lowerCamelCase : Optional[Any] ):
"""simple docstring"""
A_ : Any = BitModel(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A_ : int = model(_lowerCamelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def _a ( self : Optional[int] , _lowerCamelCase : List[Any] , _lowerCamelCase : str , _lowerCamelCase : Optional[int] ):
"""simple docstring"""
A_ : Dict = self.num_labels
A_ : Optional[Any] = BitForImageClassification(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A_ : List[Any] = model(_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a ( self : Any , _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : List[Any] ):
"""simple docstring"""
A_ : List[Any] = BitBackbone(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A_ : int = model(_lowerCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
A_ : Optional[Any] = None
A_ : int = BitBackbone(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A_ : Optional[int] = model(_lowerCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def _a ( self : List[Any] ):
"""simple docstring"""
A_ : Union[str, Any] = self.prepare_config_and_inputs()
A_ ,A_ ,A_ : Union[str, Any] = config_and_inputs
A_ : str = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class UpperCamelCase_ (a__, a__, unittest.TestCase ):
"""simple docstring"""
_lowerCAmelCase = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
_lowerCAmelCase = (
{'feature-extraction': BitModel, 'image-classification': BitForImageClassification}
if is_torch_available()
else {}
)
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
def _a ( self : Optional[Any] ):
"""simple docstring"""
A_ : List[str] = BitModelTester(self )
A_ : Optional[Any] = ConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase )
def _a ( self : Optional[Any] ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _a ( self : List[Any] ):
"""simple docstring"""
return
@unittest.skip(reason='''Bit does not output attentions''' )
def _a ( self : str ):
"""simple docstring"""
pass
@unittest.skip(reason='''Bit does not use inputs_embeds''' )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
pass
@unittest.skip(reason='''Bit does not support input and output embeddings''' )
def _a ( self : Any ):
"""simple docstring"""
pass
def _a ( self : List[Any] ):
"""simple docstring"""
A_ ,A_ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : Dict = model_class(_lowerCamelCase )
A_ : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A_ : int = [*signature.parameters.keys()]
A_ : Union[str, Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _lowerCamelCase )
def _a ( self : Optional[Any] ):
"""simple docstring"""
A_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCamelCase )
def _a ( self : Optional[Any] ):
"""simple docstring"""
A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_lowerCamelCase )
def _a ( self : Tuple ):
"""simple docstring"""
A_ ,A_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : str = model_class(config=_lowerCamelCase )
for name, module in model.named_modules():
if isinstance(_lowerCamelCase , (nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
def _a ( self : int ):
"""simple docstring"""
def check_hidden_states_output(_lowerCamelCase : Union[str, Any] , _lowerCamelCase : Dict , _lowerCamelCase : int ):
A_ : Union[str, Any] = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
with torch.no_grad():
A_ : Union[str, Any] = model(**self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) )
A_ : int = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
A_ : List[Any] = self.model_tester.num_stages
self.assertEqual(len(_lowerCamelCase ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
A_ ,A_ : str = self.model_tester.prepare_config_and_inputs_for_common()
A_ : Tuple = ['''preactivation''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
A_ : Tuple = layer_type
A_ : Optional[Any] = True
check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A_ : List[str] = True
check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
@unittest.skip(reason='''Bit does not use feedforward chunking''' )
def _a ( self : Tuple ):
"""simple docstring"""
pass
def _a ( self : str ):
"""simple docstring"""
A_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCamelCase )
@slow
def _a ( self : Union[str, Any] ):
"""simple docstring"""
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : List[Any] = BitModel.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
def snake_case__ ( ) -> Optional[int]:
A_ : Optional[int] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class UpperCamelCase_ (unittest.TestCase ):
"""simple docstring"""
@cached_property
def _a ( self : List[Any] ):
"""simple docstring"""
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def _a ( self : Dict ):
"""simple docstring"""
A_ : Optional[int] = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(_lowerCamelCase )
A_ : Union[str, Any] = self.default_image_processor
A_ : Optional[int] = prepare_img()
A_ : int = image_processor(images=_lowerCamelCase , return_tensors='''pt''' ).to(_lowerCamelCase )
# forward pass
with torch.no_grad():
A_ : Union[str, Any] = model(**_lowerCamelCase )
# verify the logits
A_ : Dict = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowerCamelCase )
A_ : Tuple = torch.tensor([[-0.65_26, -0.52_63, -1.43_98]] ).to(_lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCamelCase , atol=1E-4 ) )
@require_torch
class UpperCamelCase_ (a__, unittest.TestCase ):
"""simple docstring"""
_lowerCAmelCase = (BitBackbone,) if is_torch_available() else ()
_lowerCAmelCase = BitConfig
_lowerCAmelCase = False
def _a ( self : List[str] ):
"""simple docstring"""
A_ : Union[str, Any] = BitModelTester(self )
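# These classes follow the standard transformers ModelTester pattern; an upstream
# equivalent of this file would typically be run with pytest, e.g. (path illustrative):
#
#   python -m pytest tests/models/bit/test_modeling_bit.py -k "test_model"
#
# Tests marked @slow additionally require RUN_SLOW=1 in the environment.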
| 4 | 1 |
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def snake_case__ ( lowerCamelCase__ : Union[str, Any] ) -> Optional[Any]:
A_ : List[Any] = SwinConfig(image_size=1_9_2 )
if "base" in model_name:
A_ : Tuple = 6
A_ : Any = 1_2_8
A_ : str = (2, 2, 1_8, 2)
A_ : Dict = (4, 8, 1_6, 3_2)
elif "large" in model_name:
A_ : Union[str, Any] = 1_2
A_ : Optional[Any] = 1_9_2
A_ : Dict = (2, 2, 1_8, 2)
A_ : Optional[Any] = (6, 1_2, 2_4, 4_8)
else:
        raise ValueError('''Model not supported: only the base and large Swin variants are supported''' )
A_ : List[Any] = window_size
A_ : List[str] = embed_dim
A_ : List[str] = depths
A_ : List[Any] = num_heads
return config
def snake_case__ ( lowerCamelCase__ : Dict ) -> Any:
if "encoder.mask_token" in name:
A_ : Optional[int] = name.replace('''encoder.mask_token''' , '''embeddings.mask_token''' )
if "encoder.patch_embed.proj" in name:
A_ : int = name.replace('''encoder.patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "encoder.patch_embed.norm" in name:
A_ : List[Any] = name.replace('''encoder.patch_embed.norm''' , '''embeddings.norm''' )
if "attn.proj" in name:
A_ : str = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name:
A_ : int = name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
A_ : Tuple = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
A_ : Union[str, Any] = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
A_ : Union[str, Any] = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
A_ : int = name.replace('''mlp.fc2''' , '''output.dense''' )
if name == "encoder.norm.weight":
A_ : List[str] = '''layernorm.weight'''
if name == "encoder.norm.bias":
A_ : Union[str, Any] = '''layernorm.bias'''
if "decoder" in name:
pass
else:
A_ : List[Any] = '''swin.''' + name
return name
def snake_case__ ( lowerCamelCase__ : int , lowerCamelCase__ : Any ) -> Optional[Any]:
for key in orig_state_dict.copy().keys():
A_ : List[str] = orig_state_dict.pop(lowerCamelCase__ )
if "attn_mask" in key:
pass
elif "qkv" in key:
A_ : Dict = key.split('''.''' )
A_ : Tuple = int(key_split[2] )
A_ : int = int(key_split[4] )
A_ : List[Any] = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
A_ : Optional[int] = val[:dim, :]
A_ : str = val[
dim : dim * 2, :
]
A_ : List[Any] = val[-dim:, :]
else:
A_ : Union[str, Any] = val[
:dim
]
A_ : Any = val[
dim : dim * 2
]
A_ : Tuple = val[
-dim:
]
else:
A_ : Any = val
return orig_state_dict
def snake_case__ ( lowerCamelCase__ : Any , lowerCamelCase__ : Any , lowerCamelCase__ : Dict , lowerCamelCase__ : Any ) -> Any:
A_ : List[str] = torch.load(lowerCamelCase__ , map_location='''cpu''' )['''model''']
A_ : Optional[Any] = get_swin_config(lowerCamelCase__ )
A_ : Any = SwinForMaskedImageModeling(lowerCamelCase__ )
model.eval()
A_ : Any = convert_state_dict(lowerCamelCase__ , lowerCamelCase__ )
model.load_state_dict(lowerCamelCase__ )
A_ : Any = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
A_ : str = ViTImageProcessor(size={'''height''': 1_9_2, '''width''': 1_9_2} )
A_ : Optional[int] = Image.open(requests.get(lowerCamelCase__ , stream=lowerCamelCase__ ).raw )
A_ : Tuple = image_processor(images=lowerCamelCase__ , return_tensors='''pt''' )
with torch.no_grad():
A_ : int = model(**lowerCamelCase__ ).logits
print(outputs.keys() )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(f'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(lowerCamelCase__ )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(lowerCamelCase__ )
if push_to_hub:
print(f'Pushing model and image processor for {model_name} to hub' )
model.push_to_hub(f'microsoft/{model_name}' )
image_processor.push_to_hub(f'microsoft/{model_name}' )
if __name__ == "__main__":
snake_case__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""swin-base-simmim-window6-192""",
type=str,
choices=["""swin-base-simmim-window6-192""", """swin-large-simmim-window12-192"""],
help="""Name of the Swin SimMIM model you'd like to convert.""",
)
parser.add_argument(
"""--checkpoint_path""",
default="""/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth""",
type=str,
help="""Path to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
snake_case__ = parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
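# Example invocation (script name and checkpoint path are hypothetical placeholders):
#
#   python convert_swin_simmim_to_pytorch.py \
#       --model_name swin-base-simmim-window6-192 \
#       --checkpoint_path ./simmim_pretrain__swin_base__img192_window6__100ep.pth \
#       --pytorch_dump_folder_path ./swin-simmim-converted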
| 4 |
'''simple docstring'''
import pprint
import requests
snake_case__ = """https://zenquotes.io/api"""
def snake_case__ ( ) -> list:
return requests.get(API_ENDPOINT_URL + '''/today''' ).json()
def snake_case__ ( ) -> list:
return requests.get(API_ENDPOINT_URL + '''/random''' ).json()
if __name__ == "__main__":
snake_case__ = random_quotes()
pprint.pprint(response)
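# Usage sketch. Assumption (based on the public ZenQuotes API): each item in the
# returned JSON list carries the quote text under "q" and the author under "a":
#
#   for item in random_quotes():
#       print(f"{item['q']} -- {item['a']}")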
| 4 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case__ = logging.get_logger(__name__)
snake_case__ = {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"""
),
}
class UpperCamelCase_ (a__ ):
"""simple docstring"""
_lowerCAmelCase = 'dpr'
def __init__( self : int , _lowerCamelCase : int=30522 , _lowerCamelCase : Optional[int]=768 , _lowerCamelCase : List[str]=12 , _lowerCamelCase : List[Any]=12 , _lowerCamelCase : Union[str, Any]=3072 , _lowerCamelCase : int="gelu" , _lowerCamelCase : List[Any]=0.1 , _lowerCamelCase : Any=0.1 , _lowerCamelCase : Dict=512 , _lowerCamelCase : int=2 , _lowerCamelCase : Tuple=0.02 , _lowerCamelCase : Tuple=1E-12 , _lowerCamelCase : Dict=0 , _lowerCamelCase : List[str]="absolute" , _lowerCamelCase : int = 0 , **_lowerCamelCase : int , ):
"""simple docstring"""
super().__init__(pad_token_id=_lowerCamelCase , **_lowerCamelCase )
A_ : Union[str, Any] = vocab_size
A_ : Tuple = hidden_size
A_ : Tuple = num_hidden_layers
A_ : Union[str, Any] = num_attention_heads
A_ : List[Any] = hidden_act
A_ : Union[str, Any] = intermediate_size
A_ : Optional[Any] = hidden_dropout_prob
A_ : Any = attention_probs_dropout_prob
A_ : Dict = max_position_embeddings
A_ : int = type_vocab_size
A_ : Optional[Any] = initializer_range
A_ : Union[str, Any] = layer_norm_eps
A_ : Dict = projection_dim
A_ : Any = position_embedding_type
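# Minimal usage sketch. Assumption: the class above mirrors transformers' `DPRConfig`,
# so the upstream class is used here for a runnable example (requires `pip install transformers`).
from transformers import DPRConfig

_dpr_cfg = DPRConfig(projection_dim=128)
# projection_dim=0 (the default above) keeps the raw hidden_size as the encoder
# output; a non-zero value adds a projection layer of that width.
assert _dpr_cfg.projection_dim == 128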
| 4 |
'''simple docstring'''
from __future__ import annotations
class UpperCamelCase_ :
"""simple docstring"""
def __init__( self : Optional[int] , _lowerCamelCase : int ):
"""simple docstring"""
A_ : Union[str, Any] = order
# a_{0} ... a_{k}
A_ : Union[str, Any] = [1.0] + [0.0] * order
# b_{0} ... b_{k}
A_ : int = [1.0] + [0.0] * order
# x[n-1] ... x[n-k]
A_ : str = [0.0] * self.order
# y[n-1] ... y[n-k]
A_ : Optional[Any] = [0.0] * self.order
def _a ( self : Dict , _lowerCamelCase : list[float] , _lowerCamelCase : list[float] ):
"""simple docstring"""
if len(_lowerCamelCase ) < self.order:
A_ : Any = [1.0, *a_coeffs]
if len(_lowerCamelCase ) != self.order + 1:
A_ : List[Any] = (
f'Expected a_coeffs to have {self.order + 1} elements '
f'for {self.order}-order filter, got {len(_lowerCamelCase )}'
)
raise ValueError(_lowerCamelCase )
if len(_lowerCamelCase ) != self.order + 1:
A_ : Union[str, Any] = (
f'Expected b_coeffs to have {self.order + 1} elements '
f'for {self.order}-order filter, got {len(_lowerCamelCase )}'
)
raise ValueError(_lowerCamelCase )
A_ : Tuple = a_coeffs
A_ : str = b_coeffs
def _a ( self : Tuple , _lowerCamelCase : float ):
"""simple docstring"""
A_ : Any = 0.0
# Start at index 1 and do index 0 at the end.
for i in range(1 , self.order + 1 ):
result += (
self.b_coeffs[i] * self.input_history[i - 1]
- self.a_coeffs[i] * self.output_history[i - 1]
)
A_ : str = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
A_ : Optional[Any] = self.input_history[:-1]
A_ : List[str] = self.output_history[:-1]
A_ : Tuple = sample
A_ : Tuple = result
return result
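# Self-contained sketch of the Direct Form I update implemented by the process
# method above: y[n] = (b0*x[n] + sum_i(b_i*x[n-i] - a_i*y[n-i])) / a0.
# `_iir_step` is a hypothetical helper written for illustration, not part of the original file.
def _iir_step(sample: float, a_coeffs: list, b_coeffs: list, x_hist: list, y_hist: list) -> float:
    # Accumulate the feed-forward and feedback taps over the stored histories.
    acc = sum(b * x - a * y for b, a, x, y in zip(b_coeffs[1:], a_coeffs[1:], x_hist, y_hist))
    result = (acc + b_coeffs[0] * sample) / a_coeffs[0]
    # Shift the histories exactly as the class does (newest value at index 0).
    x_hist[:] = [sample] + x_hist[:-1]
    y_hist[:] = [result] + y_hist[:-1]
    return result

# First-order low-pass example: y[n] = 0.5 * x[n] + 0.5 * x[n-1]
_xh, _yh = [0.0], [0.0]
assert _iir_step(1.0, [1.0, 0.0], [0.5, 0.5], _xh, _yh) == 0.5
assert _iir_step(1.0, [1.0, 0.0], [0.5, 0.5], _xh, _yh) == 1.0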
| 4 | 1 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class UpperCamelCase_ (a__, unittest.TestCase ):
"""simple docstring"""
_lowerCAmelCase = RoCBertTokenizer
_lowerCAmelCase = None
_lowerCAmelCase = False
_lowerCAmelCase = True
_lowerCAmelCase = filter_non_english
def _a ( self : Optional[Any] ):
"""simple docstring"""
super().setUp()
A_ : Optional[Any] = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''你''', '''好''', '''是''', '''谁''', '''a''', '''b''', '''c''', '''d''']
A_ : Optional[int] = {}
A_ : int = {}
for i, value in enumerate(_lowerCamelCase ):
A_ : Tuple = i
A_ : Optional[int] = i
A_ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
A_ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''word_shape_file'''] )
A_ : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''word_pronunciation_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
with open(self.word_shape_file , '''w''' , encoding='''utf-8''' ) as word_shape_writer:
json.dump(_lowerCamelCase , _lowerCamelCase , ensure_ascii=_lowerCamelCase )
with open(self.word_pronunciation_file , '''w''' , encoding='''utf-8''' ) as word_pronunciation_writer:
json.dump(_lowerCamelCase , _lowerCamelCase , ensure_ascii=_lowerCamelCase )
def _a ( self : Any ):
"""simple docstring"""
A_ : Optional[Any] = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
A_ : List[str] = tokenizer.tokenize('''你好[SEP]你是谁''' )
self.assertListEqual(_lowerCamelCase , ['''你''', '''好''', '''[SEP]''', '''你''', '''是''', '''谁'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCamelCase ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(_lowerCamelCase ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(_lowerCamelCase ) , [5, 6, 2, 5, 7, 8] )
def _a ( self : List[str] ):
"""simple docstring"""
A_ : Optional[Any] = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )
def _a ( self : Dict ):
"""simple docstring"""
A_ : str = RoCBertBasicTokenizer(do_lower_case=_lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def _a ( self : Any ):
"""simple docstring"""
A_ : str = RoCBertBasicTokenizer(do_lower_case=_lowerCamelCase , strip_accents=_lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] )
def _a ( self : Any ):
"""simple docstring"""
A_ : Union[str, Any] = RoCBertBasicTokenizer(do_lower_case=_lowerCamelCase , strip_accents=_lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def _a ( self : Optional[int] ):
"""simple docstring"""
A_ : str = RoCBertBasicTokenizer(do_lower_case=_lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A_ : Dict = RoCBertBasicTokenizer(do_lower_case=_lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def _a ( self : Any ):
"""simple docstring"""
A_ : Tuple = RoCBertBasicTokenizer(do_lower_case=_lowerCamelCase , strip_accents=_lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def _a ( self : Optional[int] ):
"""simple docstring"""
A_ : Optional[int] = RoCBertBasicTokenizer(do_lower_case=_lowerCamelCase , strip_accents=_lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def _a ( self : Optional[Any] ):
"""simple docstring"""
A_ : Optional[int] = RoCBertBasicTokenizer(do_lower_case=_lowerCamelCase , never_split=['''[UNK]'''] )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )
def _a ( self : Any ):
"""simple docstring"""
A_ : int = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
A_ : Union[str, Any] = {}
for i, token in enumerate(_lowerCamelCase ):
A_ : Dict = i
A_ : List[str] = RoCBertWordpieceTokenizer(vocab=_lowerCamelCase , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] )
def _a ( self : int ):
"""simple docstring"""
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
def _a ( self : Tuple ):
"""simple docstring"""
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
def _a ( self : Tuple ):
"""simple docstring"""
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
def _a ( self : int ):
"""simple docstring"""
A_ : Any = self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(_lowerCamelCase ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
if self.test_rust_tokenizer:
A_ : List[Any] = self.get_rust_tokenizer()
self.assertListEqual(
[rust_tokenizer.tokenize(_lowerCamelCase ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
def _a ( self : Optional[int] ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
A_ : List[Any] = self.rust_tokenizer_class.from_pretrained(_lowerCamelCase , **_lowerCamelCase )
A_ : Any = f'A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'
A_ : Dict = tokenizer_r.encode_plus(
_lowerCamelCase , return_attention_mask=_lowerCamelCase , return_token_type_ids=_lowerCamelCase , return_offsets_mapping=_lowerCamelCase , add_special_tokens=_lowerCamelCase , )
A_ : int = tokenizer_r.do_lower_case if hasattr(_lowerCamelCase , '''do_lower_case''' ) else False
A_ : int = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), '''A'''),
((1, 2), ''','''),
((3, 5), '''na'''),
((5, 6), '''##ï'''),
((6, 8), '''##ve'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''Allen'''),
((21, 23), '''##NL'''),
((23, 24), '''##P'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), '''a'''),
((1, 2), ''','''),
((3, 8), '''naive'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''allen'''),
((21, 23), '''##nl'''),
((23, 24), '''##p'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] )
def _a ( self : int ):
"""simple docstring"""
A_ : Tuple = ['''的''', '''人''', '''有''']
A_ : Optional[int] = ''''''.join(_lowerCamelCase )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
A_ : List[str] = True
A_ : List[Any] = self.tokenizer_class.from_pretrained(_lowerCamelCase , **_lowerCamelCase )
A_ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(_lowerCamelCase , **_lowerCamelCase )
A_ : str = tokenizer_p.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
A_ : Union[str, Any] = tokenizer_r.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
A_ : str = tokenizer_r.convert_ids_to_tokens(_lowerCamelCase )
A_ : Dict = tokenizer_p.convert_ids_to_tokens(_lowerCamelCase )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
A_ : List[str] = False
A_ : List[Any] = self.rust_tokenizer_class.from_pretrained(_lowerCamelCase , **_lowerCamelCase )
A_ : int = self.tokenizer_class.from_pretrained(_lowerCamelCase , **_lowerCamelCase )
A_ : Any = tokenizer_r.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
A_ : Union[str, Any] = tokenizer_p.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
A_ : Optional[int] = tokenizer_r.convert_ids_to_tokens(_lowerCamelCase )
A_ : List[Any] = tokenizer_p.convert_ids_to_tokens(_lowerCamelCase )
# it is expected that only the first Chinese character is not preceded by "##".
A_ : Optional[Any] = [
f'##{token}' if idx != 0 else token for idx, token in enumerate(_lowerCamelCase )
]
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
@slow
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A_ : Any = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
A_ : Tuple = tokenizer.encode('''你好''' , add_special_tokens=_lowerCamelCase )
A_ : Dict = tokenizer.encode('''你是谁''' , add_special_tokens=_lowerCamelCase )
A_ : Dict = tokenizer.build_inputs_with_special_tokens(_lowerCamelCase )
A_ : Optional[Any] = tokenizer.build_inputs_with_special_tokens(_lowerCamelCase , _lowerCamelCase )
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def _a ( self : Optional[Any] ):
"""simple docstring"""
A_ : Optional[Any] = self.get_tokenizers(do_lower_case=_lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
A_ : Dict = '''你好,你是谁'''
A_ : List[Any] = tokenizer.tokenize(_lowerCamelCase )
A_ : Union[str, Any] = tokenizer.convert_tokens_to_ids(_lowerCamelCase )
A_ : int = tokenizer.convert_tokens_to_shape_ids(_lowerCamelCase )
A_ : Any = tokenizer.convert_tokens_to_pronunciation_ids(_lowerCamelCase )
A_ : Any = tokenizer.prepare_for_model(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , add_special_tokens=_lowerCamelCase )
A_ : Optional[int] = tokenizer.encode_plus(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
| 4 |
'''simple docstring'''
class UpperCamelCase_ :
"""simple docstring"""
def __init__( self : Optional[Any] , _lowerCamelCase : Union[str, Any] ):
"""simple docstring"""
A_ : Union[str, Any] = val
A_ : Tuple = None
A_ : Any = None
def _a ( self : Tuple , _lowerCamelCase : List[Any] ):
"""simple docstring"""
        if self.val is not None:  # a plain truthiness check would mis-handle falsy values such as 0
if val < self.val:
if self.left is None:
A_ : int = Node(_lowerCamelCase )
else:
self.left.insert(_lowerCamelCase )
elif val > self.val:
if self.right is None:
A_ : List[str] = Node(_lowerCamelCase )
else:
self.right.insert(_lowerCamelCase )
else:
A_ : Any = val
def snake_case__ ( lowerCamelCase__ : Any , lowerCamelCase__ : Optional[int] ) -> str:
# Recursive traversal
if root:
inorder(root.left , lowerCamelCase__ )
res.append(root.val )
inorder(root.right , lowerCamelCase__ )
def snake_case__ ( lowerCamelCase__ : Optional[int] ) -> Tuple:
# Build BST
if len(lowerCamelCase__ ) == 0:
return arr
A_ : Dict = Node(arr[0] )
for i in range(1 , len(lowerCamelCase__ ) ):
root.insert(arr[i] )
# Traverse BST in order.
A_ : Tuple = []
inorder(lowerCamelCase__ , lowerCamelCase__ )
return res
if __name__ == "__main__":
print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
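# Complexity note: building the BST costs O(n log n) on average, but degrades to
# O(n^2) for already-sorted input, since every insertion then extends a single chain.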
| 4 | 1 |
'''simple docstring'''
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class UpperCamelCase_ :
"""simple docstring"""
def __init__( self : Union[str, Any] , _lowerCamelCase : Tuple , _lowerCamelCase : str=13 , _lowerCamelCase : Tuple=30 , _lowerCamelCase : List[Any]=2 , _lowerCamelCase : Any=3 , _lowerCamelCase : int=True , _lowerCamelCase : List[Any]=True , _lowerCamelCase : int=32 , _lowerCamelCase : int=2 , _lowerCamelCase : int=4 , _lowerCamelCase : List[str]=37 , _lowerCamelCase : Optional[Any]="gelu" , _lowerCamelCase : List[str]=0.1 , _lowerCamelCase : Tuple=0.1 , _lowerCamelCase : Tuple=10 , _lowerCamelCase : Optional[Any]=0.02 , _lowerCamelCase : List[str]=3 , _lowerCamelCase : Optional[Any]=0.6 , _lowerCamelCase : Union[str, Any]=None , ):
"""simple docstring"""
A_ : Optional[int] = parent
A_ : Any = batch_size
A_ : Any = image_size
A_ : Union[str, Any] = patch_size
A_ : Tuple = num_channels
A_ : Tuple = is_training
A_ : Union[str, Any] = use_labels
A_ : Optional[int] = hidden_size
A_ : Tuple = num_hidden_layers
A_ : str = num_attention_heads
A_ : Dict = intermediate_size
A_ : List[Any] = hidden_act
A_ : Union[str, Any] = hidden_dropout_prob
A_ : Any = attention_probs_dropout_prob
A_ : Tuple = type_sequence_label_size
A_ : Optional[int] = initializer_range
A_ : List[Any] = mask_ratio
A_ : Dict = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
A_ : List[str] = (image_size // patch_size) ** 2
A_ : str = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def _a ( self : str ):
"""simple docstring"""
A_ : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A_ : List[Any] = None
if self.use_labels:
A_ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A_ : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def _a ( self : Optional[int] ):
"""simple docstring"""
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowerCamelCase , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def _a ( self : Optional[Any] , _lowerCamelCase : Optional[int] , _lowerCamelCase : List[Any] , _lowerCamelCase : Tuple ):
"""simple docstring"""
A_ : Union[str, Any] = TFViTMAEModel(config=_lowerCamelCase )
A_ : List[Any] = model(_lowerCamelCase , training=_lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self : Optional[Any] , _lowerCamelCase : str , _lowerCamelCase : Optional[int] , _lowerCamelCase : Union[str, Any] ):
"""simple docstring"""
A_ : Any = TFViTMAEForPreTraining(_lowerCamelCase )
A_ : List[str] = model(_lowerCamelCase , training=_lowerCamelCase )
# expected sequence length = num_patches
A_ : int = (self.image_size // self.patch_size) ** 2
A_ : int = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
A_ : str = 1
A_ : Dict = TFViTMAEForPreTraining(_lowerCamelCase )
A_ : Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A_ : Optional[Any] = model(_lowerCamelCase , training=_lowerCamelCase )
A_ : Tuple = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def _a ( self : int ):
"""simple docstring"""
A_ : Tuple = self.prepare_config_and_inputs()
((A_) ,(A_) ,(A_)) : List[str] = config_and_inputs
A_ : Tuple = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class UpperCamelCase_ (a__, a__, unittest.TestCase ):
"""simple docstring"""
_lowerCAmelCase = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
_lowerCAmelCase = {'feature-extraction': TFViTMAEModel} if is_tf_available() else {}
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
def _a ( self : Dict ):
"""simple docstring"""
A_ : List[str] = TFViTMAEModelTester(self )
A_ : Optional[Any] = ConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase , hidden_size=37 )
def _a ( self : Tuple ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViTMAE does not use inputs_embeds''' )
def _a ( self : Any ):
"""simple docstring"""
pass
def _a ( self : Optional[Any] ):
"""simple docstring"""
A_ ,A_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : Tuple = model_class(_lowerCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
A_ : Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowerCamelCase , tf.keras.layers.Layer ) )
def _a ( self : Any ):
"""simple docstring"""
A_ ,A_ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : int = model_class(_lowerCamelCase )
A_ : Any = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A_ : List[Any] = [*signature.parameters.keys()]
A_ : int = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _lowerCamelCase )
def _a ( self : str ):
"""simple docstring"""
A_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCamelCase )
def _a ( self : Any ):
"""simple docstring"""
A_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_lowerCamelCase )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
np.random.seed(2 )
A_ ,A_ : str = self.model_tester.prepare_config_and_inputs_for_common()
A_ : Any = int((config.image_size // config.patch_size) ** 2 )
A_ : Any = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
A_ : List[str] = model_class(_lowerCamelCase )
A_ : Tuple = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase )
A_ : Tuple = model(_lowerCamelCase , noise=_lowerCamelCase )
A_ : List[str] = copy.deepcopy(self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) )
A_ : Any = model(**_lowerCamelCase , noise=_lowerCamelCase )
A_ : Optional[Any] = outputs_dict[0].numpy()
A_ : Optional[Any] = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1E-6 )
def _a ( self : List[Any] ):
"""simple docstring"""
np.random.seed(2 )
A_ ,A_ : str = self.model_tester.prepare_config_and_inputs_for_common()
A_ : str = int((config.image_size // config.patch_size) ** 2 )
A_ : Dict = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(_lowerCamelCase : Optional[Any] ):
A_ : Any = {}
for k, v in inputs_dict.items():
if tf.is_tensor(_lowerCamelCase ):
A_ : Dict = v.numpy()
else:
A_ : Union[str, Any] = np.array(_lowerCamelCase )
return inputs_np_dict
for model_class in self.all_model_classes:
A_ : List[str] = model_class(_lowerCamelCase )
A_ : List[Any] = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase )
A_ : int = prepare_numpy_arrays(_lowerCamelCase )
A_ : str = model(_lowerCamelCase , noise=_lowerCamelCase )
A_ : Union[str, Any] = model(**_lowerCamelCase , noise=_lowerCamelCase )
self.assert_outputs_same(_lowerCamelCase , _lowerCamelCase )
def _a ( self : List[str] , _lowerCamelCase : Any , _lowerCamelCase : List[str] , _lowerCamelCase : Optional[Any] ):
"""simple docstring"""
np.random.seed(2 )
A_ : List[Any] = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
A_ : int = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
A_ : List[Any] = tf.constant(_lowerCamelCase )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
A_ : str = tf_noise
super().check_pt_tf_models(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def _a ( self : Dict ):
"""simple docstring"""
np.random.seed(2 )
A_ ,A_ : int = self.model_tester.prepare_config_and_inputs_for_common()
A_ : Dict = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(_lowerCamelCase )
if module_member_name.endswith('''MainLayer''' )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len('''MainLayer''' )] == model_class.__name__[: -len('''Model''' )]
for module_member in (getattr(_lowerCamelCase , _lowerCamelCase ),)
if isinstance(_lowerCamelCase , _lowerCamelCase )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(_lowerCamelCase , '''_keras_serializable''' , _lowerCamelCase )
}
A_ : List[Any] = int((config.image_size // config.patch_size) ** 2 )
A_ : Union[str, Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
A_ : Any = tf.convert_to_tensor(_lowerCamelCase )
inputs_dict.update({'''noise''': noise} )
for main_layer_class in tf_main_layer_classes:
A_ : Optional[Any] = main_layer_class(_lowerCamelCase )
A_ : List[Any] = {
name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
A_ : Tuple = tf.keras.Model(_lowerCamelCase , outputs=main_layer(_lowerCamelCase ) )
A_ : Dict = model(_lowerCamelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
A_ : List[Any] = os.path.join(_lowerCamelCase , '''keras_model.h5''' )
model.save(_lowerCamelCase )
A_ : Any = tf.keras.models.load_model(
_lowerCamelCase , custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(_lowerCamelCase , tf.keras.Model )
A_ : Optional[int] = model(_lowerCamelCase )
self.assert_outputs_same(_lowerCamelCase , _lowerCamelCase )
@slow
def _a ( self : str ):
"""simple docstring"""
np.random.seed(2 )
A_ ,A_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
A_ : Union[str, Any] = int((config.image_size // config.patch_size) ** 2 )
A_ : int = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
A_ : Any = model_class(_lowerCamelCase )
A_ : List[Any] = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase )
A_ : Optional[Any] = model(_lowerCamelCase , noise=_lowerCamelCase )
if model_class.__name__ == "TFViTMAEModel":
A_ : Dict = outputs.last_hidden_state.numpy()
A_ : List[Any] = 0
else:
A_ : Any = outputs.logits.numpy()
A_ : List[Any] = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_lowerCamelCase , saved_model=_lowerCamelCase )
A_ : List[Any] = model_class.from_pretrained(_lowerCamelCase )
A_ : Any = model(_lowerCamelCase , noise=_lowerCamelCase )
if model_class.__name__ == "TFViTMAEModel":
A_ : List[Any] = after_outputs['''last_hidden_state'''].numpy()
A_ : Optional[Any] = 0
else:
A_ : Dict = after_outputs['''logits'''].numpy()
A_ : Dict = 0
A_ : List[Any] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_lowerCamelCase , 1E-5 )
def _a ( self : Optional[Any] ):
"""simple docstring"""
np.random.seed(2 )
A_ ,A_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
A_ : int = int((config.image_size // config.patch_size) ** 2 )
A_ : Dict = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
A_ : List[Any] = model_class(_lowerCamelCase )
A_ : str = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase )
A_ : str = model(_lowerCamelCase , noise=_lowerCamelCase )
A_ : Optional[Any] = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(_lowerCamelCase )
A_ : Optional[int] = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
A_ : Any = model_class.from_config(model.config )
A_ : int = new_model(_lowerCamelCase ) # Build model
new_model.set_weights(model.get_weights() )
A_ : Dict = new_model(_lowerCamelCase , noise=_lowerCamelCase )
self.assert_outputs_same(_lowerCamelCase , _lowerCamelCase )
@unittest.skip(
        reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.''' )
def _a ( self : List[str] ):
"""simple docstring"""
pass
@unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load''' )
def _a ( self : List[Any] ):
"""simple docstring"""
pass
@slow
def _a ( self : Optional[Any] ):
"""simple docstring"""
A_ : Tuple = TFViTMAEModel.from_pretrained('''google/vit-base-patch16-224''' )
self.assertIsNotNone(_lowerCamelCase )
def snake_case__ ( ) -> List[str]:
A_ : Optional[int] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class UpperCamelCase_ (unittest.TestCase ):
"""simple docstring"""
@cached_property
def _a ( self : int ):
"""simple docstring"""
return ViTImageProcessor.from_pretrained('''facebook/vit-mae-base''' ) if is_vision_available() else None
@slow
def _a ( self : Optional[Any] ):
"""simple docstring"""
np.random.seed(2 )
A_ : List[str] = TFViTMAEForPreTraining.from_pretrained('''facebook/vit-mae-base''' )
A_ : str = self.default_image_processor
A_ : Dict = prepare_img()
A_ : Union[str, Any] = image_processor(images=_lowerCamelCase , return_tensors='''tf''' )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
A_ : Tuple = ViTMAEConfig()
A_ : List[str] = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
A_ : Optional[Any] = np.random.uniform(size=(1, num_patches) )
# forward pass
A_ : Union[str, Any] = model(**_lowerCamelCase , noise=_lowerCamelCase )
# verify the logits
A_ : Dict = tf.convert_to_tensor([1, 196, 768] )
self.assertEqual(outputs.logits.shape , _lowerCamelCase )
A_ : Dict = tf.convert_to_tensor(
[[-0.05_48, -1.70_23, -0.93_25], [0.37_21, -0.56_70, -0.22_33], [0.82_35, -1.38_78, -0.35_24]] )
tf.debugging.assert_near(outputs.logits[0, :3, :3] , _lowerCamelCase , atol=1E-4 )
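# The recurring pattern in this suite: ViTMAE draws a random mask on every forward
# pass, so the tests pass an explicit `noise` array to make runs comparable, e.g.
# (shapes follow the tester's defaults; illustrative only):
#
#   noise = np.random.uniform(size=(batch_size, num_patches))
#   outputs = model(pixel_values, noise=noise)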
| 4 |
'''simple docstring'''
def snake_case__ ( lowerCamelCase__ : list ) -> list:
if len(lowerCamelCase__ ) <= 1:
return [tuple(lowerCamelCase__ )]
A_ : List[str] = []
def generate(lowerCamelCase__ : int , lowerCamelCase__ : list ):
if k == 1:
res.append(tuple(arr[:] ) )
return
generate(k - 1 , lowerCamelCase__ )
for i in range(k - 1 ):
if k % 2 == 0: # k is even
A_ ,A_ : Optional[int] = arr[k - 1], arr[i]
else: # k is odd
A_ ,A_ : Union[str, Any] = arr[k - 1], arr[0]
generate(k - 1 , lowerCamelCase__ )
generate(len(lowerCamelCase__ ) , lowerCamelCase__ )
return res
if __name__ == "__main__":
snake_case__ = input("""Enter numbers separated by a comma:\n""").strip()
snake_case__ = [int(item) for item in user_input.split(""",""")]
print(heaps(arr))
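# Heap's algorithm yields each of the n! orderings with one swap per step; for
# [1, 2, 3] the intended output order is:
#   [(1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)]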
| 4 | 1 |
'''simple docstring'''
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def snake_case__ ( lowerCamelCase__ : List[str] , lowerCamelCase__ : Dict=False ) -> List[str]:
try:
A_ : Union[str, Any] = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
A_ : Optional[int] = default
else:
# KEY is set, convert it to True or False.
try:
A_ : List[str] = strtobool(lowerCamelCase__ )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f'If set, {key} must be yes or no.' )
return _value
snake_case__ = parse_flag_from_env("""RUN_SLOW""", default=False)
def snake_case__ ( lowerCamelCase__ : Optional[int] ) -> Dict:
return unittest.skip('''Test was skipped''' )(lowerCamelCase__ )
def snake_case__ ( lowerCamelCase__ : Dict ) -> str:
return unittest.skipUnless(_run_slow_tests , '''test is slow''' )(lowerCamelCase__ )
def snake_case__ ( lowerCamelCase__ : Any ) -> Optional[int]:
return unittest.skipUnless(not torch.cuda.is_available() , '''test requires only a CPU''' )(lowerCamelCase__ )
def snake_case__ ( lowerCamelCase__ : int ) -> Optional[int]:
return unittest.skipUnless(torch.cuda.is_available() , '''test requires a GPU''' )(lowerCamelCase__ )
def snake_case__ ( lowerCamelCase__ : Optional[Any] ) -> int:
return unittest.skipUnless(is_xpu_available() , '''test requires a XPU''' )(lowerCamelCase__ )
def snake_case__ ( lowerCamelCase__ : int ) -> Dict:
return unittest.skipUnless(is_mps_available() , '''test requires a `mps` backend support in `torch`''' )(lowerCamelCase__ )
def snake_case__ ( lowerCamelCase__ : Union[str, Any] ) -> str:
return unittest.skipUnless(
is_transformers_available() and is_datasets_available() , '''test requires the Hugging Face suite''' )(lowerCamelCase__ )
def snake_case__ ( lowerCamelCase__ : Dict ) -> Optional[int]:
return unittest.skipUnless(is_bnb_available() , '''test requires the bitsandbytes library''' )(lowerCamelCase__ )
def snake_case__ ( lowerCamelCase__ : Any ) -> List[Any]:
return unittest.skipUnless(is_tpu_available() , '''test requires TPU''' )(lowerCamelCase__ )
def snake_case__ ( lowerCamelCase__ : Optional[Any] ) -> Optional[int]:
return unittest.skipUnless(torch.cuda.device_count() == 1 , '''test requires a GPU''' )(lowerCamelCase__ )
def snake_case__ ( lowerCamelCase__ : int ) -> Optional[int]:
return unittest.skipUnless(torch.xpu.device_count() == 1 , '''test requires a XPU''' )(lowerCamelCase__ )
def snake_case__ ( lowerCamelCase__ : Any ) -> str:
return unittest.skipUnless(torch.cuda.device_count() > 1 , '''test requires multiple GPUs''' )(lowerCamelCase__ )
def snake_case__ ( lowerCamelCase__ : Optional[int] ) -> Optional[int]:
return unittest.skipUnless(torch.xpu.device_count() > 1 , '''test requires multiple XPUs''' )(lowerCamelCase__ )
def snake_case__ ( lowerCamelCase__ : Dict ) -> Tuple:
return unittest.skipUnless(is_safetensors_available() , '''test requires safetensors''' )(lowerCamelCase__ )
def snake_case__ ( lowerCamelCase__ : List[str] ) -> List[str]:
return unittest.skipUnless(is_deepspeed_available() , '''test requires DeepSpeed''' )(lowerCamelCase__ )
def snake_case__ ( lowerCamelCase__ : Union[str, Any] ) -> int:
return unittest.skipUnless(is_torch_version('''>=''' , '''1.12.0''' ) , '''test requires torch version >= 1.12.0''' )(lowerCamelCase__ )
def snake_case__ ( lowerCamelCase__ : List[Any]=None , lowerCamelCase__ : int=None ) -> Optional[Any]:
if test_case is None:
return partial(lowerCamelCase__ , version=lowerCamelCase__ )
return unittest.skipUnless(is_torch_version('''>=''' , lowerCamelCase__ ) , f'test requires torch version >= {version}' )(lowerCamelCase__ )
def snake_case__ ( lowerCamelCase__ : List[str] ) -> Optional[int]:
return unittest.skipUnless(is_tensorboard_available() , '''test requires Tensorboard''' )(lowerCamelCase__ )
def snake_case__ ( lowerCamelCase__ : Optional[Any] ) -> Tuple:
return unittest.skipUnless(is_wandb_available() , '''test requires wandb''' )(lowerCamelCase__ )
def snake_case__ ( lowerCamelCase__ : Optional[int] ) -> List[str]:
return unittest.skipUnless(is_comet_ml_available() , '''test requires comet_ml''' )(lowerCamelCase__ )
snake_case__ = (
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def snake_case__ ( lowerCamelCase__ : List[Any] ) -> Union[str, Any]:
return unittest.skipUnless(
_atleast_one_tracker_available , '''test requires at least one tracker to be available and for `comet_ml` to not be installed''' , )(lowerCamelCase__ )
class UpperCamelCase_ (unittest.TestCase ):
"""simple docstring"""
_lowerCAmelCase = True
@classmethod
def _a ( cls : Dict ):
"""simple docstring"""
A_ : int = tempfile.mkdtemp()
@classmethod
def _a ( cls : Dict ):
"""simple docstring"""
if os.path.exists(cls.tmpdir ):
shutil.rmtree(cls.tmpdir )
def _a ( self : str ):
"""simple docstring"""
if self.clear_on_setup:
for path in Path(self.tmpdir ).glob('''**/*''' ):
if path.is_file():
path.unlink()
elif path.is_dir():
shutil.rmtree(_lowerCamelCase )
class UpperCamelCase_ (unittest.TestCase ):
"""simple docstring"""
def _a ( self : Dict ):
"""simple docstring"""
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class UpperCamelCase_ (unittest.TestCase ):
"""simple docstring"""
def _a ( self : Any , _lowerCamelCase : Union[mock.Mock, List[mock.Mock]] ):
"""simple docstring"""
A_ : List[Any] = mocks if isinstance(_lowerCamelCase , (tuple, list) ) else [mocks]
for m in self.mocks:
m.start()
self.addCleanup(m.stop )
def snake_case__ ( lowerCamelCase__ : Optional[Any] ) -> Dict:
A_ : Optional[int] = AcceleratorState()
A_ : Tuple = tensor[None].clone().to(state.device )
A_ : Tuple = gather(lowerCamelCase__ ).cpu()
A_ : int = tensor[0].cpu()
for i in range(tensors.shape[0] ):
if not torch.equal(tensors[i] , lowerCamelCase__ ):
return False
return True
class UpperCamelCase_ :
"""simple docstring"""
def __init__( self : Dict , _lowerCamelCase : List[Any] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : int ):
"""simple docstring"""
A_ : Any = returncode
A_ : str = stdout
A_ : Union[str, Any] = stderr
async def snake_case__ ( lowerCamelCase__ : Dict , lowerCamelCase__ : List[Any] ) -> Optional[Any]:
while True:
A_ : List[Any] = await stream.readline()
if line:
callback(lowerCamelCase__ )
else:
break
async def snake_case__ ( lowerCamelCase__ : str , lowerCamelCase__ : Union[str, Any]=None , lowerCamelCase__ : Optional[int]=None , lowerCamelCase__ : Optional[Any]=None , lowerCamelCase__ : int=False , lowerCamelCase__ : str=False ) -> _RunOutput:
if echo:
print('''\nRunning: ''' , ''' '''.join(lowerCamelCase__ ) )
A_ : Union[str, Any] = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=lowerCamelCase__ , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=lowerCamelCase__ , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
A_ : Union[str, Any] = []
A_ : str = []
def tee(lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : int , lowerCamelCase__ : Union[str, Any]="" ):
A_ : int = line.decode('''utf-8''' ).rstrip()
sink.append(lowerCamelCase__ )
if not quiet:
print(lowerCamelCase__ , lowerCamelCase__ , file=lowerCamelCase__ )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
asyncio.create_task(_read_stream(p.stdout , lambda lowerCamelCase__ : tee(lowerCamelCase__ , lowerCamelCase__ , sys.stdout , label='''stdout:''' ) ) ),
asyncio.create_task(_read_stream(p.stderr , lambda lowerCamelCase__ : tee(lowerCamelCase__ , lowerCamelCase__ , sys.stderr , label='''stderr:''' ) ) ),
] , timeout=lowerCamelCase__ , )
return _RunOutput(await p.wait() , lowerCamelCase__ , lowerCamelCase__ )
def snake_case__ ( lowerCamelCase__ : Dict , lowerCamelCase__ : List[str]=None , lowerCamelCase__ : Optional[int]=None , lowerCamelCase__ : str=1_8_0 , lowerCamelCase__ : Union[str, Any]=False , lowerCamelCase__ : List[str]=True ) -> _RunOutput:
A_ : str = asyncio.get_event_loop()
A_ : Dict = loop.run_until_complete(
_stream_subprocess(lowerCamelCase__ , env=lowerCamelCase__ , stdin=lowerCamelCase__ , timeout=lowerCamelCase__ , quiet=lowerCamelCase__ , echo=lowerCamelCase__ ) )
    cmd_str = ''' '''.join(lowerCamelCase__ )
if result.returncode > 0:
        stderr = '''\n'''.join(result.stderr )
raise RuntimeError(
f'\'{cmd_str}\' failed with returncode {result.returncode}\n\n'
f'The combined stderr from workers follows:\n{stderr}' )
return result
class UpperCamelCase_ (a__ ):
"""simple docstring"""
pass
def snake_case__ ( lowerCamelCase__ : List[str] , lowerCamelCase__ : Optional[Any]=False ) -> int:
try:
A_ : List[Any] = subprocess.check_output(lowerCamelCase__ , stderr=subprocess.STDOUT )
if return_stdout:
if hasattr(lowerCamelCase__ , '''decode''' ):
A_ : Any = output.decode('''utf-8''' )
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
f'Command `{" ".join(lowerCamelCase__ )}` failed with the following error:\n\n{e.output.decode()}' ) from e
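# Usage sketch for the subprocess helpers above. The definitions in this file are
# auto-renamed; the names below follow the upstream accelerate test utilities
# (`execute_subprocess_async`, `run_command`) and are therefore assumptions:
#
#   result = execute_subprocess_async(["python", "-c", "print('ok')"], env=os.environ.copy())
#   assert result.returncode == 0 and result.stdout == ["ok"]
#
#   stdout = run_command(["python", "--version"], return_stdout=True)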
| 4 |
'''simple docstring'''
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class UpperCamelCase_ (unittest.TestCase ):
"""simple docstring"""
def _a ( self : Dict ):
"""simple docstring"""
A_ : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
A_ : Tuple = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowerCamelCase )
A_ : Dict = -1
A_ : List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCamelCase )
A_ : Any = model.generate(_lowerCamelCase , max_new_tokens=10 , do_sample=_lowerCamelCase )
A_ : List[str] = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
A_ : List[str] = TextStreamer(_lowerCamelCase )
model.generate(_lowerCamelCase , max_new_tokens=10 , do_sample=_lowerCamelCase , streamer=_lowerCamelCase )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
A_ : Dict = cs.out[:-1]
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
def _a ( self : Tuple ):
"""simple docstring"""
A_ : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
A_ : List[str] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowerCamelCase )
A_ : Dict = -1
A_ : List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCamelCase )
A_ : Optional[int] = model.generate(_lowerCamelCase , max_new_tokens=10 , do_sample=_lowerCamelCase )
A_ : str = tokenizer.decode(greedy_ids[0] )
A_ : int = TextIteratorStreamer(_lowerCamelCase )
A_ : List[Any] = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
A_ : List[Any] = Thread(target=model.generate , kwargs=_lowerCamelCase )
thread.start()
A_ : List[Any] = ''''''
for new_text in streamer:
streamer_text += new_text
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
def _a ( self : int ):
"""simple docstring"""
A_ : List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
A_ : List[Any] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowerCamelCase )
A_ : List[str] = -1
A_ : Any = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCamelCase )
A_ : Tuple = model.generate(_lowerCamelCase , max_new_tokens=10 , do_sample=_lowerCamelCase )
A_ : Tuple = greedy_ids[:, input_ids.shape[1] :]
A_ : Tuple = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
A_ : Any = TextStreamer(_lowerCamelCase , skip_prompt=_lowerCamelCase )
model.generate(_lowerCamelCase , max_new_tokens=10 , do_sample=_lowerCamelCase , streamer=_lowerCamelCase )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
A_ : Any = cs.out[:-1]
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
def _a ( self : List[Any] ):
"""simple docstring"""
A_ : List[Any] = AutoTokenizer.from_pretrained('''distilgpt2''' )
A_ : Tuple = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(_lowerCamelCase )
A_ : List[Any] = -1
A_ : Union[str, Any] = torch.ones((1, 5) , device=_lowerCamelCase ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
A_ : List[Any] = TextStreamer(_lowerCamelCase , skip_special_tokens=_lowerCamelCase )
model.generate(_lowerCamelCase , max_new_tokens=1 , do_sample=_lowerCamelCase , streamer=_lowerCamelCase )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
A_ : List[str] = cs.out[:-1] # Remove the final "\n"
A_ : List[Any] = tokenizer(_lowerCamelCase , return_tensors='''pt''' )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A_ : str = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
A_ : str = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowerCamelCase )
A_ : Union[str, Any] = -1
A_ : Union[str, Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCamelCase )
A_ : List[str] = TextIteratorStreamer(_lowerCamelCase , timeout=0.0_01 )
A_ : str = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
A_ : List[str] = Thread(target=model.generate , kwargs=_lowerCamelCase )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(_lowerCamelCase ):
A_ : str = ''''''
for new_text in streamer:
streamer_text += new_text
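if __name__ == "__main__":
    # Minimal end-to-end sketch of the streaming API exercised above: `generate` runs in a
    # background thread while the main thread consumes decoded text chunks as they arrive.
    # The tiny test checkpoint is reused here so the sketch stays fast.
    tok = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
    model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
    inputs = tok("Hello", return_tensors="pt")
    streamer = TextIteratorStreamer(tok)
    thread = Thread(target=model.generate, kwargs={**inputs, "max_new_tokens": 10, "streamer": streamer})
    thread.start()
    for chunk in streamer:
        print(chunk, end="", flush=True)  # chunks are printed as soon as they are generated
    thread.join()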
| 4 | 1 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class UpperCamelCase_ (metaclass=a__ ):
"""simple docstring"""
_lowerCAmelCase = ['onnx']
def __init__( self : List[Any] , *_lowerCamelCase : List[Any] , **_lowerCamelCase : List[str] ):
"""simple docstring"""
requires_backends(self , ['''onnx'''] )
@classmethod
def _a ( cls : str , *_lowerCamelCase : Tuple , **_lowerCamelCase : Tuple ):
"""simple docstring"""
requires_backends(cls , ['''onnx'''] )
@classmethod
def _a ( cls : Optional[Any] , *_lowerCamelCase : str , **_lowerCamelCase : Optional[int] ):
"""simple docstring"""
requires_backends(cls , ['''onnx'''] )
| 4 |
'''simple docstring'''
import heapq
def greedy_min_vertex_cover(graph: dict) -> set[int]:
    """Greedy approximation algorithm for minimum vertex cover.

    >>> graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    >>> greedy_min_vertex_cover(graph)
    {0, 1, 2, 4}
    """
    queue: list[list] = []
    # for each node and its adjacency list, add them and the rank of the node to the queue
    # using the heapq module, the queue is filled like a priority queue
    # heapq implements a min-priority queue, so -1 * len(v) is pushed to simulate a max-priority queue
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])
    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()
    # while the queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract the vertex with max rank from the queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)
        # remove all arcs adjacent to argmax
        for elem in queue:
            # if the vertex has no adjacent nodes left, skip it
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem,
            # remove argmax from elem's adjacency list and update its rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue)
    return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(F'Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}')
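    # Optional sanity check (a sketch): a set C is a vertex cover iff every edge
    # in the adjacency dict has at least one endpoint in C.
    def is_vertex_cover(g: dict, cover: set) -> bool:
        return all(u in cover or v in cover for u in g for v in g[u])

    print(F'Cover is valid: {is_vertex_cover(graph, greedy_min_vertex_cover(graph))}')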
| 4 | 1 |
'''simple docstring'''
from __future__ import annotations
graph = {
"""A""": ["""B""", """C""", """E"""],
"""B""": ["""A""", """D""", """E"""],
"""C""": ["""A""", """F""", """G"""],
"""D""": ["""B"""],
"""E""": ["""A""", """B""", """D"""],
"""F""": ["""C"""],
"""G""": ["""C"""],
}
class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        """Graph is implemented as a dictionary of adjacency lists."""
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breadth_first_search(self) -> None:
        """Map every reachable vertex to its parent in the breadth-first tree."""
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        """Return the shortest path from the source to `target_vertex` as 'v1->v2->...'."""
        if target_vertex == self.source_vertex:
            return self.source_vertex
        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = f'No path from vertex: {self.source_vertex} to vertex: {target_vertex}'
            raise ValueError(msg)
        return self.shortest_path(target_vertex_parent) + f'->{target_vertex}'
if __name__ == "__main__":
    g = Graph(graph, 'G')
    g.breadth_first_search()
    print(g.shortest_path('D'))
    print(g.shortest_path('G'))
    try:
        print(g.shortest_path('Foo'))
    except ValueError as error:
        print(error)  # 'Foo' is not in the graph, so a ValueError is raised
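    # Expected console output for the sample graph above (BFS rooted at 'G'):
    #   G->C->A->B->D
    #   G
    #   No path from vertex: G to vertex: Foo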
| 4 |
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
snake_case__ = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
snake_case__ = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'transformer.encoder.layers.{i}.self_attn.out_proj.weight', F'encoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(F'transformer.encoder.layers.{i}.self_attn.out_proj.bias', F'encoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.weight', F'encoder.layers.{i}.fc1.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.bias', F'encoder.layers.{i}.fc1.bias'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.weight', F'encoder.layers.{i}.fc2.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.bias', F'encoder.layers.{i}.fc2.bias'))
rename_keys.append(
(F'transformer.encoder.layers.{i}.norm1.weight', F'encoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((F'transformer.encoder.layers.{i}.norm1.bias', F'encoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.weight', F'encoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.bias', F'encoder.layers.{i}.final_layer_norm.bias'))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'transformer.decoder.layers.{i}.self_attn.out_proj.weight', F'decoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.self_attn.out_proj.bias', F'decoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append(
(
F'transformer.decoder.layers.{i}.multihead_attn.out_proj.weight',
F'decoder.layers.{i}.encoder_attn.out_proj.weight',
)
)
rename_keys.append(
(
F'transformer.decoder.layers.{i}.multihead_attn.out_proj.bias',
F'decoder.layers.{i}.encoder_attn.out_proj.bias',
)
)
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.weight', F'decoder.layers.{i}.fc1.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.bias', F'decoder.layers.{i}.fc1.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.weight', F'decoder.layers.{i}.fc2.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.bias', F'decoder.layers.{i}.fc2.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm1.weight', F'decoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((F'transformer.decoder.layers.{i}.norm1.bias', F'decoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm2.weight', F'decoder.layers.{i}.encoder_attn_layer_norm.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm2.bias', F'decoder.layers.{i}.encoder_attn_layer_norm.bias')
)
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.weight', F'decoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.bias', F'decoder.layers.{i}.final_layer_norm.bias'))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.encoder.norm.weight""", """encoder.layernorm.weight"""),
("""transformer.encoder.norm.bias""", """encoder.layernorm.bias"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict):
    prefix = ""
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight")
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
def resize(image, checkpoint_url):
    width, height = image.size
    current_max_size = max(width, height)
    target_max_size = 800 if "detection" in checkpoint_url else 1000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))
    return resized_image
def normalize(image):
    image = F.to_tensor(image)
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return image
@torch.no_grad()
def snake_case__ ( lowerCamelCase__ : List[Any] , lowerCamelCase__ : int , lowerCamelCase__ : int ) -> str:
logger.info('''Converting model...''' )
# load original state dict
A_ : Tuple = torch.hub.load_state_dict_from_url(lowerCamelCase__ , map_location='''cpu''' )
# rename keys
for src, dest in rename_keys:
rename_key(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
A_ : str = rename_backbone_keys(lowerCamelCase__ )
# query, key and value matrices need special treatment
read_in_q_k_v(lowerCamelCase__ )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
A_ : List[Any] = '''model.'''
for key in state_dict.copy().keys():
if not key.startswith('''class_labels_classifier''' ) and not key.startswith('''bbox_predictor''' ):
A_ : List[Any] = state_dict.pop(lowerCamelCase__ )
A_ : str = val
# create HuggingFace model and load state dict
A_ : Union[str, Any] = TableTransformerConfig(
backbone='''resnet18''' , mask_loss_coefficient=1 , dice_loss_coefficient=1 , ce_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.4 , class_cost=1 , bbox_cost=5 , giou_cost=2 , )
if "detection" in checkpoint_url:
A_ : Dict = 1_5
A_ : Dict = 2
A_ : int = {0: '''table''', 1: '''table rotated'''}
A_ : List[str] = idalabel
A_ : Optional[int] = {v: k for k, v in idalabel.items()}
else:
A_ : Union[str, Any] = 1_2_5
A_ : Optional[Any] = 6
A_ : Optional[Any] = {
0: '''table''',
1: '''table column''',
2: '''table row''',
3: '''table column header''',
4: '''table projected row header''',
5: '''table spanning cell''',
}
A_ : int = idalabel
A_ : Tuple = {v: k for k, v in idalabel.items()}
A_ : Optional[Any] = DetrImageProcessor(
format='''coco_detection''' , max_size=8_0_0 if '''detection''' in checkpoint_url else 1_0_0_0 )
A_ : int = TableTransformerForObjectDetection(lowerCamelCase__ )
model.load_state_dict(lowerCamelCase__ )
model.eval()
# verify our conversion
A_ : Optional[int] = '''example_pdf.png''' if '''detection''' in checkpoint_url else '''example_table.png'''
A_ : Union[str, Any] = hf_hub_download(repo_id='''nielsr/example-pdf''' , repo_type='''dataset''' , filename=lowerCamelCase__ )
A_ : Tuple = Image.open(lowerCamelCase__ ).convert('''RGB''' )
A_ : int = normalize(resize(lowerCamelCase__ , lowerCamelCase__ ) ).unsqueeze(0 )
A_ : str = model(lowerCamelCase__ )
if "detection" in checkpoint_url:
A_ : str = (1, 1_5, 3)
A_ : int = torch.tensor(
[[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]] )
A_ : Tuple = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]] )
else:
A_ : Optional[int] = (1, 1_2_5, 7)
A_ : Dict = torch.tensor(
[[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]] )
A_ : Any = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, :3, :3] , lowerCamelCase__ , atol=1e-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , lowerCamelCase__ , atol=1e-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(f'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' )
Path(lowerCamelCase__ ).mkdir(exist_ok=lowerCamelCase__ )
model.save_pretrained(lowerCamelCase__ )
image_processor.save_pretrained(lowerCamelCase__ )
if push_to_hub:
# Push model to HF hub
logger.info('''Pushing model to the hub...''' )
A_ : List[Any] = (
'''microsoft/table-transformer-detection'''
if '''detection''' in checkpoint_url
else '''microsoft/table-transformer-structure-recognition'''
)
model.push_to_hub(lowerCamelCase__ )
image_processor.push_to_hub(lowerCamelCase__ )
if __name__ == "__main__":
snake_case__ = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""",
type=str,
choices=[
"""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""",
"""https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth""",
],
help="""URL of the Table Transformer checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
snake_case__ = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
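# Example invocation (a sketch; the script name and output path are illustrative, the
# checkpoint URL is one of the two choices declared above):
#   python convert_table_transformer_checkpoint.py \
#     --checkpoint_url https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth \
#     --pytorch_dump_folder_path ./table-transformer-detection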
| 4 | 1 |
'''simple docstring'''
import inspect
import unittest
from math import floor
from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import CvtForImageClassification, CvtModel
from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCamelCase_ (a__ ):
"""simple docstring"""
def _a ( self : List[Any] ):
"""simple docstring"""
A_ : List[str] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_lowerCamelCase , '''embed_dim''' ) )
self.parent.assertTrue(hasattr(_lowerCamelCase , '''num_heads''' ) )
class UpperCamelCase_ :
"""simple docstring"""
def __init__( self : int , _lowerCamelCase : int , _lowerCamelCase : List[str]=13 , _lowerCamelCase : str=64 , _lowerCamelCase : str=3 , _lowerCamelCase : Optional[Any]=[16, 48, 96] , _lowerCamelCase : Dict=[1, 3, 6] , _lowerCamelCase : Tuple=[1, 2, 10] , _lowerCamelCase : List[str]=[7, 3, 3] , _lowerCamelCase : Any=[4, 2, 2] , _lowerCamelCase : str=[2, 1, 1] , _lowerCamelCase : List[str]=[2, 2, 2] , _lowerCamelCase : List[Any]=[False, False, True] , _lowerCamelCase : Any=[0.0, 0.0, 0.0] , _lowerCamelCase : int=0.02 , _lowerCamelCase : Tuple=1E-12 , _lowerCamelCase : Union[str, Any]=True , _lowerCamelCase : Optional[Any]=True , _lowerCamelCase : int=2 , ):
"""simple docstring"""
A_ : Any = parent
A_ : List[Any] = batch_size
A_ : List[str] = image_size
A_ : str = patch_sizes
A_ : Union[str, Any] = patch_stride
A_ : int = patch_padding
A_ : int = is_training
A_ : List[str] = use_labels
A_ : List[str] = num_labels
A_ : List[Any] = num_channels
A_ : str = embed_dim
A_ : Optional[int] = num_heads
A_ : Optional[Any] = stride_kv
A_ : str = depth
A_ : Any = cls_token
A_ : int = attention_drop_rate
A_ : str = initializer_range
A_ : int = layer_norm_eps
def _a ( self : Any ):
"""simple docstring"""
A_ : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A_ : Union[str, Any] = None
if self.use_labels:
A_ : Optional[int] = ids_tensor([self.batch_size] , self.num_labels )
A_ : Optional[Any] = self.get_config()
return config, pixel_values, labels
def _a ( self : List[str] ):
"""simple docstring"""
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def _a ( self : Tuple , _lowerCamelCase : List[Any] , _lowerCamelCase : str , _lowerCamelCase : Optional[Any] ):
"""simple docstring"""
A_ : List[str] = CvtModel(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A_ : Dict = model(_lowerCamelCase )
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
for i in range(len(self.depth ) ):
A_ : Optional[Any] = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
A_ : List[str] = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
def _a ( self : List[str] , _lowerCamelCase : str , _lowerCamelCase : Dict , _lowerCamelCase : List[Any] ):
"""simple docstring"""
A_ : Optional[Any] = self.num_labels
A_ : Tuple = CvtForImageClassification(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A_ : str = model(_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a ( self : Dict ):
"""simple docstring"""
A_ : int = self.prepare_config_and_inputs()
A_ ,A_ ,A_ : Union[str, Any] = config_and_inputs
A_ : List[str] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class UpperCamelCase_ (a__, a__, unittest.TestCase ):
"""simple docstring"""
_lowerCAmelCase = (CvtModel, CvtForImageClassification) if is_torch_available() else ()
_lowerCAmelCase = (
{'feature-extraction': CvtModel, 'image-classification': CvtForImageClassification}
if is_torch_available()
else {}
)
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
def _a ( self : Optional[int] ):
"""simple docstring"""
A_ : List[Any] = CvtModelTester(self )
A_ : Union[str, Any] = ConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase , hidden_size=37 )
def _a ( self : List[Any] ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _a ( self : Optional[int] ):
"""simple docstring"""
return
@unittest.skip(reason='''Cvt does not output attentions''' )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
pass
@unittest.skip(reason='''Cvt does not use inputs_embeds''' )
def _a ( self : Optional[Any] ):
"""simple docstring"""
pass
@unittest.skip(reason='''Cvt does not support input and output embeddings''' )
def _a ( self : Optional[int] ):
"""simple docstring"""
pass
def _a ( self : str ):
"""simple docstring"""
A_ ,A_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : Union[str, Any] = model_class(_lowerCamelCase )
A_ : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A_ : Dict = [*signature.parameters.keys()]
A_ : int = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _lowerCamelCase )
def _a ( self : int ):
"""simple docstring"""
A_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCamelCase )
def _a ( self : List[str] ):
"""simple docstring"""
def check_hidden_states_output(_lowerCamelCase : Any , _lowerCamelCase : List[Any] , _lowerCamelCase : Any ):
A_ : Optional[int] = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
with torch.no_grad():
A_ : Tuple = model(**self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) )
A_ : int = outputs.hidden_states
A_ : Union[str, Any] = len(self.model_tester.depth )
self.assertEqual(len(_lowerCamelCase ) , _lowerCamelCase )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
A_ ,A_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : Any = True
check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A_ : Union[str, Any] = True
check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCamelCase )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def _a ( self : int ):
"""simple docstring"""
pass
@slow
def _a ( self : str ):
"""simple docstring"""
for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : int = CvtModel.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
def snake_case__ ( ) -> Any:
A_ : List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class UpperCamelCase_ (unittest.TestCase ):
"""simple docstring"""
@cached_property
def _a ( self : List[str] ):
"""simple docstring"""
return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def _a ( self : str ):
"""simple docstring"""
A_ : int = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(_lowerCamelCase )
A_ : Union[str, Any] = self.default_image_processor
A_ : Union[str, Any] = prepare_img()
A_ : str = image_processor(images=_lowerCamelCase , return_tensors='''pt''' ).to(_lowerCamelCase )
# forward pass
with torch.no_grad():
A_ : str = model(**_lowerCamelCase )
# verify the logits
A_ : Dict = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowerCamelCase )
A_ : Tuple = torch.tensor([0.92_85, 0.90_15, -0.31_50] ).to(_lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCamelCase , atol=1E-4 ) )
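# Standalone inference sketch mirroring the integration test above (the checkpoint id
# assumes CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] == "microsoft/cvt-13" upstream):
#   processor = AutoImageProcessor.from_pretrained("microsoft/cvt-13")
#   model = CvtForImageClassification.from_pretrained("microsoft/cvt-13")
#   inputs = processor(images=prepare_img(), return_tensors="pt")
#   with torch.no_grad():
#       logits = model(**inputs).logits
#   print(model.config.id2label[logits.argmax(-1).item()])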
| 4 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
snake_case__ = logging.getLogger(__name__)
@dataclass(frozen=a__ )
class UpperCamelCase_ :
"""simple docstring"""
_lowerCAmelCase = 42
_lowerCAmelCase = 42
_lowerCAmelCase = None
_lowerCAmelCase = None
_lowerCAmelCase = None
@dataclass(frozen=a__ )
class UpperCamelCase_ :
"""simple docstring"""
_lowerCAmelCase = 42
_lowerCAmelCase = None
_lowerCAmelCase = None
_lowerCAmelCase = None
_lowerCAmelCase = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class UpperCamelCase_ (a__ ):
"""simple docstring"""
_lowerCAmelCase = 42
def __init__( self : Optional[int] , _lowerCamelCase : str , _lowerCamelCase : PreTrainedTokenizer , _lowerCamelCase : str , _lowerCamelCase : Optional[int] = None , _lowerCamelCase : List[Any]=False , _lowerCamelCase : bool = False , ):
"""simple docstring"""
A_ : Optional[int] = hans_processors[task]()
A_ : int = os.path.join(
_lowerCamelCase , '''cached_{}_{}_{}_{}'''.format(
'''dev''' if evaluate else '''train''' , tokenizer.__class__.__name__ , str(_lowerCamelCase ) , _lowerCamelCase , ) , )
A_ : Dict = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
A_ : Optional[int] = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
A_ : str = cached_features_file + '''.lock'''
with FileLock(_lowerCamelCase ):
if os.path.exists(_lowerCamelCase ) and not overwrite_cache:
logger.info(f'Loading features from cached file {cached_features_file}' )
A_ : List[str] = torch.load(_lowerCamelCase )
else:
logger.info(f'Creating features from dataset file at {data_dir}' )
A_ : Optional[int] = (
processor.get_dev_examples(_lowerCamelCase ) if evaluate else processor.get_train_examples(_lowerCamelCase )
)
logger.info('''Training examples: %s''' , len(_lowerCamelCase ) )
A_ : Optional[int] = hans_convert_examples_to_features(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
logger.info('''Saving features into cached file %s''' , _lowerCamelCase )
torch.save(self.features , _lowerCamelCase )
def __len__( self : List[str] ):
"""simple docstring"""
return len(self.features )
def __getitem__( self : List[str] , _lowerCamelCase : Optional[int] ):
"""simple docstring"""
return self.features[i]
def _a ( self : str ):
"""simple docstring"""
return self.label_list
if is_tf_available():
import tensorflow as tf
class UpperCamelCase_ :
"""simple docstring"""
_lowerCAmelCase = 42
def __init__( self : Optional[int] , _lowerCamelCase : str , _lowerCamelCase : PreTrainedTokenizer , _lowerCamelCase : str , _lowerCamelCase : Optional[int] = 128 , _lowerCamelCase : Dict=False , _lowerCamelCase : bool = False , ):
"""simple docstring"""
A_ : Optional[int] = hans_processors[task]()
A_ : Optional[int] = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
A_ : Tuple = label_list
A_ : Optional[int] = processor.get_dev_examples(_lowerCamelCase ) if evaluate else processor.get_train_examples(_lowerCamelCase )
A_ : Tuple = hans_convert_examples_to_features(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='''convert examples to features''' ):
if ex_index % 10000 == 0:
logger.info('''Writing example %d of %d''' % (ex_index, len(_lowerCamelCase )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
A_ : List[Any] = tf.data.Dataset.from_generator(
_lowerCamelCase , (
{
'''example_id''': tf.intaa,
'''input_ids''': tf.intaa,
'''attention_mask''': tf.intaa,
'''token_type_ids''': tf.intaa,
},
tf.intaa,
) , (
{
'''example_id''': tf.TensorShape([] ),
'''input_ids''': tf.TensorShape([None, None] ),
'''attention_mask''': tf.TensorShape([None, None] ),
'''token_type_ids''': tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) , )
def _a ( self : Any ):
"""simple docstring"""
return self.dataset
def __len__( self : Dict ):
"""simple docstring"""
return len(self.features )
def __getitem__( self : Optional[int] , _lowerCamelCase : List[str] ):
"""simple docstring"""
return self.features[i]
def _a ( self : Tuple ):
"""simple docstring"""
return self.label_list
class UpperCamelCase_ (a__ ):
"""simple docstring"""
def _a ( self : List[str] , _lowerCamelCase : Union[str, Any] ):
"""simple docstring"""
return self._create_examples(self._read_tsv(os.path.join(_lowerCamelCase , '''heuristics_train_set.txt''' ) ) , '''train''' )
def _a ( self : List[str] , _lowerCamelCase : Tuple ):
"""simple docstring"""
return self._create_examples(self._read_tsv(os.path.join(_lowerCamelCase , '''heuristics_evaluation_set.txt''' ) ) , '''dev''' )
def _a ( self : Any ):
"""simple docstring"""
return ["contradiction", "entailment", "neutral"]
def _a ( self : Optional[Any] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Any ):
"""simple docstring"""
A_ : Tuple = []
for i, line in enumerate(_lowerCamelCase ):
if i == 0:
continue
A_ : str = '''%s-%s''' % (set_type, line[0])
A_ : Optional[Any] = line[5]
A_ : Union[str, Any] = line[6]
A_ : List[str] = line[7][2:] if line[7].startswith('''ex''' ) else line[7]
A_ : str = line[0]
examples.append(InputExample(guid=_lowerCamelCase , text_a=_lowerCamelCase , text_b=_lowerCamelCase , label=_lowerCamelCase , pairID=_lowerCamelCase ) )
return examples
def snake_case__ ( lowerCamelCase__ : List[InputExample] , lowerCamelCase__ : List[str] , lowerCamelCase__ : int , lowerCamelCase__ : PreTrainedTokenizer , ) -> int:
A_ : Union[str, Any] = {label: i for i, label in enumerate(lowerCamelCase__ )}
A_ : Optional[Any] = []
for ex_index, example in tqdm.tqdm(enumerate(lowerCamelCase__ ) , desc='''convert examples to features''' ):
if ex_index % 1_0_0_0_0 == 0:
logger.info('''Writing example %d''' % (ex_index) )
A_ : Optional[int] = tokenizer(
example.text_a , example.text_b , add_special_tokens=lowerCamelCase__ , max_length=lowerCamelCase__ , padding='''max_length''' , truncation=lowerCamelCase__ , return_overflowing_tokens=lowerCamelCase__ , )
A_ : List[str] = label_map[example.label] if example.label in label_map else 0
A_ : Tuple = int(example.pairID )
features.append(InputFeatures(**lowerCamelCase__ , label=lowerCamelCase__ , pairID=lowerCamelCase__ ) )
for i, example in enumerate(examples[:5] ):
logger.info('''*** Example ***''' )
logger.info(f'guid: {example}' )
logger.info(f'features: {features[i]}' )
return features
snake_case__ = {
"""hans""": 3,
}
hans_processors = {
"""hans""": HansProcessor,
}
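# Hypothetical end-to-end wiring (a sketch; `HansDataset` is the upstream name of the torch
# dataset class defined above, the keyword names are assumptions, and `data_dir` must contain
# heuristics_train_set.txt / heuristics_evaluation_set.txt from the HANS repository):
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
#   dataset = HansDataset(data_dir="./hans", tokenizer=tokenizer, task="hans", evaluate=True)
#   print(len(dataset), dataset.get_labels())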
| 4 | 1 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class UpperCamelCase_ (a__ ):
"""simple docstring"""
_lowerCAmelCase = 'naver-clova-ix/donut-base-finetuned-docvqa'
_lowerCAmelCase = (
'This is a tool that answers a question about an document (pdf). It takes an input named `document` which '
'should be the document containing the information, as well as a `question` that is the question about the '
'document. It returns a text that contains the answer to the question.'
)
_lowerCAmelCase = 'document_qa'
_lowerCAmelCase = AutoProcessor
_lowerCAmelCase = VisionEncoderDecoderModel
_lowerCAmelCase = ['image', 'text']
_lowerCAmelCase = ['text']
def __init__( self : Optional[int] , *_lowerCamelCase : Any , **_lowerCamelCase : Dict ):
"""simple docstring"""
if not is_vision_available():
raise ValueError('''Pillow must be installed to use the DocumentQuestionAnsweringTool.''' )
super().__init__(*_lowerCamelCase , **_lowerCamelCase )
def _a ( self : str , _lowerCamelCase : "Image" , _lowerCamelCase : str ):
"""simple docstring"""
A_ : Tuple = '''<s_docvqa><s_question>{user_input}</s_question><s_answer>'''
A_ : List[str] = task_prompt.replace('''{user_input}''' , _lowerCamelCase )
A_ : str = self.pre_processor.tokenizer(
_lowerCamelCase , add_special_tokens=_lowerCamelCase , return_tensors='''pt''' ).input_ids
A_ : Optional[Any] = self.pre_processor(_lowerCamelCase , return_tensors='''pt''' ).pixel_values
return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
def _a ( self : Optional[Any] , _lowerCamelCase : Union[str, Any] ):
"""simple docstring"""
return self.model.generate(
inputs['''pixel_values'''].to(self.device ) , decoder_input_ids=inputs['''decoder_input_ids'''].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=_lowerCamelCase , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=_lowerCamelCase , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=_lowerCamelCase , ).sequences
def _a ( self : Optional[Any] , _lowerCamelCase : Optional[Any] ):
"""simple docstring"""
A_ : Optional[int] = self.pre_processor.batch_decode(_lowerCamelCase )[0]
A_ : int = sequence.replace(self.pre_processor.tokenizer.eos_token , '''''' )
A_ : Tuple = sequence.replace(self.pre_processor.tokenizer.pad_token , '''''' )
A_ : str = re.sub(R'''<.*?>''' , '''''' , _lowerCamelCase , count=1 ).strip() # remove first task start token
A_ : Any = self.pre_processor.tokenajson(_lowerCamelCase )
return sequence["answer"]
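# Hypothetical direct use (a sketch; upstream the class above is named
# DocumentQuestionAnsweringTool, PipelineTool instances are callable, and the image path
# is purely illustrative):
#   from PIL import Image
#   tool = DocumentQuestionAnsweringTool()
#   print(tool(Image.open("invoice.png"), "What is the total amount?"))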
| 4 |
'''simple docstring'''
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
snake_case__ = datasets.utils.logging.get_logger(__name__)
@dataclass
class UpperCamelCase_ (datasets.BuilderConfig ):
"""simple docstring"""
_lowerCAmelCase = None
_lowerCAmelCase = "utf-8"
_lowerCAmelCase = None
_lowerCAmelCase = None
_lowerCAmelCase = True # deprecated
_lowerCAmelCase = None # deprecated
_lowerCAmelCase = 1_0 << 2_0 # 10MB
_lowerCAmelCase = None
class UpperCamelCase_ (datasets.ArrowBasedBuilder ):
"""simple docstring"""
_lowerCAmelCase = JsonConfig
def _a ( self : int ):
"""simple docstring"""
if self.config.block_size is not None:
logger.warning('''The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead''' )
A_ : List[Any] = self.config.block_size
if self.config.use_threads is not True:
logger.warning(
'''The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.''' )
if self.config.newlines_in_values is not None:
raise ValueError('''The JSON loader parameter `newlines_in_values` is no longer supported''' )
return datasets.DatasetInfo(features=self.config.features )
def _a ( self : Any , _lowerCamelCase : List[str] ):
"""simple docstring"""
if not self.config.data_files:
raise ValueError(f'At least one data file must be specified, but got data_files={self.config.data_files}' )
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
return splits
def _a ( self : int , _lowerCamelCase : pa.Table ):
"""simple docstring"""
if self.config.features is not None:
# adding missing columns
for column_name in set(self.config.features ) - set(pa_table.column_names ):
A_ : Optional[int] = self.config.features.arrow_schema.field(_lowerCamelCase ).type
A_ : Optional[int] = pa_table.append_column(_lowerCamelCase , pa.array([None] * len(_lowerCamelCase ) , type=_lowerCamelCase ) )
# more expensive cast to support nested structures with keys in a different order
# allows str <-> int/float or str to Audio for example
A_ : str = table_cast(_lowerCamelCase , self.config.features.arrow_schema )
return pa_table
def _a ( self : List[str] , _lowerCamelCase : int ):
"""simple docstring"""
for file_idx, file in enumerate(itertools.chain.from_iterable(_lowerCamelCase ) ):
# If the file is one json object and if we need to look at the list of items in one specific field
if self.config.field is not None:
with open(_lowerCamelCase , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
A_ : int = json.load(_lowerCamelCase )
# We keep only the field we are interested in
A_ : List[str] = dataset[self.config.field]
                # We accept two formats: a list of dicts or a dict of lists
if isinstance(_lowerCamelCase , (list, tuple) ):
A_ : int = set().union(*[row.keys() for row in dataset] )
A_ : List[str] = {col: [row.get(_lowerCamelCase ) for row in dataset] for col in keys}
else:
A_ : Tuple = dataset
A_ : Dict = pa.Table.from_pydict(_lowerCamelCase )
yield file_idx, self._cast_table(_lowerCamelCase )
# If the file has one json object per line
else:
with open(_lowerCamelCase , '''rb''' ) as f:
                    batch_idx = 0
# Use block_size equal to the chunk size divided by 32 to leverage multithreading
# Set a default minimum value of 16kB if the chunk size is really small
                    block_size = max(self.config.chunksize // 32 , 16 << 10 )
A_ : int = (
self.config.encoding_errors if self.config.encoding_errors is not None else '''strict'''
)
while True:
A_ : Any = f.read(self.config.chunksize )
if not batch:
break
# Finish current line
try:
batch += f.readline()
except (AttributeError, io.UnsupportedOperation):
batch += readline(_lowerCamelCase )
# PyArrow only accepts utf-8 encoded bytes
if self.config.encoding != "utf-8":
A_ : Optional[Any] = batch.decode(self.config.encoding , errors=_lowerCamelCase ).encode('''utf-8''' )
try:
while True:
try:
A_ : List[Any] = paj.read_json(
io.BytesIO(_lowerCamelCase ) , read_options=paj.ReadOptions(block_size=_lowerCamelCase ) )
break
except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
if (
isinstance(_lowerCamelCase , pa.ArrowInvalid )
and "straddling" not in str(_lowerCamelCase )
or block_size > len(_lowerCamelCase )
):
raise
else:
# Increase the block size in case it was too small.
# The block size will be reset for the next file.
logger.debug(
f'Batch of {len(_lowerCamelCase )} bytes couldn\'t be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.' )
block_size *= 2
except pa.ArrowInvalid as e:
try:
with open(
_lowerCamelCase , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
A_ : Optional[Any] = json.load(_lowerCamelCase )
except json.JSONDecodeError:
logger.error(f'Failed to read file \'{file}\' with error {type(_lowerCamelCase )}: {e}' )
raise e
# If possible, parse the file as a list of json objects and exit the loop
if isinstance(_lowerCamelCase , _lowerCamelCase ): # list is the only sequence type supported in JSON
try:
A_ : Optional[int] = set().union(*[row.keys() for row in dataset] )
A_ : Tuple = {col: [row.get(_lowerCamelCase ) for row in dataset] for col in keys}
A_ : int = pa.Table.from_pydict(_lowerCamelCase )
except (pa.ArrowInvalid, AttributeError) as e:
logger.error(f'Failed to read file \'{file}\' with error {type(_lowerCamelCase )}: {e}' )
raise ValueError(f'Not able to read records in the JSON file at {file}.' ) from None
yield file_idx, self._cast_table(_lowerCamelCase )
break
else:
logger.error(f'Failed to read file \'{file}\' with error {type(_lowerCamelCase )}: {e}' )
raise ValueError(
f'Not able to read records in the JSON file at {file}. '
f'You should probably indicate the field of the JSON file containing your records. '
f'This JSON file contain the following fields: {str(list(dataset.keys() ) )}. '
f'Select the correct one and provide it as `field=\'XXX\'` to the dataset loading method. ' ) from None
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(_lowerCamelCase )
batch_idx += 1
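# Added sketch (not part of the original builder): the inner retry loop above
# doubles `block_size` whenever pyarrow reports a JSON row "straddling" a chunk
# boundary. The same pattern in isolation, assuming only pyarrow is installed
# (a real reader should also bound the retries, as the builder does via the
# `block_size > len(batch)` check):
#
#     import io
#     import pyarrow as pa
#     import pyarrow.json as paj
#
#     def read_json_with_retry(raw: bytes, block_size: int = 16 << 10) -> pa.Table:
#         while True:
#             try:
#                 return paj.read_json(
#                     io.BytesIO(raw), read_options=paj.ReadOptions(block_size=block_size)
#                 )
#             except pa.ArrowInvalid:
#                 block_size *= 2  # grow until a full row fits in one block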
| 4 | 1 |
'''simple docstring'''
from __future__ import annotations
import math
def ucal( u : float , p : int ) -> float:
    temp : float = u
    for i in range(1 , p ):
        temp = temp * (u - i)
    return temp
def main( ) -> None:
    n : int = int(input('''enter the numbers of values: ''' ) )
    y : list[list[float]] = []
    for _ in range(n ):
        y.append([] )
    for i in range(n ):
        for j in range(n ):
            y[i].append(j )
            y[i][j] = 0
    print('''enter the values of parameters in a list: ''' )
    x : list[int] = list(map(int , input().split() ) )
    print('''enter the values of corresponding parameters: ''' )
    for i in range(n ):
        y[i][0] = float(input() )
    value : int = int(input('''enter the value to interpolate: ''' ) )
    u : float = (value - x[0]) / (x[1] - x[0])
    # for calculating forward difference table
    for i in range(1 , n ):
        for j in range(n - i ):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    summ : float = y[0][0]
    for i in range(1 , n ):
        summ += (ucal(u , i ) * y[0][i]) / math.factorial(i )
    print(f'the value at {value} is {summ}' )
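# Added worked example (values are from the classic sine-degree table, not the
# original file): with x = [45, 50, 55, 60] and first y column
# [0.7071, 0.7660, 0.8192, 0.8660], interpolating at value = 52 gives
# summ ≈ 0.788, which matches sin(52°).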
if __name__ == "__main__":
main()
| 4 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
snake_case__ = logging.get_logger(__name__)
snake_case__ = {
"""microsoft/swin-tiny-patch4-window7-224""": (
"""https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"""
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class UpperCamelCase_ (a__, a__ ):
"""simple docstring"""
_lowerCAmelCase = 'swin'
_lowerCAmelCase = {
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self : Any , _lowerCamelCase : Optional[Any]=224 , _lowerCamelCase : List[str]=4 , _lowerCamelCase : Optional[Any]=3 , _lowerCamelCase : Tuple=96 , _lowerCamelCase : List[Any]=[2, 2, 6, 2] , _lowerCamelCase : List[str]=[3, 6, 12, 24] , _lowerCamelCase : List[Any]=7 , _lowerCamelCase : Optional[int]=4.0 , _lowerCamelCase : List[str]=True , _lowerCamelCase : List[str]=0.0 , _lowerCamelCase : Any=0.0 , _lowerCamelCase : Dict=0.1 , _lowerCamelCase : List[str]="gelu" , _lowerCamelCase : Tuple=False , _lowerCamelCase : Dict=0.02 , _lowerCamelCase : Optional[Any]=1E-5 , _lowerCamelCase : Any=32 , _lowerCamelCase : Tuple=None , _lowerCamelCase : Any=None , **_lowerCamelCase : str , ):
"""simple docstring"""
super().__init__(**_lowerCamelCase )
A_ : Optional[int] = image_size
A_ : Optional[int] = patch_size
A_ : Optional[int] = num_channels
A_ : Any = embed_dim
A_ : List[Any] = depths
A_ : Any = len(_lowerCamelCase )
A_ : List[Any] = num_heads
A_ : Tuple = window_size
A_ : Tuple = mlp_ratio
A_ : Dict = qkv_bias
A_ : List[str] = hidden_dropout_prob
A_ : List[str] = attention_probs_dropout_prob
A_ : Any = drop_path_rate
A_ : List[Any] = hidden_act
A_ : Tuple = use_absolute_embeddings
A_ : int = layer_norm_eps
A_ : Optional[Any] = initializer_range
A_ : Union[str, Any] = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
A_ : str = int(embed_dim * 2 ** (len(_lowerCamelCase ) - 1) )
A_ : str = ['''stem'''] + [f'stage{idx}' for idx in range(1 , len(_lowerCamelCase ) + 1 )]
A_ ,A_ : Optional[Any] = get_aligned_output_features_output_indices(
out_features=_lowerCamelCase , out_indices=_lowerCamelCase , stage_names=self.stage_names )
class UpperCamelCase_ (a__ ):
"""simple docstring"""
_lowerCAmelCase = version.parse('1.11' )
@property
def _a ( self : str ):
"""simple docstring"""
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def _a ( self : Union[str, Any] ):
"""simple docstring"""
return 1E-4
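# Added note (sketch): upstream this pair corresponds to SwinConfig /
# SwinOnnxConfig. The derived channel dimension after the last stage is
# embed_dim * 2 ** (len(depths) - 1), e.g. 96 * 2 ** 3 = 768 for the tiny
# variant configured above.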
| 4 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class UpperCamelCase_ :
"""simple docstring"""
_lowerCAmelCase = LEDConfig
_lowerCAmelCase = {}
_lowerCAmelCase = 'gelu'
def __init__( self : List[Any] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : List[Any]=13 , _lowerCamelCase : Dict=7 , _lowerCamelCase : Dict=True , _lowerCamelCase : int=False , _lowerCamelCase : Optional[Any]=99 , _lowerCamelCase : List[Any]=32 , _lowerCamelCase : List[Any]=2 , _lowerCamelCase : Optional[int]=4 , _lowerCamelCase : Optional[int]=37 , _lowerCamelCase : str=0.1 , _lowerCamelCase : Union[str, Any]=0.1 , _lowerCamelCase : List[str]=20 , _lowerCamelCase : int=2 , _lowerCamelCase : Optional[Any]=1 , _lowerCamelCase : Tuple=0 , _lowerCamelCase : Any=4 , ):
"""simple docstring"""
A_ : List[Any] = parent
A_ : Tuple = batch_size
A_ : List[Any] = seq_length
A_ : List[Any] = is_training
A_ : str = use_labels
A_ : List[str] = vocab_size
A_ : Union[str, Any] = hidden_size
A_ : List[str] = num_hidden_layers
A_ : List[Any] = num_attention_heads
A_ : Optional[Any] = intermediate_size
A_ : str = hidden_dropout_prob
A_ : Any = attention_probs_dropout_prob
A_ : Optional[Any] = max_position_embeddings
A_ : List[Any] = eos_token_id
A_ : str = pad_token_id
A_ : Tuple = bos_token_id
A_ : List[Any] = attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
A_ : Optional[Any] = self.attention_window + 2
# because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
A_ : Optional[int] = (
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
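        # Added worked example: with the defaults seq_length=7 and
        # attention_window=4, the padded encoder_seq_length is
        # 7 + (4 - 7 % 4) % 4 = 8, i.e. the next multiple of the attention window.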
def _a ( self : Optional[Any] ):
"""simple docstring"""
A_ : List[str] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
A_ : Any = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
A_ : Union[str, Any] = tf.concat([input_ids, eos_tensor] , axis=1 )
A_ : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A_ : List[Any] = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
A_ : Dict = prepare_led_inputs_dict(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
A_ : Optional[Any] = tf.concat(
[tf.zeros_like(_lowerCamelCase )[:, :-1], tf.ones_like(_lowerCamelCase )[:, -1:]] , axis=-1 , )
A_ : Optional[int] = global_attention_mask
return config, inputs_dict
def _a ( self : Optional[Any] , _lowerCamelCase : str , _lowerCamelCase : Optional[int] ):
"""simple docstring"""
A_ : Optional[int] = TFLEDModel(config=_lowerCamelCase ).get_decoder()
A_ : str = inputs_dict['''input_ids''']
A_ : Optional[Any] = input_ids[:1, :]
A_ : Optional[Any] = inputs_dict['''attention_mask'''][:1, :]
A_ : List[Any] = 1
# first forward pass
A_ : Optional[int] = model(_lowerCamelCase , attention_mask=_lowerCamelCase , use_cache=_lowerCamelCase )
A_ ,A_ : List[str] = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
A_ : List[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
        A_ : Any = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.int8 )
# append to next input_ids and
A_ : List[Any] = tf.concat([input_ids, next_tokens] , axis=-1 )
A_ : Tuple = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
A_ : Optional[int] = model(_lowerCamelCase , attention_mask=_lowerCamelCase )[0]
A_ : Dict = model(_lowerCamelCase , attention_mask=_lowerCamelCase , past_key_values=_lowerCamelCase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
A_ : List[str] = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
A_ : List[str] = output_from_no_past[:, -3:, random_slice_idx]
A_ : List[str] = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(_lowerCamelCase , _lowerCamelCase , rtol=1E-3 )
def prepare_led_inputs_dict( config : Optional[Any] , input_ids : Dict , decoder_input_ids : Tuple , attention_mask : int=None , decoder_attention_mask : int=None , head_mask : List[Any]=None , decoder_head_mask : Dict=None , ) -> Dict:
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
            ] , axis=-1 , )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "decoder_input_ids": decoder_input_ids,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
    }
@require_tf
class UpperCamelCase_ (a__, a__, unittest.TestCase ):
"""simple docstring"""
_lowerCAmelCase = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
_lowerCAmelCase = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
_lowerCAmelCase = (
{
'conversational': TFLEDForConditionalGeneration,
'feature-extraction': TFLEDModel,
'summarization': TFLEDForConditionalGeneration,
'text2text-generation': TFLEDForConditionalGeneration,
'translation': TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
_lowerCAmelCase = True
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
def _a ( self : List[Any] ):
"""simple docstring"""
A_ : Tuple = TFLEDModelTester(self )
A_ : Tuple = ConfigTester(self , config_class=_lowerCamelCase )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def _a ( self : Any ):
"""simple docstring"""
A_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*_lowerCamelCase )
def _a ( self : int ):
"""simple docstring"""
A_ ,A_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
A_ : str = tf.zeros_like(inputs_dict['''attention_mask'''] )
A_ : str = 2
A_ : Dict = tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict['''global_attention_mask'''] , )
A_ : str = True
A_ : Dict = self.model_tester.seq_length
A_ : Dict = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(_lowerCamelCase : Union[str, Any] ):
A_ : str = outputs.decoder_attentions
self.assertEqual(len(_lowerCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(_lowerCamelCase : Union[str, Any] ):
A_ : Optional[int] = [t.numpy() for t in outputs.encoder_attentions]
A_ : Any = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(_lowerCamelCase ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(_lowerCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
A_ : Optional[int] = True
A_ : str = False
A_ : Optional[Any] = False
A_ : List[Any] = model_class(_lowerCamelCase )
A_ : str = model(self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) )
A_ : int = len(_lowerCamelCase )
self.assertEqual(config.output_hidden_states , _lowerCamelCase )
check_encoder_attentions_output(_lowerCamelCase )
if self.is_encoder_decoder:
A_ : str = model_class(_lowerCamelCase )
A_ : Dict = model(self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) )
self.assertEqual(config.output_hidden_states , _lowerCamelCase )
check_decoder_attentions_output(_lowerCamelCase )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
A_ : List[Any] = True
A_ : Optional[int] = model_class(_lowerCamelCase )
A_ : Dict = model(self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) )
self.assertEqual(config.output_hidden_states , _lowerCamelCase )
check_encoder_attentions_output(_lowerCamelCase )
# Check attention is always last and order is fine
A_ : Tuple = True
A_ : Dict = True
A_ : Union[str, Any] = model_class(_lowerCamelCase )
A_ : List[str] = model(self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_lowerCamelCase ) )
self.assertEqual(model.config.output_hidden_states , _lowerCamelCase )
check_encoder_attentions_output(_lowerCamelCase )
@unittest.skip('''LED keeps using potentially symbolic tensors in conditionals and breaks tracing.''' )
def _a ( self : int ):
"""simple docstring"""
pass
def _a ( self : Optional[int] ):
"""simple docstring"""
pass
def _long_tensor( tok_lst : List[str] ) -> tf.Tensor:
    return tf.constant(tok_lst , dtype=tf.int32 )
snake_case__ = 1e-4
@slow
@require_tf
class UpperCamelCase_ (unittest.TestCase ):
"""simple docstring"""
def _a ( self : Optional[int] ):
"""simple docstring"""
A_ : List[Any] = TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' ).led
# change to intended input here
A_ : Any = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] )
A_ : List[Any] = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] )
A_ : List[Any] = prepare_led_inputs_dict(model.config , _lowerCamelCase , _lowerCamelCase )
A_ : Tuple = model(**_lowerCamelCase )[0]
A_ : Dict = (1, 1024, 768)
self.assertEqual(output.shape , _lowerCamelCase )
# change to expected output here
A_ : Optional[Any] = tf.convert_to_tensor(
[[2.30_50, 2.82_79, 0.65_31], [-1.84_57, -0.14_55, -3.56_61], [-1.01_86, 0.45_86, -2.20_43]] , )
tf.debugging.assert_near(output[:, :3, :3] , _lowerCamelCase , atol=1E-3 )
def _a ( self : Tuple ):
"""simple docstring"""
A_ : Tuple = TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' )
# change to intended input here
A_ : Optional[Any] = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] )
A_ : List[Any] = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] )
A_ : Union[str, Any] = prepare_led_inputs_dict(model.config , _lowerCamelCase , _lowerCamelCase )
A_ : Any = model(**_lowerCamelCase )[0]
A_ : Optional[int] = (1, 1024, model.config.vocab_size)
self.assertEqual(output.shape , _lowerCamelCase )
# change to expected output here
A_ : int = tf.convert_to_tensor(
[[33.65_07, 6.45_72, 16.80_89], [5.87_39, -2.42_38, 11.29_02], [-3.21_39, -4.31_49, 4.27_83]] , )
tf.debugging.assert_near(output[:, :3, :3] , _lowerCamelCase , atol=1E-3 , rtol=1E-3 )
| 4 |
'''simple docstring'''
from __future__ import annotations
def two_pointer( nums : list[int] , target : int ) -> list[int]:
    i : int = 0
    j : int = len(nums ) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1
return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'{two_pointer([2, 7, 11, 15], 9) = }')
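    # Added note: the scan above assumes `nums` is sorted ascending; on unsorted
    # input it can miss valid pairs, e.g. the call below prints [] even though
    # 2 + 7 == 9.
    print(F'{two_pointer([11, 2, 15, 7], 9) = }')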
| 4 | 1 |
'''simple docstring'''
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
snake_case__ = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class UpperCamelCase_ (a__ ):
"""simple docstring"""
_lowerCAmelCase = field(default=a__, metadata={'help': 'Whether to use SortishSampler or not.'} )
_lowerCAmelCase = field(
default=a__, metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'} )
_lowerCAmelCase = field(
default=a__, metadata={
'help': (
'The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default '
'to the `max_length` value of the model configuration.'
)
}, )
_lowerCAmelCase = field(
default=a__, metadata={
'help': (
'The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default '
'to the `num_beams` value of the model configuration.'
)
}, )
_lowerCAmelCase = field(
default=a__, metadata={
'help': 'Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.'
}, )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
        d : dict = super().to_dict()
        for k, v in d.items():
            if isinstance(v , GenerationConfig ):
                d[k] = v.to_dict()
return d
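# Added usage sketch (argument values illustrative; upstream this class is
# Seq2SeqTrainingArguments): the override above makes a nested GenerationConfig
# JSON-serialisable, e.g.
#
#     args = UpperCamelCase_(output_dir="out", predict_with_generate=True)
#     assert isinstance(args.to_dict(), dict)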
| 4 |
'''simple docstring'''
def valid_coloring( neighbours : list[int] , colored_vertices : list[int] , color : int ) -> bool:
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours ) )
def util_color( graph : list[list[int]] , max_colors : int , colored_vertices : list[int] , index : int ) -> bool:
    # Base Case
    if index == len(graph ):
        return True
    # Recursive Step
    for i in range(max_colors ):
        if valid_coloring(graph[index] , colored_vertices , i ):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph , max_colors , colored_vertices , index + 1 ):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False
def color( graph : list[list[int]] , max_colors : int ) -> list[int]:
    colored_vertices : list[int] = [-1] * len(graph )
    if util_color(graph , max_colors , colored_vertices , 0 ):
        return colored_vertices
return []
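# Added usage sketch: a triangle (K3) needs three colours, so with max_colors=3
# `color` returns a valid assignment and with max_colors=2 it returns [].
if __name__ == "__main__":
    triangle = [[0, 1, 1], [1, 0, 1], [1, 1, 0]]
    print(color(triangle , 3 ) )  # e.g. [0, 1, 2]
    print(color(triangle , 2 ) )  # []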
| 4 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester (unittest.TestCase ):
"""simple docstring"""
def __init__( self : List[Any] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Tuple=13 , _lowerCamelCase : List[str]=3 , _lowerCamelCase : List[Any]=224 , _lowerCamelCase : Tuple=30 , _lowerCamelCase : Union[str, Any]=400 , _lowerCamelCase : List[str]=True , _lowerCamelCase : Optional[Any]=None , _lowerCamelCase : Tuple=True , _lowerCamelCase : Any=[0.5, 0.5, 0.5] , _lowerCamelCase : Optional[int]=[0.5, 0.5, 0.5] , ):
"""simple docstring"""
A_ : Optional[int] = size if size is not None else {'''height''': 18, '''width''': 18}
A_ : str = parent
A_ : Any = batch_size
A_ : Union[str, Any] = num_channels
A_ : str = image_size
A_ : Union[str, Any] = min_resolution
A_ : Dict = max_resolution
A_ : Any = do_resize
A_ : str = size
A_ : int = do_normalize
A_ : Optional[int] = image_mean
A_ : List[str] = image_std
def _a ( self : Tuple ):
"""simple docstring"""
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class UpperCamelCase_ (a__, unittest.TestCase ):
"""simple docstring"""
_lowerCAmelCase = ViTImageProcessor if is_vision_available() else None
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A_ : Tuple = EfficientFormerImageProcessorTester(self )
@property
def _a ( self : Any ):
"""simple docstring"""
return self.image_proc_tester.prepare_image_processor_dict()
def _a ( self : int ):
"""simple docstring"""
A_ : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowerCamelCase , '''image_mean''' ) )
self.assertTrue(hasattr(_lowerCamelCase , '''image_std''' ) )
self.assertTrue(hasattr(_lowerCamelCase , '''do_normalize''' ) )
self.assertTrue(hasattr(_lowerCamelCase , '''do_resize''' ) )
self.assertTrue(hasattr(_lowerCamelCase , '''size''' ) )
def _a ( self : Tuple ):
"""simple docstring"""
pass
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A_ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A_ : Any = prepare_image_inputs(self.image_proc_tester , equal_resolution=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , Image.Image )
# Test not batched input
A_ : int = image_processor(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , )
# Test batched
A_ : Optional[int] = image_processor(_lowerCamelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , )
def _a ( self : Any ):
"""simple docstring"""
A_ : str = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A_ : Tuple = prepare_image_inputs(self.image_proc_tester , equal_resolution=_lowerCamelCase , numpify=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , np.ndarray )
# Test not batched input
A_ : Optional[int] = image_processor(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , )
# Test batched
A_ : Dict = image_processor(_lowerCamelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , )
def _a ( self : Optional[int] ):
"""simple docstring"""
A_ : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A_ : int = prepare_image_inputs(self.image_proc_tester , equal_resolution=_lowerCamelCase , torchify=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , torch.Tensor )
# Test not batched input
A_ : int = image_processor(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , )
# Test batched
A_ : Optional[Any] = image_processor(_lowerCamelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , )
| 4 |
'''simple docstring'''
from __future__ import annotations
from PIL import Image
# Define glider example
GLIDER = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def new_generation( cells : list[list[int]] ) -> list[list[int]]:
    next_generation : list[list[int]] = []
    for i in range(len(cells ) ):
        next_generation_row : list[int] = []
        for j in range(len(cells[i] ) ):
            # Get the number of live neighbours
            neighbour_count : int = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i] ) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i] ) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells ) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells ) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells ) - 1 and j < len(cells[i] ) - 1:
                neighbour_count += cells[i + 1][j + 1]
            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            # Similarly, all other dead cells stay dead.
            alive : bool = cells[i][j] == 1
            if (
                (alive and 2 <= neighbour_count <= 3)
                or not alive
                and neighbour_count == 3
            ):
                next_generation_row.append(1 )
            else:
                next_generation_row.append(0 )
        next_generation.append(next_generation_row )
    return next_generation
def generate_images( cells : list[list[int]] , frames : int ) -> list[Image.Image]:
    images : list[Image.Image] = []
    for _ in range(frames ):
        # Create output image
        img = Image.new('''RGB''' , (len(cells[0] ), len(cells )) )
        pixels = img.load()
        # Save cells to image
        for x in range(len(cells ) ):
            for y in range(len(cells[0] ) ):
                colour = 2_5_5 - cells[y][x] * 2_5_5
                pixels[x, y] = (colour, colour, colour)
        # Save image
        images.append(img )
        cells = new_generation(cells )
    return images
if __name__ == "__main__":
    images = generate_images(GLIDER, 16)
    images[0].save("""out.gif""", save_all=True, append_images=images[1:])
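    # Added check (sketch): BLINKER oscillates with period 2, so two
    # generations reproduce the original pattern.
    assert new_generation(new_generation(BLINKER)) == BLINKER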
| 4 | 1 |
'''simple docstring'''
def gnome_sort( lst : list ) -> list:
    if len(lst ) <= 1:
        return lst
    i : int = 1
    while i < len(lst ):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1] ,lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst
if __name__ == "__main__":
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    unsorted = [int(item) for item in user_input.split(""",""")]
print(gnome_sort(unsorted))
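    # Added sanity check (sketch): a deterministic example alongside the
    # interactive demo above.
    assert gnome_sort([34, 2, 10, -9]) == [-9, 2, 10, 34]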
| 4 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class UpperCamelCase_ (unittest.TestCase ):
"""simple docstring"""
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A_ : Any = tempfile.mkdtemp()
A_ : List[Any] = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
A_ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
A_ : Tuple = {
'''do_resize''': True,
'''size''': 20,
'''do_center_crop''': True,
'''crop_size''': 18,
'''do_normalize''': True,
'''image_mean''': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
'''image_std''': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
}
A_ : List[Any] = os.path.join(self.tmpdirname , _lowerCamelCase )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(_lowerCamelCase , _lowerCamelCase )
def _a ( self : Dict , **_lowerCamelCase : Tuple ):
"""simple docstring"""
return BertTokenizer.from_pretrained(self.tmpdirname , **_lowerCamelCase )
def _a ( self : Optional[int] , **_lowerCamelCase : Optional[int] ):
"""simple docstring"""
return BertTokenizerFast.from_pretrained(self.tmpdirname , **_lowerCamelCase )
def _a ( self : Optional[Any] , **_lowerCamelCase : Tuple ):
"""simple docstring"""
return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **_lowerCamelCase )
def _a ( self : Tuple ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def _a ( self : int ):
"""simple docstring"""
        A_ : Union[str, Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
A_ : Any = [Image.fromarray(np.moveaxis(_lowerCamelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _a ( self : int ):
"""simple docstring"""
A_ : Tuple = self.get_tokenizer()
A_ : Tuple = self.get_rust_tokenizer()
A_ : Dict = self.get_image_processor()
A_ : List[Any] = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
processor_slow.save_pretrained(self.tmpdirname )
A_ : str = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=_lowerCamelCase )
A_ : Any = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
processor_fast.save_pretrained(self.tmpdirname )
A_ : List[Any] = AlignProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , _lowerCamelCase )
self.assertIsInstance(processor_fast.tokenizer , _lowerCamelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , _lowerCamelCase )
self.assertIsInstance(processor_fast.image_processor , _lowerCamelCase )
def _a ( self : List[Any] ):
"""simple docstring"""
A_ : List[str] = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
A_ : Optional[int] = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
A_ : Tuple = self.get_image_processor(do_normalize=_lowerCamelCase , padding_value=1.0 )
A_ : List[str] = AlignProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_lowerCamelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _lowerCamelCase )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A_ : Dict = self.get_image_processor()
A_ : Any = self.get_tokenizer()
A_ : List[str] = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
A_ : Any = self.prepare_image_inputs()
A_ : List[Any] = image_processor(_lowerCamelCase , return_tensors='''np''' )
A_ : str = processor(images=_lowerCamelCase , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _a ( self : Dict ):
"""simple docstring"""
A_ : str = self.get_image_processor()
A_ : List[str] = self.get_tokenizer()
A_ : Optional[int] = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
A_ : int = '''lower newer'''
A_ : str = processor(text=_lowerCamelCase )
A_ : Dict = tokenizer(_lowerCamelCase , padding='''max_length''' , max_length=64 )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _a ( self : str ):
"""simple docstring"""
A_ : Optional[int] = self.get_image_processor()
A_ : Optional[Any] = self.get_tokenizer()
A_ : List[str] = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
A_ : List[Any] = '''lower newer'''
A_ : Optional[int] = self.prepare_image_inputs()
A_ : List[Any] = processor(text=_lowerCamelCase , images=_lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(_lowerCamelCase ):
processor()
def _a ( self : List[str] ):
"""simple docstring"""
A_ : Optional[Any] = self.get_image_processor()
A_ : Optional[int] = self.get_tokenizer()
A_ : List[Any] = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
A_ : str = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
A_ : str = processor.batch_decode(_lowerCamelCase )
A_ : Union[str, Any] = tokenizer.batch_decode(_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
def _a ( self : Tuple ):
"""simple docstring"""
A_ : str = self.get_image_processor()
A_ : Tuple = self.get_tokenizer()
A_ : Any = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
A_ : str = '''lower newer'''
A_ : List[str] = self.prepare_image_inputs()
A_ : Tuple = processor(text=_lowerCamelCase , images=_lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
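        # Added note (sketch): AlignProcessor simply pairs the BertTokenizer with
        # the EfficientNetImageProcessor, so `processor(text=..., images=...)`
        # returns one BatchEncoding holding both the text fields and `pixel_values`.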
| 4 | 1 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
snake_case__ = logging.get_logger(__name__)
snake_case__ = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {"""mobilebert-uncased""": """https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"""},
"""tokenizer_file""": {
"""mobilebert-uncased""": """https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json"""
},
}
snake_case__ = {"""mobilebert-uncased""": 5_12}
PRETRAINED_INIT_CONFIGURATION = {}
class UpperCamelCase_ (a__ ):
"""simple docstring"""
_lowerCAmelCase = VOCAB_FILES_NAMES
_lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCAmelCase = PRETRAINED_INIT_CONFIGURATION
_lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCAmelCase = MobileBertTokenizer
def __init__( self : Tuple , _lowerCamelCase : Optional[int]=None , _lowerCamelCase : Optional[Any]=None , _lowerCamelCase : int=True , _lowerCamelCase : List[str]="[UNK]" , _lowerCamelCase : Optional[int]="[SEP]" , _lowerCamelCase : Any="[PAD]" , _lowerCamelCase : Any="[CLS]" , _lowerCamelCase : Union[str, Any]="[MASK]" , _lowerCamelCase : List[Any]=True , _lowerCamelCase : List[Any]=None , **_lowerCamelCase : int , ):
"""simple docstring"""
super().__init__(
_lowerCamelCase , tokenizer_file=_lowerCamelCase , do_lower_case=_lowerCamelCase , unk_token=_lowerCamelCase , sep_token=_lowerCamelCase , pad_token=_lowerCamelCase , cls_token=_lowerCamelCase , mask_token=_lowerCamelCase , tokenize_chinese_chars=_lowerCamelCase , strip_accents=_lowerCamelCase , **_lowerCamelCase , )
A_ : Optional[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , _lowerCamelCase ) != do_lower_case
or normalizer_state.get('''strip_accents''' , _lowerCamelCase ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , _lowerCamelCase ) != tokenize_chinese_chars
):
A_ : List[Any] = getattr(_lowerCamelCase , normalizer_state.pop('''type''' ) )
A_ : Union[str, Any] = do_lower_case
A_ : Optional[int] = strip_accents
A_ : Tuple = tokenize_chinese_chars
A_ : Any = normalizer_class(**_lowerCamelCase )
A_ : Optional[int] = do_lower_case
    def _a ( self : int , token_ids_a : Union[str, Any] , token_ids_b : Optional[int]=None ):
        """simple docstring"""
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output
    def _a ( self : Any , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
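        # Added note: for a pair (A, B) this yields zeros over "[CLS] A [SEP]"
        # and ones over "B [SEP]" -- the standard BERT segment-id layout.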
    def _a ( self : Union[str, Any] , save_directory : str , filename_prefix : Optional[str] = None ):
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
| 4 |
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
snake_case__ = logging.get_logger(__name__)
snake_case__ = """▁"""
VOCAB_FILES_NAMES = {
"""vocab_file""": """vocab.json""",
"""spm_file""": """sentencepiece.bpe.model""",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/s2t-small-librispeech-asr""": (
"""https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json"""
),
},
"""spm_file""": {
"""facebook/s2t-small-librispeech-asr""": (
"""https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model"""
)
},
}
MAX_MODEL_INPUT_SIZES = {
"""facebook/s2t-small-librispeech-asr""": 10_24,
}
snake_case__ = ["""pt""", """fr""", """ru""", """nl""", """ro""", """it""", """es""", """de"""]
snake_case__ = {"""mustc""": MUSTC_LANGS}
class UpperCamelCase_ (a__ ):
"""simple docstring"""
_lowerCAmelCase = VOCAB_FILES_NAMES
_lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCAmelCase = MAX_MODEL_INPUT_SIZES
_lowerCAmelCase = ['input_ids', 'attention_mask']
_lowerCAmelCase = []
def __init__( self : Optional[int] , _lowerCamelCase : List[str] , _lowerCamelCase : List[str] , _lowerCamelCase : str="<s>" , _lowerCamelCase : Union[str, Any]="</s>" , _lowerCamelCase : Dict="<pad>" , _lowerCamelCase : str="<unk>" , _lowerCamelCase : Union[str, Any]=False , _lowerCamelCase : int=False , _lowerCamelCase : Any=None , _lowerCamelCase : Any=None , _lowerCamelCase : Optional[Dict[str, Any]] = None , **_lowerCamelCase : Optional[int] , ):
"""simple docstring"""
A_ : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_lowerCamelCase , eos_token=_lowerCamelCase , unk_token=_lowerCamelCase , pad_token=_lowerCamelCase , do_upper_case=_lowerCamelCase , do_lower_case=_lowerCamelCase , tgt_lang=_lowerCamelCase , lang_codes=_lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCamelCase , )
A_ : Optional[int] = do_upper_case
A_ : Tuple = do_lower_case
A_ : Tuple = load_json(_lowerCamelCase )
A_ : Tuple = {v: k for k, v in self.encoder.items()}
A_ : List[Any] = spm_file
A_ : List[str] = load_spm(_lowerCamelCase , self.sp_model_kwargs )
if lang_codes is not None:
A_ : Any = lang_codes
A_ : Optional[Any] = LANGUAGES[lang_codes]
A_ : Optional[Any] = [f'<lang:{lang}>' for lang in self.langs]
A_ : Union[str, Any] = {lang: self.sp_model.PieceToId(f'<lang:{lang}>' ) for lang in self.langs}
A_ : Optional[int] = self.lang_tokens
A_ : int = tgt_lang if tgt_lang is not None else self.langs[0]
self.set_tgt_lang_special_tokens(self._tgt_lang )
else:
A_ : Dict = {}
@property
def _a ( self : Tuple ):
"""simple docstring"""
return len(self.encoder )
@property
def _a ( self : int ):
"""simple docstring"""
return self._tgt_lang
@tgt_lang.setter
def _a ( self : List[str] , _lowerCamelCase : Any ):
"""simple docstring"""
A_ : int = new_tgt_lang
self.set_tgt_lang_special_tokens(_lowerCamelCase )
def _a ( self : Tuple , _lowerCamelCase : str ):
"""simple docstring"""
A_ : List[str] = self.lang_code_to_id[tgt_lang]
A_ : Optional[Any] = [lang_code_id]
def _a ( self : Optional[Any] , _lowerCamelCase : str ):
"""simple docstring"""
return self.sp_model.encode(_lowerCamelCase , out_type=_lowerCamelCase )
def _a ( self : List[Any] , _lowerCamelCase : int ):
"""simple docstring"""
return self.encoder.get(_lowerCamelCase , self.encoder[self.unk_token] )
def _a ( self : int , _lowerCamelCase : int ):
"""simple docstring"""
return self.decoder.get(_lowerCamelCase , self.unk_token )
def _a ( self : int , _lowerCamelCase : List[str] ):
"""simple docstring"""
A_ : List[Any] = []
A_ : Any = ''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
A_ : Union[str, Any] = self.sp_model.decode(_lowerCamelCase )
out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
A_ : Optional[Any] = []
else:
current_sub_tokens.append(_lowerCamelCase )
A_ : Tuple = self.sp_model.decode(_lowerCamelCase )
out_string += decoded.upper() if self.do_upper_case else decoded
return out_string.strip()
def _a ( self : int , _lowerCamelCase : Dict , _lowerCamelCase : Any=None ):
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id]
def _a ( self : List[Any] , _lowerCamelCase : List[int] , _lowerCamelCase : Optional[List[int]] = None , _lowerCamelCase : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowerCamelCase , token_ids_a=_lowerCamelCase , already_has_special_tokens=_lowerCamelCase )
A_ : Tuple = [1] * len(self.prefix_tokens )
A_ : Tuple = [1]
if token_ids_a is None:
return prefix_ones + ([0] * len(_lowerCamelCase )) + suffix_ones
return prefix_ones + ([0] * len(_lowerCamelCase )) + ([0] * len(_lowerCamelCase )) + suffix_ones
def _a ( self : Dict ):
"""simple docstring"""
A_ : Union[str, Any] = self.encoder.copy()
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Union[str, Any] ):
"""simple docstring"""
A_ : Dict = self.__dict__.copy()
A_ : List[Any] = None
return state
def __setstate__( self : List[str] , _lowerCamelCase : Dict ):
"""simple docstring"""
A_ : Dict = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
A_ : Optional[int] = {}
A_ : int = load_spm(self.spm_file , self.sp_model_kwargs )
def _a ( self : Optional[Any] , _lowerCamelCase : str , _lowerCamelCase : Optional[str] = None ):
"""simple docstring"""
A_ : Dict = Path(_lowerCamelCase )
assert save_dir.is_dir(), f'{save_directory} should be a directory'
A_ : Optional[int] = save_dir / (
(filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''vocab_file''']
)
A_ : Optional[int] = save_dir / (
(filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''spm_file''']
)
save_json(self.encoder , _lowerCamelCase )
if os.path.abspath(self.spm_file ) != os.path.abspath(_lowerCamelCase ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , _lowerCamelCase )
elif not os.path.isfile(self.spm_file ):
with open(_lowerCamelCase , '''wb''' ) as fi:
A_ : List[str] = self.sp_model.serialized_model_proto()
fi.write(_lowerCamelCase )
return (str(_lowerCamelCase ), str(_lowerCamelCase ))
def load_spm( path : str , sp_model_kwargs : Dict[str, Any] ) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs )
    spm.Load(str(path ) )
    return spm
def load_json( path : str ) -> Union[Dict, List]:
    with open(path , '''r''' ) as f:
        return json.load(f )
def save_json( data : Any , path : str ) -> None:
    with open(path , '''w''' ) as f:
        json.dump(data , f , indent=2 )
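# Added round-trip sketch for the helpers above (file names are illustrative):
#
#     save_json({"<s>": 0, "<pad>": 1}, "vocab.json")
#     assert load_json("vocab.json")["<pad>"] == 1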
| 4 | 1 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class UpperCamelCase_ (metaclass=a__ ):
"""simple docstring"""
_lowerCAmelCase = ['flax', 'transformers']
def __init__( self : int , *_lowerCamelCase : Tuple , **_lowerCamelCase : Optional[int] ):
"""simple docstring"""
requires_backends(self , ['''flax''', '''transformers'''] )
@classmethod
def _a ( cls : List[Any] , *_lowerCamelCase : Any , **_lowerCamelCase : str ):
"""simple docstring"""
requires_backends(cls , ['''flax''', '''transformers'''] )
@classmethod
def _a ( cls : List[str] , *_lowerCamelCase : Optional[int] , **_lowerCamelCase : Optional[Any] ):
"""simple docstring"""
requires_backends(cls , ['''flax''', '''transformers'''] )
class UpperCamelCase_ (metaclass=a__ ):
"""simple docstring"""
_lowerCAmelCase = ['flax', 'transformers']
def __init__( self : Dict , *_lowerCamelCase : List[Any] , **_lowerCamelCase : Any ):
"""simple docstring"""
requires_backends(self , ['''flax''', '''transformers'''] )
@classmethod
def _a ( cls : Any , *_lowerCamelCase : str , **_lowerCamelCase : str ):
"""simple docstring"""
requires_backends(cls , ['''flax''', '''transformers'''] )
@classmethod
def _a ( cls : List[Any] , *_lowerCamelCase : str , **_lowerCamelCase : Tuple ):
"""simple docstring"""
requires_backends(cls , ['''flax''', '''transformers'''] )
class UpperCamelCase_ (metaclass=a__ ):
"""simple docstring"""
_lowerCAmelCase = ['flax', 'transformers']
def __init__( self : Dict , *_lowerCamelCase : Union[str, Any] , **_lowerCamelCase : str ):
"""simple docstring"""
requires_backends(self , ['''flax''', '''transformers'''] )
@classmethod
def _a ( cls : List[str] , *_lowerCamelCase : List[Any] , **_lowerCamelCase : Dict ):
"""simple docstring"""
requires_backends(cls , ['''flax''', '''transformers'''] )
@classmethod
def _a ( cls : Tuple , *_lowerCamelCase : List[str] , **_lowerCamelCase : Dict ):
"""simple docstring"""
requires_backends(cls , ['''flax''', '''transformers'''] )
class UpperCamelCase_ (metaclass=a__ ):
"""simple docstring"""
_lowerCAmelCase = ['flax', 'transformers']
def __init__( self : List[Any] , *_lowerCamelCase : Optional[int] , **_lowerCamelCase : Union[str, Any] ):
"""simple docstring"""
requires_backends(self , ['''flax''', '''transformers'''] )
@classmethod
def _a ( cls : List[Any] , *_lowerCamelCase : Optional[Any] , **_lowerCamelCase : int ):
"""simple docstring"""
requires_backends(cls , ['''flax''', '''transformers'''] )
@classmethod
def _a ( cls : Optional[Any] , *_lowerCamelCase : Tuple , **_lowerCamelCase : List[Any] ):
"""simple docstring"""
requires_backends(cls , ['''flax''', '''transformers'''] )
| 4 |
'''simple docstring'''
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
is_python_no_less_than_3_10 = sys.version_info >= (3, 10)
def list_field( default : Union[str, Any]=None , metadata : str=None ) -> List[Any]:
    return field(default_factory=lambda: default , metadata=metadata )
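# Added note: `list_field` wraps `dataclasses.field(default_factory=...)` so a
# mutable list default can be declared inline, e.g. list_field(default=[1, 2, 3]),
# without tripping the mutable-default restriction on dataclass fields.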
@dataclass
class UpperCamelCase_ :
"""simple docstring"""
_lowerCAmelCase = 42
_lowerCAmelCase = 42
_lowerCAmelCase = 42
_lowerCAmelCase = 42
@dataclass
class UpperCamelCase_ :
"""simple docstring"""
_lowerCAmelCase = 4_2
_lowerCAmelCase = field(default='toto', metadata={'help': 'help message'} )
@dataclass
class UpperCamelCase_ :
"""simple docstring"""
_lowerCAmelCase = False
_lowerCAmelCase = True
_lowerCAmelCase = None
class UpperCamelCase_ (a__ ):
"""simple docstring"""
_lowerCAmelCase = 'titi'
_lowerCAmelCase = 'toto'
class UpperCamelCase_ (a__ ):
"""simple docstring"""
_lowerCAmelCase = 'titi'
_lowerCAmelCase = 'toto'
_lowerCAmelCase = 4_2
@dataclass
class UpperCamelCase_ :
"""simple docstring"""
_lowerCAmelCase = "toto"
def _a ( self : Optional[Any] ):
"""simple docstring"""
A_ : Optional[int] = BasicEnum(self.foo )
@dataclass
class UpperCamelCase_ :
"""simple docstring"""
_lowerCAmelCase = "toto"
def _a ( self : Tuple ):
"""simple docstring"""
A_ : Optional[Any] = MixedTypeEnum(self.foo )
@dataclass
class UpperCamelCase_ :
"""simple docstring"""
_lowerCAmelCase = None
_lowerCAmelCase = field(default=a__, metadata={'help': 'help message'} )
_lowerCAmelCase = None
_lowerCAmelCase = list_field(default=[] )
_lowerCAmelCase = list_field(default=[] )
@dataclass
class UpperCamelCase_ :
"""simple docstring"""
_lowerCAmelCase = list_field(default=[] )
_lowerCAmelCase = list_field(default=[1, 2, 3] )
_lowerCAmelCase = list_field(default=['Hallo', 'Bonjour', 'Hello'] )
_lowerCAmelCase = list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class UpperCamelCase_ :
"""simple docstring"""
_lowerCAmelCase = field()
_lowerCAmelCase = field()
_lowerCAmelCase = field()
def _a ( self : Tuple ):
"""simple docstring"""
A_ : Tuple = BasicEnum(self.required_enum )
@dataclass
class UpperCamelCase_ :
"""simple docstring"""
_lowerCAmelCase = 42
_lowerCAmelCase = field()
_lowerCAmelCase = None
_lowerCAmelCase = field(default='toto', metadata={'help': 'help message'} )
_lowerCAmelCase = list_field(default=['Hallo', 'Bonjour', 'Hello'] )
if is_python_no_less_than_3_10:
@dataclass
class UpperCamelCase_ :
"""simple docstring"""
_lowerCAmelCase = False
_lowerCAmelCase = True
_lowerCAmelCase = None
@dataclass
class UpperCamelCase_ :
"""simple docstring"""
_lowerCAmelCase = None
_lowerCAmelCase = field(default=a__, metadata={'help': 'help message'} )
_lowerCAmelCase = None
_lowerCAmelCase = list_field(default=[] )
_lowerCAmelCase = list_field(default=[] )
class UpperCamelCase_ (unittest.TestCase ):
"""simple docstring"""
def _a ( self : List[str] , _lowerCamelCase : argparse.ArgumentParser , _lowerCamelCase : argparse.ArgumentParser ):
"""simple docstring"""
self.assertEqual(len(a._actions ) , len(b._actions ) )
for x, y in zip(a._actions , b._actions ):
A_ : Union[str, Any] = {k: v for k, v in vars(_lowerCamelCase ).items() if k != '''container'''}
A_ : Optional[Any] = {k: v for k, v in vars(_lowerCamelCase ).items() if k != '''container'''}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get('''choices''' , _lowerCamelCase ) and yy.get('''choices''' , _lowerCamelCase ):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx['''type'''](_lowerCamelCase ) , yy['''type'''](_lowerCamelCase ) )
del xx["type"], yy["type"]
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
def _a ( self : Optional[int] ):
"""simple docstring"""
A_ : Union[str, Any] = HfArgumentParser(_lowerCamelCase )
A_ : Optional[Any] = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=_lowerCamelCase , required=_lowerCamelCase )
expected.add_argument('''--bar''' , type=_lowerCamelCase , required=_lowerCamelCase )
expected.add_argument('''--baz''' , type=_lowerCamelCase , required=_lowerCamelCase )
expected.add_argument('''--flag''' , type=_lowerCamelCase , default=_lowerCamelCase , const=_lowerCamelCase , nargs='''?''' )
self.argparsersEqual(_lowerCamelCase , _lowerCamelCase )
A_ : Union[str, Any] = ['''--foo''', '''1''', '''--baz''', '''quux''', '''--bar''', '''0.5''']
((A_) ,) : List[str] = parser.parse_args_into_dataclasses(_lowerCamelCase , look_for_args_file=_lowerCamelCase )
self.assertFalse(example.flag )
def _a ( self : Dict ):
"""simple docstring"""
A_ : int = HfArgumentParser(_lowerCamelCase )
A_ : int = argparse.ArgumentParser()
expected.add_argument('''--foo''' , default=42 , type=_lowerCamelCase )
expected.add_argument('''--baz''' , default='''toto''' , type=_lowerCamelCase , help='''help message''' )
self.argparsersEqual(_lowerCamelCase , _lowerCamelCase )
def _a ( self : Dict ):
"""simple docstring"""
A_ : Any = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=_lowerCamelCase , default=_lowerCamelCase , const=_lowerCamelCase , nargs='''?''' )
expected.add_argument('''--baz''' , type=_lowerCamelCase , default=_lowerCamelCase , const=_lowerCamelCase , nargs='''?''' )
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument('''--no_baz''' , action='''store_false''' , default=_lowerCamelCase , dest='''baz''' )
expected.add_argument('''--opt''' , type=_lowerCamelCase , default=_lowerCamelCase )
A_ : Dict = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(_lowerCamelCase )
for dataclass_type in dataclass_types:
A_ : Any = HfArgumentParser(_lowerCamelCase )
self.argparsersEqual(_lowerCamelCase , _lowerCamelCase )
A_ : List[Any] = parser.parse_args([] )
self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , baz=_lowerCamelCase , opt=_lowerCamelCase ) )
A_ : Optional[int] = parser.parse_args(['''--foo''', '''--no_baz'''] )
self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , baz=_lowerCamelCase , opt=_lowerCamelCase ) )
A_ : Union[str, Any] = parser.parse_args(['''--foo''', '''--baz'''] )
self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , baz=_lowerCamelCase , opt=_lowerCamelCase ) )
A_ : List[str] = parser.parse_args(['''--foo''', '''True''', '''--baz''', '''True''', '''--opt''', '''True'''] )
self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , baz=_lowerCamelCase , opt=_lowerCamelCase ) )
A_ : List[Any] = parser.parse_args(['''--foo''', '''False''', '''--baz''', '''False''', '''--opt''', '''False'''] )
self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , baz=_lowerCamelCase , opt=_lowerCamelCase ) )
    def _a ( self : List[Any] ):
        """simple docstring"""
        parser = HfArgumentParser(MixedTypeEnumExample )
        expected = argparse.ArgumentParser()
        expected.add_argument(
            '''--foo''' , default='''toto''' , choices=['''titi''', '''toto''', 42] , type=make_choice_type_function(['''titi''', '''toto''', 42] ) , )
        self.argparsersEqual(parser , expected )
        args = parser.parse_args([] )
        self.assertEqual(args.foo , '''toto''' )
        enum_ex = parser.parse_args_into_dataclasses([] )[0]
        self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
        args = parser.parse_args(['''--foo''', '''titi'''] )
        self.assertEqual(args.foo , '''titi''' )
        enum_ex = parser.parse_args_into_dataclasses(['''--foo''', '''titi'''] )[0]
        self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
        args = parser.parse_args(['''--foo''', '''42'''] )
        self.assertEqual(args.foo , 42 )
        enum_ex = parser.parse_args_into_dataclasses(['''--foo''', '''42'''] )[0]
        self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
    def _a ( self : Optional[int] ):
        """simple docstring"""
        @dataclass
        class LiteralExample :
            """simple docstring"""
            foo : Literal['''titi''', '''toto''', 42] = "toto"
        parser = HfArgumentParser(LiteralExample )
        expected = argparse.ArgumentParser()
        expected.add_argument(
            '''--foo''' , default='''toto''' , choices=('''titi''', '''toto''', 42) , type=make_choice_type_function(['''titi''', '''toto''', 42] ) , )
        self.argparsersEqual(parser , expected )
        args = parser.parse_args([] )
        self.assertEqual(args.foo , '''toto''' )
        args = parser.parse_args(['''--foo''', '''titi'''] )
        self.assertEqual(args.foo , '''titi''' )
        args = parser.parse_args(['''--foo''', '''42'''] )
        self.assertEqual(args.foo , 42 )
    def _a ( self : Dict ):
        """simple docstring"""
        parser = HfArgumentParser(ListExample )
        expected = argparse.ArgumentParser()
        expected.add_argument('''--foo_int''' , nargs='''+''' , default=[] , type=int )
        expected.add_argument('''--bar_int''' , nargs='''+''' , default=[1, 2, 3] , type=int )
        expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=str )
        expected.add_argument('''--foo_float''' , nargs='''+''' , default=[0.1, 0.2, 0.3] , type=float )
        self.argparsersEqual(parser , expected )
        args = parser.parse_args([] )
        self.assertEqual(
            args , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['''Hallo''', '''Bonjour''', '''Hello'''] , foo_float=[0.1, 0.2, 0.3] ) , )
        args = parser.parse_args('''--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'''.split() )
        self.assertEqual(args , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['''a''', '''b''', '''c'''] , foo_float=[0.1, 0.7] ) )
    def _a ( self : Dict ):
        """simple docstring"""
        expected = argparse.ArgumentParser()
        expected.add_argument('''--foo''' , default=None , type=int )
        expected.add_argument('''--bar''' , default=None , type=float , help='''help message''' )
        expected.add_argument('''--baz''' , default=None , type=str )
        expected.add_argument('''--ces''' , nargs='''+''' , default=[] , type=str )
        expected.add_argument('''--des''' , nargs='''+''' , default=[] , type=int )
        dataclass_types = [OptionalExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(OptionalExamplePt310 )
        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type )
            self.argparsersEqual(parser , expected )
            args = parser.parse_args([] )
            self.assertEqual(args , Namespace(foo=None , bar=None , baz=None , ces=[] , des=[] ) )
            args = parser.parse_args('''--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'''.split() )
            self.assertEqual(args , Namespace(foo=12 , bar=3.14 , baz='''42''' , ces=['''a''', '''b''', '''c'''] , des=[1, 2, 3] ) )
    def _a ( self : List[Any] ):
        """simple docstring"""
        parser = HfArgumentParser(RequiredExample )
        expected = argparse.ArgumentParser()
        expected.add_argument('''--required_list''' , nargs='''+''' , type=int , required=True )
        expected.add_argument('''--required_str''' , type=str , required=True )
        expected.add_argument(
            '''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=True , )
        self.argparsersEqual(parser , expected )
    def _a ( self : Optional[Any] ):
        """simple docstring"""
        parser = HfArgumentParser(StringLiteralAnnotationExample )
        expected = argparse.ArgumentParser()
        expected.add_argument('''--foo''' , type=int , required=True )
        expected.add_argument(
            '''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=True , )
        expected.add_argument('''--opt''' , type=string_to_bool , default=None )
        expected.add_argument('''--baz''' , default='''toto''' , type=str , help='''help message''' )
        expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=str )
        self.argparsersEqual(parser , expected )
    def _a ( self : Tuple ):
        """simple docstring"""
        parser = HfArgumentParser(BasicExample )
        args_dict = {
            '''foo''': 12,
            '''bar''': 3.14,
            '''baz''': '''42''',
            '''flag''': True,
        }
        parsed_args = parser.parse_dict(args_dict )[0]
        args = BasicExample(**args_dict )
        self.assertEqual(parsed_args , args )
    def _a ( self : List[str] ):
        """simple docstring"""
        parser = HfArgumentParser(BasicExample )
        args_dict = {
            '''foo''': 12,
            '''bar''': 3.14,
            '''baz''': '''42''',
            '''flag''': True,
            '''extra''': 42,
        }
        self.assertRaises(ValueError , parser.parse_dict , args_dict , allow_extra_keys=False )
    def _a ( self : Optional[Any] ):
        """simple docstring"""
        parser = HfArgumentParser(BasicExample )
        args_dict_for_json = {
            '''foo''': 12,
            '''bar''': 3.14,
            '''baz''': '''42''',
            '''flag''': True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir , '''temp_json''' )
            os.mkdir(temp_local_path )
            with open(temp_local_path + '''.json''' , '''w+''' ) as f:
                json.dump(args_dict_for_json , f )
            parsed_args = parser.parse_json_file(Path(temp_local_path + '''.json''' ) )[0]
        args = BasicExample(**args_dict_for_json )
        self.assertEqual(parsed_args , args )
    def _a ( self : int ):
        """simple docstring"""
        parser = HfArgumentParser(BasicExample )
        args_dict_for_yaml = {
            '''foo''': 12,
            '''bar''': 3.14,
            '''baz''': '''42''',
            '''flag''': True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir , '''temp_yaml''' )
            os.mkdir(temp_local_path )
            with open(temp_local_path + '''.yaml''' , '''w+''' ) as f:
                yaml.dump(args_dict_for_yaml , f )
            parsed_args = parser.parse_yaml_file(Path(temp_local_path + '''.yaml''' ) )[0]
        args = BasicExample(**args_dict_for_yaml )
        self.assertEqual(parsed_args , args )
    def _a ( self : Union[str, Any] ):
        """simple docstring"""
        parser = HfArgumentParser(TrainingArguments )
        self.assertIsNotNone(parser )
| 4 | 1 |
'''simple docstring'''
def sum_of_series( first_term : int , common_diff : int , num_of_terms : int ) -> float:
    total : float = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    # closed-form formula for the sum of an arithmetic series
    return total
def main() -> None:
    print(sum_of_series(1 , 1 , 1_0 ) )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    main()
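    # Extra sanity checks (added for illustration): the closed form matches the
    # obvious hand sums, e.g. 1 + 2 + ... + 10 = 55 and four terms of a constant 5.
    assert sum_of_series(1 , 1 , 1_0 ) == 55.0
    assert sum_of_series(5 , 0 , 4 ) == 20.0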
| 4 |
'''simple docstring'''
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
SAMPLE_IMAGE_PROCESSING_CONFIG_DIR = get_tests_dir("""fixtures""")
class UpperCamelCase_ (unittest.TestCase ):
"""simple docstring"""
def _a ( self : List[str] ):
"""simple docstring"""
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        A_ : Tuple = ViTImageProcessor.from_pretrained('''hf-internal-testing/tiny-random-vit''' )
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch('''requests.Session.request''' , return_value=response_mock ) as mock_head:
            A_ : List[Any] = ViTImageProcessor.from_pretrained('''hf-internal-testing/tiny-random-vit''' )
        # Check that the fake head request was indeed called.
        mock_head.assert_called()
def _a ( self : Tuple ):
"""simple docstring"""
A_ : Tuple = ViTImageProcessor.from_pretrained(
'''https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json''' )
def _a ( self : Dict ):
"""simple docstring"""
        with self.assertRaises(OSError ):
# config is in subfolder, the following should not work without specifying the subfolder
A_ : Any = AutoImageProcessor.from_pretrained('''hf-internal-testing/stable-diffusion-all-variants''' )
A_ : Tuple = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/stable-diffusion-all-variants''' , subfolder='''feature_extractor''' )
self.assertIsNotNone(_lowerCamelCase )
@is_staging_test
class UpperCamelCase_ (unittest.TestCase ):
"""simple docstring"""
@classmethod
def _a ( cls : Tuple ):
"""simple docstring"""
A_ : int = TOKEN
HfFolder.save_token(_lowerCamelCase )
@classmethod
def _a ( cls : str ):
"""simple docstring"""
try:
delete_repo(token=cls._token , repo_id='''test-image-processor''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-image-processor-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-image-processor''' )
except HTTPError:
pass
    def _a ( self : List[Any] ):
        """simple docstring"""
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR )
        image_processor.push_to_hub('''test-image-processor''' , use_auth_token=self._token )
        new_image_processor = ViTImageProcessor.from_pretrained(f'{USER}/test-image-processor' )
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v , getattr(new_image_processor , k ) )
        # Reset repo
        delete_repo(token=self._token , repo_id='''test-image-processor''' )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir , repo_id='''test-image-processor''' , push_to_hub=True , use_auth_token=self._token )
        new_image_processor = ViTImageProcessor.from_pretrained(f'{USER}/test-image-processor' )
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v , getattr(new_image_processor , k ) )
    def _a ( self : Optional[Any] ):
        """simple docstring"""
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR )
        image_processor.push_to_hub('''valid_org/test-image-processor''' , use_auth_token=self._token )
        new_image_processor = ViTImageProcessor.from_pretrained('''valid_org/test-image-processor''' )
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v , getattr(new_image_processor , k ) )
        # Reset repo
        delete_repo(token=self._token , repo_id='''valid_org/test-image-processor''' )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir , repo_id='''valid_org/test-image-processor-org''' , push_to_hub=True , use_auth_token=self._token )
        new_image_processor = ViTImageProcessor.from_pretrained('''valid_org/test-image-processor-org''' )
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v , getattr(new_image_processor , k ) )
    def _a ( self : Optional[Any] ):
        """simple docstring"""
        CustomImageProcessor.register_for_auto_class()
        image_processor = CustomImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR )
        image_processor.push_to_hub('''test-dynamic-image-processor''' , use_auth_token=self._token )
        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            image_processor.auto_map , {'''AutoImageProcessor''': '''custom_image_processing.CustomImageProcessor'''} , )
        new_image_processor = AutoImageProcessor.from_pretrained(
            f'{USER}/test-dynamic-image-processor' , trust_remote_code=True )
        # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
        self.assertEqual(new_image_processor.__class__.__name__ , '''CustomImageProcessor''' )
| 4 | 1 |
'''simple docstring'''
from typing import List
import numpy as np
def _number_of_shards_in_gen_kwargs( gen_kwargs : dict ) -> int:
    # Lists of different sizes make sharding ambiguous, so raise in that case.
    lists_lengths = {key: len(value ) for key, value in gen_kwargs.items() if isinstance(value , list )}
    if len(set(lists_lengths.values() ) ) > 1:
        raise RuntimeError(
            (
                '''Sharding is ambiguous for this dataset: '''
                + '''we found several data sources lists of different lengths, and we don\'t know over which list we should parallelize:\n'''
                + '''\n'''.join(f'\t- key {key} has length {length}' for key, length in lists_lengths.items() )
                + '''\nTo fix this, check the \'gen_kwargs\' and make sure to use lists only for data sources, '''
                + '''and use tuples otherwise. In the end there should only be one single list, or several lists with the same length.'''
            ) )
    max_length = max(lists_lengths.values() , default=0 )
    return max(1 , max_length )
def _distribute_shards( num_shards : int , max_num_jobs : int ) -> List[range]:
    shards_indices_per_group : List[range] = []
    for group_idx in range(max_num_jobs ):
        num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
        if num_shards_to_add == 0:
            break
        start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
        shard_indices = range(start , start + num_shards_to_add )
        shards_indices_per_group.append(shard_indices )
    return shards_indices_per_group
def _split_gen_kwargs( gen_kwargs : dict , max_num_jobs : int ) -> List[dict]:
    num_shards = _number_of_shards_in_gen_kwargs(gen_kwargs )
    if num_shards == 1:
        return [dict(gen_kwargs )]
    else:
        shard_indices_per_group = _distribute_shards(num_shards=num_shards , max_num_jobs=max_num_jobs )
        return [
            {
                key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
                if isinstance(value , list )
                else value
                for key, value in gen_kwargs.items()
            }
            for group_idx in range(len(shard_indices_per_group ) )
        ]
def _merge_gen_kwargs( gen_kwargs_list : List[dict] ) -> dict:
    return {
        key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
        if isinstance(gen_kwargs_list[0][key] , list )
        else gen_kwargs_list[0][key]
        for key in gen_kwargs_list[0]
    }
def _shuffle_gen_kwargs( rng : np.random.Generator , gen_kwargs : dict ) -> dict:
    # Lists of the same size get the same shuffling, so entangled lists
    # (e.g. shards and their metadata) stay aligned.
    list_sizes = {len(value ) for value in gen_kwargs.values() if isinstance(value , list )}
    indices_per_size = {}
    for size in list_sizes:
        indices_per_size[size] = list(range(size ) )
        rng.shuffle(indices_per_size[size] )
    # Now let's copy the gen_kwargs and shuffle the lists based on their sizes
    shuffled_kwargs = dict(gen_kwargs )
    for key, value in shuffled_kwargs.items():
        if isinstance(value , list ):
            shuffled_kwargs[key] = [value[i] for i in indices_per_size[len(value )]]
    return shuffled_kwargs
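# Hedged usage sketch (added; the file names are placeholders): split a
# generator's gen_kwargs over at most 3 jobs, then merge the per-job kwargs
# back into the original mapping.
if __name__ == "__main__":
    gen_kwargs = {'''files''': [f'data_{i}.txt' for i in range(8 )], '''metadata''': '''train'''}
    job_kwargs = _split_gen_kwargs(gen_kwargs , max_num_jobs=3 )
    # 8 shards over 3 jobs -> contiguous groups of 3, 3 and 2 shards
    assert [len(kwargs['''files'''] ) for kwargs in job_kwargs] == [3, 3, 2]
    # merging the per-job kwargs restores the original mapping
    assert _merge_gen_kwargs(job_kwargs ) == gen_kwargs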
| 4 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class ImageSegmentationTool (PipelineTool ):
    """simple docstring"""
    description = (
        'This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image.'
        'It takes two arguments named `image` which should be the original image, and `label` which should be a text '
        'describing the elements what should be identified in the segmentation mask. The tool returns the mask.'
    )
    default_checkpoint = 'CIDAS/clipseg-rd64-refined'
    name = 'image_segmenter'
    model_class = CLIPSegForImageSegmentation
    inputs = ['image', 'text']
    outputs = ['image']
    def __init__( self : Optional[int] , *args : Optional[int] , **kwargs : Union[str, Any] ):
        """simple docstring"""
        requires_backends(self , ['''vision'''] )
        super().__init__(*args , **kwargs )
    def encode( self : List[str] , image : "Image" , label : str ):
        """simple docstring"""
        return self.pre_processor(text=[label] , images=[image] , padding=True , return_tensors='''pt''' )
    def forward( self : Union[str, Any] , inputs : Optional[int] ):
        """simple docstring"""
        with torch.no_grad():
            logits = self.model(**inputs ).logits
        return logits
    def decode( self : List[str] , outputs : Optional[int] ):
        """simple docstring"""
        array = outputs.cpu().detach().numpy()
        array[array < 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8 ) )
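# Hedged usage sketch (added, not part of the module): `PipelineTool` instances
# are callable and chain encode -> forward -> decode. The checkpoint downloads
# on first use, and '''photo.png''' is a hypothetical input file.
if __name__ == "__main__":
    tool = ImageSegmentationTool()
    mask = tool(Image.open('''photo.png''' ) , '''a cat''' )  # white where the label matches
    mask.save('''mask.png''' )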
| 4 | 1 |
'''simple docstring'''
from __future__ import annotations
from statistics import mean
def calculate_waitingtime( arrival_time : list[int] , burst_time : list[int] , no_of_processes : int ) -> list[int]:
    waiting_time = [0] * no_of_processes
    remaining_time = [0] * no_of_processes
    # Initialize remaining_time to burst_time.
    for i in range(no_of_processes ):
        remaining_time[i] = burst_time[i]
    ready_process : list[int] = []
    completed = 0
    total_time = 0
    # While processes remain uncompleted,
    # a process whose arrival time has passed \
    # and that has remaining execution time is put into the ready_process.
    # The shortest process in the ready_process, target_process, is executed.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1
        for i in range(no_of_processes ):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i )
        if len(ready_process ) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1
    return waiting_time
def calculate_turnaroundtime( burst_time : list[int] , no_of_processes : int , waiting_time : list[int] ) -> list[int]:
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes ):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
if __name__ == "__main__":
print("""[TEST CASE 01]""")
    no_of_processes = 4
    burst_time = [2, 5, 3, 7]
    arrival_time = [0, 0, 0, 0]
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    turn_around_time = calculate_turnaroundtime(
        burst_time, no_of_processes, waiting_time
    )
# Printing the Result
print("""PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time""")
for i, process_id in enumerate(list(range(1, 5))):
print(
F'{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t'
F'{waiting_time[i]}\t\t\t\t{turn_around_time[i]}'
)
print(F'\nAverage waiting time = {mean(waiting_time):.5f}')
print(F'Average turnaround time = {mean(turn_around_time):.5f}')
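    # Worked check (added for illustration): with every arrival at t=0,
    # shortest-job-first runs the bursts in order 2, 3, 5, 7, so the waiting
    # times above are [0, 5, 2, 10] and the turnaround times [2, 10, 5, 17].
    assert waiting_time == [0, 5, 2, 10]
    assert turn_around_time == [2, 10, 5, 17]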
| 4 |
'''simple docstring'''
from collections.abc import Sequence
def max_subarray_sum( arr : Sequence[float] , allow_empty_subarrays : bool = False ) -> float:
    if not arr:
        return 0
    max_sum = 0 if allow_empty_subarrays else float('''-inf''' )
    curr_sum = 0.0
    for num in arr:
        curr_sum = max(0 if allow_empty_subarrays else num , curr_sum + num )
        max_sum = max(max_sum , curr_sum )
    return max_sum
if __name__ == "__main__":
from doctest import testmod
testmod()
    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(F'{max_subarray_sum(nums) = }')
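    # Extra illustrative checks (added): for an all-negative input, Kadane's
    # scan returns the largest single element unless empty subarrays are
    # allowed, in which case the empty sum 0 wins.
    assert max_subarray_sum([-3, -2, -5] ) == -2
    assert max_subarray_sum([-3, -2, -5] , allow_empty_subarrays=True ) == 0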
| 4 | 1 |
'''simple docstring'''
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("""1.6"""):
    _is_native_amp_available = True
    from torch.cuda.amp import autocast
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments :
    """simple docstring"""
    model_name_or_path : str = field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
    cache_dir : Optional[str] = field(
        default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'}, )
    freeze_feature_extractor : Optional[bool] = field(
        default=True, metadata={'help': 'Whether to freeze the feature extractor layers of the model.'} )
    verbose_logging : Optional[bool] = field(
        default=False, metadata={'help': 'Whether to log verbose messages or not.'}, )
    max_gumbel_temperature : Optional[float] = field(
        default=2.0, metadata={'help': 'Maximum temperature for gumbel softmax.'} )
    min_gumbel_temperature : Optional[float] = field(
        default=0.5, metadata={'help': 'Minimum temperature for gumbel softmax.'} )
    gumbel_temperature_decay : Optional[float] = field(
        default=0.99_99_95, metadata={'help': 'Decay of gumbel temperature during training.'} )
def configure_logger( model_args : ModelArguments , training_args : TrainingArguments ) -> None:
    logging.basicConfig(
        format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
    logging_level = logging.WARNING
    if model_args.verbose_logging:
        logging_level = logging.DEBUG
    elif trainer_utils.is_main_process(training_args.local_rank ):
        logging_level = logging.INFO
    logger.setLevel(logging_level )
@dataclass
class DataTrainingArguments :
    """simple docstring"""
    dataset_name : str = field(
        default=None, metadata={'help': 'The name of the dataset to use (via the datasets library).'} )
    dataset_config_name : Optional[str] = field(
        default=None, metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
    train_split_name : Optional[str] = field(
        default='train', metadata={
            'help': 'The name of the training data set split to use (via the datasets library). Defaults to \'train\''
        }, )
    validation_split_name : Optional[str] = field(
        default='validation', metadata={
            'help': (
                'The name of the validation data set split to use (via the datasets library). Defaults to \'validation\''
            )
        }, )
    speech_file_column : Optional[str] = field(
        default='file', metadata={'help': 'Column in the dataset that contains speech file path. Defaults to \'file\''}, )
    overwrite_cache : bool = field(
        default=False, metadata={'help': 'Overwrite the cached preprocessed datasets or not.'} )
    validation_split_percentage : Optional[int] = field(
        default=1, metadata={
            'help': 'The percentage of the train set used as validation set in case there\'s no validation split'
        }, )
    preprocessing_num_workers : Optional[int] = field(
        default=None, metadata={'help': 'The number of processes to use for the preprocessing.'}, )
    max_duration_in_seconds : Optional[float] = field(
        default=20.0, metadata={'help': 'Filter audio files that are longer than `max_duration_in_seconds` seconds'} )
@dataclass
class DataCollatorForWavaVecaPretraining :
    """simple docstring"""
    model : WavaVecaForPreTraining
    feature_extractor : WavaVecaFeatureExtractor
    padding : Union[bool, str] = "longest"
    pad_to_multiple_of : Optional[int] = None
    max_length : Optional[int] = None
    def __call__( self : Tuple , features : List[Dict[str, Union[List[int], torch.Tensor]]] ):
        """simple docstring"""
        batch = self.feature_extractor.pad(
            features , max_length=self.max_length , padding=self.padding , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' , )
        mask_indices_seq_length = self.model._get_feat_extract_output_lengths(batch['''input_values'''].shape[-1] )
        batch_size = batch['''input_values'''].shape[0]
        # make sure that no loss is computed on padded inputs
        if batch["attention_mask"] is not None:
            # compute real output lengths according to convolution formula
            output_lengths = self.model._get_feat_extract_output_lengths(batch['''attention_mask'''].sum(-1 ) ).to(
                torch.long )
            attention_mask = torch.zeros(
                (batch_size, mask_indices_seq_length) , dtype=torch.long , device=batch['''input_values'''].device )
            # these two operations make sure that all values
            # before the output lengths indices are attended to
            attention_mask[(torch.arange(attention_mask.shape[0] , device=attention_mask.device ), output_lengths - 1)] = 1
            attention_mask = attention_mask.flip([-1] ).cumsum(-1 ).flip([-1] ).bool()
        # sample randomly masked indices
        batch['''mask_time_indices'''] = _compute_mask_indices(
            (batch_size, mask_indices_seq_length) , self.model.config.mask_time_prob , self.model.config.mask_time_length , attention_mask=attention_mask , min_masks=2 , )
        return batch
class WavaVecaPreTrainer (Trainer ):
    """simple docstring"""
    def __init__( self : Tuple , *args : Any , max_gumbel_temp : int=1 , min_gumbel_temp : int=0 , gumbel_temp_decay : float=1.0 , **kwargs : Any ):
        """simple docstring"""
        super().__init__(*args , **kwargs )
        self.num_update_step = 0
        self.max_gumbel_temp = max_gumbel_temp
        self.min_gumbel_temp = min_gumbel_temp
        self.gumbel_temp_decay = gumbel_temp_decay
    def training_step( self : Tuple , model : nn.Module , inputs : Dict[str, Union[torch.Tensor, Any]] ):
        """simple docstring"""
        model.train()
        inputs = self._prepare_inputs(inputs )
        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model , inputs )
        else:
            loss = self.compute_loss(model , inputs )
        if self.args.n_gpu > 1 or self.deepspeed:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs['''mask_time_indices''']).sum()
            else:
                raise ValueError(f'{model.module.config.ctc_loss_reduction} is not valid. Choose one of [\'mean\', \'sum\']' )
        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps
        if self.use_amp:
            self.scaler.scale(loss ).backward()
        elif self.use_apex:
            with amp.scale_loss(loss , self.optimizer ) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss )
        else:
            loss.backward()
        self.num_update_step += 1
        # make sure gumbel softmax temperature is decayed
        if self.args.n_gpu > 1 or self.deepspeed:
            model.module.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) )
        else:
            model.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) )
        return loss.detach()
def main() -> None:
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    model_args , data_args , training_args = parser.parse_args_into_dataclasses()
    configure_logger(model_args , training_args )
    # Downloading and loading a dataset from the hub.
    datasets = load_dataset(data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
    if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets['''validation'''] = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , split=f'{data_args.train_split_name}[:{data_args.validation_split_percentage}%]' , cache_dir=model_args.cache_dir , )
        datasets['''train'''] = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , split=f'{data_args.train_split_name}[{data_args.validation_split_percentage}%:]' , cache_dir=model_args.cache_dir , )
    else:
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets['''validation'''] = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , split='''validation''' , cache_dir=model_args.cache_dir , )
        datasets['''train'''] = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , split=f'{data_args.train_split_name}' , cache_dir=model_args.cache_dir , )
    # only normalized-inputs-training is supported
    feature_extractor = WavaVecaFeatureExtractor.from_pretrained(
        model_args.model_name_or_path , cache_dir=model_args.cache_dir , do_normalize=True )
    def prepare_dataset(batch ):
        # check that all files have the correct sampling rate
        batch['''speech'''] , A_ = librosa.load(batch[data_args.speech_file_column] , sr=feature_extractor.sampling_rate )
        return batch
    # load audio files into numpy arrays
    vectorized_datasets = datasets.map(
        prepare_dataset , num_proc=data_args.preprocessing_num_workers , remove_columns=datasets['''train'''].column_names )
    # filter audio files that are too long
    vectorized_datasets = vectorized_datasets.filter(
        lambda data : len(data['''speech'''] ) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate ) )
    def normalize(batch ):
        return feature_extractor(batch['''speech'''] , sampling_rate=feature_extractor.sampling_rate )
    # normalize and transform to `BatchFeatures`
    vectorized_datasets = vectorized_datasets.map(
        normalize , batched=True , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , remove_columns=vectorized_datasets['''train'''].column_names , )
    # pretraining is only supported for "newer" stable layer norm architecture
    # apply_spec_augment has to be True, mask_feature_prob has to be 0.0
    config = WavaVecaConfig.from_pretrained(
        model_args.model_name_or_path , cache_dir=model_args.cache_dir , gradient_checkpointing=training_args.gradient_checkpointing , )
    if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
        raise ValueError(
            '''PreTraining is only supported for ``config.do_stable_layer_norm=True`` and'''
            ''' ``config.feat_extract_norm=\'layer\'''' )
    model = WavaVecaForPreTraining(config )
    data_collator = DataCollatorForWavaVecaPretraining(model=model , feature_extractor=feature_extractor )
    trainer = WavaVecaPreTrainer(
        model=model , data_collator=data_collator , args=training_args , train_dataset=vectorized_datasets['''train'''] , eval_dataset=vectorized_datasets['''validation'''] , tokenizer=feature_extractor , max_gumbel_temp=model_args.max_gumbel_temperature , min_gumbel_temp=model_args.min_gumbel_temperature , gumbel_temp_decay=model_args.gumbel_temperature_decay , )
    trainer.train()
if __name__ == "__main__":
main()
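# Example launch (added for illustration; the script name and every value in
# angle brackets are placeholders to substitute with your own):
#
#   python run_wav2vec2_pretrain.py \
#       --model_name_or_path <wav2vec2-style checkpoint or config dir> \
#       --dataset_name <hub dataset id> \
#       --dataset_config_name <config name> \
#       --output_dir ./wav2vec2-pretrained \
#       --max_duration_in_seconds 20.0 \
#       --per_device_train_batch_size 8 \
#       --do_train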
| 4 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case__ = logging.get_logger(__name__)
snake_case__ = {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"""
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class SpeechaTextaConfig (PretrainedConfig ):
    """simple docstring"""
    model_type = 'speech_to_text_2'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'decoder_attention_heads', 'hidden_size': 'd_model'}
    def __init__( self : Optional[Any] , vocab_size : int=10000 , decoder_layers : int=6 , decoder_ffn_dim : int=2048 , decoder_attention_heads : int=4 , decoder_layerdrop : float=0.0 , use_cache : bool=True , activation_function : str="relu" , d_model : int=256 , dropout : float=0.1 , attention_dropout : float=0.0 , activation_dropout : float=0.0 , init_std : float=0.02 , decoder_start_token_id : int=2 , scale_embedding : bool=True , pad_token_id : int=1 , bos_token_id : int=0 , eos_token_id : int=2 , max_target_positions : int=1024 , **kwargs : int , ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , **kwargs , )
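# Illustrative instantiation (added): any keyword overrides the defaults above,
# and `attribute_map` aliases the generic names onto the decoder-specific ones.
if __name__ == "__main__":
    config = SpeechaTextaConfig(d_model=512 , decoder_layers=12 )
    assert config.hidden_size == 512  # resolved through attribute_map to d_model
    assert config.num_attention_heads == config.decoder_attention_heads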
| 4 | 1 |
'''simple docstring'''
import os
def solution() -> str:
    file_path = os.path.join(os.path.dirname(__file__ ) , '''num.txt''' )
    with open(file_path ) as file_hand:
        return str(sum(int(line ) for line in file_hand ) )[:1_0]
if __name__ == "__main__":
print(solution())
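    # Tiny illustration of the pattern above (added, with made-up numbers):
    # summing digit strings and slicing keeps only the ten leading digits.
    assert str(sum(int(n ) for n in ['''37107287533''', '''46376937677'''] ) )[:1_0] == '''8348422521'''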
| 4 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
snake_case__ = {
"""microsoft/table-transformer-detection""": (
"""https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"""
),
}
class TableTransformerConfig (PretrainedConfig ):
    """simple docstring"""
    model_type = 'table-transformer'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'hidden_size': 'd_model',
        'num_attention_heads': 'encoder_attention_heads',
    }
    def __init__( self : Any , use_timm_backbone : bool=True , backbone_config : Dict=None , num_channels : int=3 , num_queries : int=100 , encoder_layers : int=6 , encoder_ffn_dim : int=2048 , encoder_attention_heads : int=8 , decoder_layers : int=6 , decoder_ffn_dim : int=2048 , decoder_attention_heads : int=8 , encoder_layerdrop : float=0.0 , decoder_layerdrop : float=0.0 , is_encoder_decoder : bool=True , activation_function : str="relu" , d_model : int=256 , dropout : float=0.1 , attention_dropout : float=0.0 , activation_dropout : float=0.0 , init_std : float=0.02 , init_xavier_std : float=1.0 , auxiliary_loss : bool=False , position_embedding_type : str="sine" , backbone : str="resnet50" , use_pretrained_backbone : bool=True , dilation : bool=False , class_cost : int=1 , bbox_cost : int=5 , giou_cost : int=2 , mask_loss_coefficient : int=1 , dice_loss_coefficient : int=1 , bbox_loss_coefficient : int=5 , giou_loss_coefficient : int=2 , eos_coefficient : float=0.1 , **kwargs : int , ):
        """simple docstring"""
        if backbone_config is not None and use_timm_backbone:
            raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
                backbone_config = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
            elif isinstance(backbone_config , dict ):
                backbone_model_type = backbone_config.get('''model_type''' )
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config )
            # set timm attributes to None
            backbone , use_pretrained_backbone , dilation = None, None, None
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )
    @property
    def num_attention_heads( self : List[Any] ):
        """simple docstring"""
        return self.encoder_attention_heads
    @property
    def hidden_size( self : Any ):
        """simple docstring"""
        return self.d_model
class TableTransformerOnnxConfig (OnnxConfig ):
    """simple docstring"""
    torch_onnx_minimum_version = version.parse('1.11' )
    @property
    def inputs( self : Tuple ):
        """simple docstring"""
        return OrderedDict(
            [
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
                ('''pixel_mask''', {0: '''batch'''}),
            ] )
    @property
    def atol_for_validation( self : Optional[int] ):
        """simple docstring"""
        return 1E-5
    @property
    def default_onnx_opset( self : str ):
        """simple docstring"""
        return 12
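# Illustrative sketch (added): the ONNX config advertises the dynamic axes the
# export expects, which can be inspected without running an export.
if __name__ == "__main__":
    config = TableTransformerConfig()
    onnx_config = TableTransformerOnnxConfig(config )
    assert list(onnx_config.inputs.keys() ) == ['''pixel_values''', '''pixel_mask''']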
| 4 | 1 |
'''simple docstring'''
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize( example : dict ) -> dict:
    output = {}
    output['''input_ids'''] = tokenizer(example['''content'''] , truncation=False )['''input_ids''']
    output['''ratio_char_token'''] = len(example['''content'''] ) / len(output['''input_ids'''] )
    return output
parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)
t_start = time.time()
ds = load_dataset(args.dataset_name, split="""train""")
print(F'Dataset loaded in {time.time()-t_start:.2f}s')
t_start = time.time()
ds = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
"""repo_name""",
"""path""",
"""copies""",
"""size""",
"""content""",
"""license""",
"""hash""",
"""line_mean""",
"""line_max""",
"""alpha_frac""",
"""autogenerated""",
],
)
print(F'Dataset tokenized in {time.time()-t_start:.2f}s')
t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F'Data pushed to the hub in {time.time()-t_start:.2f}s')
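# Worked illustration of `ratio_char_token` (added, with made-up token ids):
# it is simply characters per produced token, so 12 characters tokenized into
# 4 ids gives 3.0; higher values mean the tokenizer compresses the corpus better.
assert len('''hello world!''' ) / len([101, 7592, 2088, 999] ) == 3.0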
| 4 |
'''simple docstring'''
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class UpperCamelCase_ :
"""simple docstring"""
def __init__( self : Optional[Any] , _lowerCamelCase : int , _lowerCamelCase : List[str]=3 , _lowerCamelCase : Any=32 , _lowerCamelCase : Union[str, Any]=3 , _lowerCamelCase : int=10 , _lowerCamelCase : Union[str, Any]=[8, 16, 32, 64] , _lowerCamelCase : Dict=[1, 1, 2, 1] , _lowerCamelCase : Union[str, Any]=True , _lowerCamelCase : Optional[int]=True , _lowerCamelCase : Any="relu" , _lowerCamelCase : Optional[Any]=3 , _lowerCamelCase : Optional[Any]=None , _lowerCamelCase : Dict=["stage2", "stage3", "stage4"] , _lowerCamelCase : Union[str, Any]=[2, 3, 4] , _lowerCamelCase : Tuple=1 , ):
"""simple docstring"""
A_ : List[str] = parent
A_ : List[str] = batch_size
A_ : Union[str, Any] = image_size
A_ : Tuple = num_channels
A_ : Any = embeddings_size
A_ : int = hidden_sizes
A_ : Optional[Any] = depths
A_ : List[Any] = is_training
A_ : Optional[int] = use_labels
A_ : int = hidden_act
A_ : Tuple = num_labels
A_ : Union[str, Any] = scope
A_ : List[Any] = len(_lowerCamelCase )
A_ : Union[str, Any] = out_features
A_ : List[Any] = out_indices
A_ : Dict = num_groups
def _a ( self : Optional[int] ):
"""simple docstring"""
A_ : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A_ : Union[str, Any] = None
if self.use_labels:
A_ : Any = ids_tensor([self.batch_size] , self.num_labels )
A_ : Any = self.get_config()
return config, pixel_values, labels
def _a ( self : Union[str, Any] ):
"""simple docstring"""
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def _a ( self : List[Any] , _lowerCamelCase : List[str] , _lowerCamelCase : List[str] , _lowerCamelCase : Optional[Any] ):
"""simple docstring"""
A_ : Any = BitModel(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A_ : int = model(_lowerCamelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def _a ( self : Optional[int] , _lowerCamelCase : List[Any] , _lowerCamelCase : str , _lowerCamelCase : Optional[int] ):
"""simple docstring"""
A_ : Dict = self.num_labels
A_ : Optional[Any] = BitForImageClassification(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A_ : List[Any] = model(_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a ( self : Any , _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : List[Any] ):
"""simple docstring"""
A_ : List[Any] = BitBackbone(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A_ : int = model(_lowerCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
A_ : Optional[Any] = None
A_ : int = BitBackbone(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A_ : Optional[int] = model(_lowerCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def _a ( self : List[Any] ):
"""simple docstring"""
A_ : Union[str, Any] = self.prepare_config_and_inputs()
A_ ,A_ ,A_ : Union[str, Any] = config_and_inputs
A_ : str = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class UpperCamelCase_ (a__, a__, unittest.TestCase ):
"""simple docstring"""
_lowerCAmelCase = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
_lowerCAmelCase = (
{'feature-extraction': BitModel, 'image-classification': BitForImageClassification}
if is_torch_available()
else {}
)
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
def _a ( self : Optional[Any] ):
"""simple docstring"""
A_ : List[str] = BitModelTester(self )
A_ : Optional[Any] = ConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase )
def _a ( self : Optional[Any] ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _a ( self : List[Any] ):
"""simple docstring"""
return
@unittest.skip(reason='''Bit does not output attentions''' )
def _a ( self : str ):
"""simple docstring"""
pass
@unittest.skip(reason='''Bit does not use inputs_embeds''' )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
pass
@unittest.skip(reason='''Bit does not support input and output embeddings''' )
def _a ( self : Any ):
"""simple docstring"""
pass
def _a ( self : List[Any] ):
"""simple docstring"""
A_ ,A_ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : Dict = model_class(_lowerCamelCase )
A_ : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A_ : int = [*signature.parameters.keys()]
A_ : Union[str, Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _lowerCamelCase )
def _a ( self : Optional[Any] ):
"""simple docstring"""
A_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCamelCase )
def _a ( self : Optional[Any] ):
"""simple docstring"""
A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_lowerCamelCase )
def _a ( self : Tuple ):
"""simple docstring"""
A_ ,A_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : str = model_class(config=_lowerCamelCase )
for name, module in model.named_modules():
if isinstance(_lowerCamelCase , (nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
def _a ( self : int ):
"""simple docstring"""
def check_hidden_states_output(_lowerCamelCase : Union[str, Any] , _lowerCamelCase : Dict , _lowerCamelCase : int ):
A_ : Union[str, Any] = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
with torch.no_grad():
A_ : Union[str, Any] = model(**self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) )
A_ : int = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
A_ : List[Any] = self.model_tester.num_stages
self.assertEqual(len(_lowerCamelCase ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
A_ ,A_ : str = self.model_tester.prepare_config_and_inputs_for_common()
A_ : Tuple = ['''preactivation''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
A_ : Tuple = layer_type
A_ : Optional[Any] = True
check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A_ : List[str] = True
check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
@unittest.skip(reason='''Bit does not use feedforward chunking''' )
def _a ( self : Tuple ):
"""simple docstring"""
pass
def _a ( self : str ):
"""simple docstring"""
A_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCamelCase )
@slow
def _a ( self : Union[str, Any] ):
"""simple docstring"""
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : List[Any] = BitModel.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
def snake_case__ ( ) -> Optional[int]:
A_ : Optional[int] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class UpperCamelCase_ (unittest.TestCase ):
"""simple docstring"""
@cached_property
def _a ( self : List[Any] ):
"""simple docstring"""
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def _a ( self : Dict ):
"""simple docstring"""
A_ : Optional[int] = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(_lowerCamelCase )
A_ : Union[str, Any] = self.default_image_processor
A_ : Optional[int] = prepare_img()
A_ : int = image_processor(images=_lowerCamelCase , return_tensors='''pt''' ).to(_lowerCamelCase )
# forward pass
with torch.no_grad():
A_ : Union[str, Any] = model(**_lowerCamelCase )
# verify the logits
A_ : Dict = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowerCamelCase )
A_ : Tuple = torch.tensor([[-0.65_26, -0.52_63, -1.43_98]] ).to(_lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCamelCase , atol=1E-4 ) )
@require_torch
class UpperCamelCase_ (a__, unittest.TestCase ):
"""simple docstring"""
_lowerCAmelCase = (BitBackbone,) if is_torch_available() else ()
_lowerCAmelCase = BitConfig
_lowerCAmelCase = False
def _a ( self : List[str] ):
"""simple docstring"""
A_ : Union[str, Any] = BitModelTester(self )
| 4 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
snake_case__ = logging.get_logger(__name__)
snake_case__ = {
"""google/bit-50""": """https://huggingface.co/google/bit-50/resolve/main/config.json""",
}
class BitConfig (BackboneConfigMixin, PretrainedConfig ):
    """simple docstring"""
    model_type = 'bit'
    layer_types = ['preactivation', 'bottleneck']
    supported_padding = ['SAME', 'VALID']
    def __init__( self : List[Any] , num_channels : int=3 , embedding_size : int=64 , hidden_sizes : list=[256, 512, 1024, 2048] , depths : list=[3, 4, 6, 3] , layer_type : str="preactivation" , hidden_act : str="relu" , global_padding : str=None , num_groups : int=32 , drop_path_rate : float=0.0 , embedding_dynamic_padding : bool=False , output_stride : int=32 , width_factor : int=1 , out_features : list=None , out_indices : list=None , **kwargs : Dict , ):
        """simple docstring"""
        super().__init__(**kwargs )
        if layer_type not in self.layer_types:
            raise ValueError(f'layer_type={layer_type} is not one of {",".join(self.layer_types )}' )
        if global_padding is not None:
            if global_padding.upper() in self.supported_padding:
                global_padding = global_padding.upper()
            else:
                raise ValueError(f'Padding strategy {global_padding} not supported' )
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.global_padding = global_padding
        self.num_groups = num_groups
        self.drop_path_rate = drop_path_rate
        self.embedding_dynamic_padding = embedding_dynamic_padding
        self.output_stride = output_stride
        self.width_factor = width_factor
        self.stage_names = ['''stem'''] + [f'stage{idx}' for idx in range(1 , len(depths ) + 1 )]
        self._out_features , self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
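# Illustrative instantiation (added): `out_features` selects which stages a
# backbone returns, and the matching stage indices are aligned automatically.
if __name__ == "__main__":
    config = BitConfig(out_features=['''stage1''', '''stage4'''] )
    assert config.stage_names == ['''stem''', '''stage1''', '''stage2''', '''stage3''', '''stage4''']
    assert list(config.out_indices ) == [1, 4]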
| 4 |
'''simple docstring'''
import pprint
import requests
snake_case__ = """https://zenquotes.io/api"""
def quote_of_the_day() -> list:
    return requests.get(API_ENDPOINT_URL + '''/today''' ).json()
def random_quotes() -> list:
    return requests.get(API_ENDPOINT_URL + '''/random''' ).json()
if __name__ == "__main__":
    response = random_quotes()
pprint.pprint(response)
| 4 | 1 |
'''simple docstring'''
import os
import pytest
from transformers.dynamic_module_utils import get_imports
snake_case__ = """
import os
"""
snake_case__ = """
def foo():
import os
return False
"""
snake_case__ = """
def foo():
def bar():
if True:
import os
return False
return bar()
"""
snake_case__ = """
import os
try:
import bar
except ImportError:
raise ValueError()
"""
snake_case__ = """
import os
def foo():
try:
import bar
except ImportError:
raise ValueError()
"""
snake_case__ = """
import os
try:
import bar
except (ImportError, AttributeError):
raise ValueError()
"""
snake_case__ = """
import os
try:
import bar
except ImportError as e:
raise ValueError()
"""
snake_case__ = """
import os
try:
import bar
except:
raise ValueError()
"""
snake_case__ = """
import os
try:
import bar
import baz
except ImportError:
raise ValueError()
"""
snake_case__ = """
import os
try:
import bar
import baz
except ImportError:
x = 1
raise ValueError()
"""
CASES = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize('''case''' , CASES )
def test_import_parsing( tmp_path : str , case : str ) -> None:
    tmp_file_path = os.path.join(tmp_path , '''test_file.py''' )
    with open(tmp_file_path , '''w''' ) as _tmp_file:
        _tmp_file.write(case )
    parsed_imports = get_imports(tmp_file_path )
    assert parsed_imports == ["os"]
| 4 |
'''simple docstring'''
from __future__ import annotations
class IIRFilter :
    """simple docstring"""
    def __init__( self : Optional[int] , order : int ):
        """simple docstring"""
        self.order = order
        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order
        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order
    def set_coefficients( self : Dict , a_coeffs : list[float] , b_coeffs : list[float] ):
        """simple docstring"""
        if len(a_coeffs ) < self.order:
            a_coeffs = [1.0, *a_coeffs]
        if len(a_coeffs ) != self.order + 1:
            error_msg = (
                f'Expected a_coeffs to have {self.order + 1} elements '
                f'for {self.order}-order filter, got {len(a_coeffs )}'
            )
            raise ValueError(error_msg )
        if len(b_coeffs ) != self.order + 1:
            error_msg = (
                f'Expected b_coeffs to have {self.order + 1} elements '
                f'for {self.order}-order filter, got {len(b_coeffs )}'
            )
            raise ValueError(error_msg )
        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs
    def process( self : Tuple , sample : float ):
        """simple docstring"""
        result = 0.0
        # Start at index 1 and do index 0 at the end.
        for i in range(1 , self.order + 1 ):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )
        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]
        self.input_history[0] = sample
        self.output_history[0] = result
        return result
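# Usage sketch (added): a first-order smoothing filter with illustrative
# coefficients (a real design would derive them from e.g. a bilinear
# transform). `process` consumes one sample at a time and keeps its own state.
if __name__ == "__main__":
    filt = IIRFilter(1 )
    filt.set_coefficients([1.0, -0.9] , [0.05, 0.05] )  # one-pole low-pass smoother
    smoothed = [filt.process(x ) for x in [1.0, 1.0, 1.0, 1.0]]
    print(smoothed )  # ramps toward the steady-state value 1.0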
| 4 | 1 |
'''simple docstring'''
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
snake_case__ = """\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
"""
snake_case__ = """\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
"""
snake_case__ = """
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
predictions: list of predictions to score. Depending on the SuperGlUE subset:
- for 'record': list of question-answer dictionaries with the following keys:
- 'idx': index of the question as specified by the dataset
- 'prediction_text': the predicted answer text
- for 'multirc': list of question-answer dictionaries with the following keys:
- 'idx': index of the question-answer pair as specified by the dataset
- 'prediction': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
- for 'record': list of question-answers dictionaries with the following keys:
- 'idx': index of the question as specified by the dataset
- 'answers': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for 'record':
- 'exact_match': Exact match between answer and gold answer
- 'f1': F1 score
- for 'multirc':
- 'exact_match': Exact match between answer and gold answer
- 'f1_m': Per-question macro-F1 score
- 'f1_a': Average F1 score over all answers
- for 'axb':
        'matthews_correlation': Matthews correlation coefficient
- for 'cb':
- 'accuracy': Accuracy
- 'f1': F1 score
- for all others:
- 'accuracy': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric('super_glue', 'copa') # any of [\"copa\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"boolq\", \"axg\"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'cb')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0, 'f1': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'record')
>>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]
>>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 1.0, 'f1': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'multirc')
>>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'axb')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'matthews_correlation': 1.0}
"""
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels, f1_avg="binary"):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds, average=f1_avg))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def evaluate_multirc(ids_preds, labels):
    """Computes F1 over all pairs and exact match over each question's answer group."""
    question_map = {}
    for id_pred, label in zip(ids_preds, labels):
        question_id = f'{id_pred["idx"]["paragraph"]}-{id_pred["idx"]["question"]}'
        pred = id_pred["prediction"]
        if question_id in question_map:
            question_map[question_id].append((pred, label))
        else:
            question_map[question_id] = [(pred, label)]

    f1s, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels)
        f1 = f1_score(y_true=question_labels, y_pred=question_preds, average="macro")
        f1s.append(f1)
        em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels))
        ems.append(em)
    f1_m = float(sum(f1s) / len(f1s))
    em = sum(ems) / len(ems)
    f1_a = float(f1_score(y_true=labels, y_pred=[id_pred["prediction"] for id_pred in ids_preds]))
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class UpperCamelCase_ (datasets.Metric ):
"""simple docstring"""
    def _info(self):
"""simple docstring"""
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]''' )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            codebase_urls=[],
            reference_urls=[],
            format="numpy" if not self.config_name == "record" and not self.config_name == "multirc" else None,
        )
    def _get_feature_types(self):
"""simple docstring"""
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value('''int64''' ),
"query": datasets.Value('''int64''' ),
},
"prediction_text": datasets.Value('''string''' ),
},
"references": {
"idx": {
"passage": datasets.Value('''int64''' ),
"query": datasets.Value('''int64''' ),
},
"answers": datasets.Sequence(datasets.Value('''string''' ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value('''int64''' ),
"paragraph": datasets.Value('''int64''' ),
"question": datasets.Value('''int64''' ),
},
"prediction": datasets.Value('''int64''' ),
},
"references": datasets.Value('''int64''' ),
}
else:
return {
"predictions": datasets.Value('''int64''' ),
"references": datasets.Value('''int64''' ),
}
    def _compute(self, predictions, references):
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "cb":
            return acc_and_f1(predictions, references, f1_avg="macro")
        elif self.config_name == "record":
            dataset = [
                {
                    "qas": [
                        {"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
            return evaluate_record(dataset, predictions)[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions, references)
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]'
            )
| 4 |
'''simple docstring'''
class Node:
    """A binary search tree node."""

    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None

    def insert(self, val):
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
        else:
            self.val = val


def inorder(root, res):
    # Recursive traversal
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)


def tree_sort(arr):
    # Build BST
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # Traverse BST in order.
    res = []
    inorder(root, res)
    return res
if __name__ == "__main__":
print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
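    # Expected output for the call above: [1, 2, 3, 9, 10, 13, 14]. One caveat
    # (an observation, not a behaviour change): insert() guards on `if self.val:`,
    # so a stored falsy value such as 0 silently stops further inserts.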
| 4 | 1 |
'''simple docstring'''
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
data = datasets.load_iris()

X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]

X_train, X_test, y_train, y_test = train_test_split(X, y)


def euclidean_distance(a, b):
    return np.linalg.norm(np.array(a) - np.array(b))


def classifier(train_data, train_target, classes, point, k=5):
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
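    # Worked check of the metric (assumed points): euclidean_distance((0, 0), (3, 4))
    # is sqrt(9 + 16) = 5.0; classification is then a majority vote among the k
    # nearest training points under this metric.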
| 4 |
'''simple docstring'''
def heaps(arr: list) -> list:
    """Return all permutations of `arr`, using Heap's algorithm."""
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(k: int, arr: list):
        if k == 1:
            res.append(tuple(arr[:]))
            return

        generate(k - 1, arr)

        for i in range(k - 1):
            if k % 2 == 0:  # k is even
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else:  # k is odd
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
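    # Expected behaviour (assumed input "1,2,3"): Heap's algorithm yields all
    # 3! = 6 orderings, in generation order:
    # [(1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)]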
| 4 | 1 |
'''simple docstring'''
def depth_first_search(grid: list[list[int]], row: int, col: int, visit: set) -> int:
    """Count the paths from (row, col) to the bottom-right cell, moving in the
    four cardinal directions, never revisiting a cell, and never entering a
    blocked cell (value 1).
    """
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col))

    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)

    visit.remove((row, col))
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
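    # Worked example (assumed grid): in a fully open 2x2 grid, paths may move in
    # all four directions but never revisit a cell, so exactly two paths reach
    # the bottom-right corner:
    # >>> depth_first_search([[0, 0], [0, 0]], 0, 0, set())
    # 2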
| 4 |
'''simple docstring'''
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class UpperCamelCase_ (unittest.TestCase ):
"""simple docstring"""
    def test_text_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, greedy_text)

    def test_iterator_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        streamer = TextIteratorStreamer(tokenizer)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()
        streamer_text = ""
        for new_text in streamer:
            streamer_text += new_text

        self.assertEqual(streamer_text, greedy_text)

    def test_text_streamer_skip_prompt(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        new_greedy_ids = greedy_ids[:, input_ids.shape[1] :]
        new_greedy_text = tokenizer.decode(new_greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_prompt=True)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, new_greedy_text)

    def test_text_streamer_decode_kwargs(self):
        tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
        model = AutoModelForCausalLM.from_pretrained("distilgpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = torch.ones((1, 5), device=torch_device).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_special_tokens=True)
            model.generate(input_ids, max_new_tokens=1, do_sample=False, streamer=streamer)

        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        streamer_text = cs.out[:-1]  # Remove the final "\n"
        streamer_text_tokenized = tokenizer(streamer_text, return_tensors="pt")
        self.assertEqual(streamer_text_tokenized.input_ids.shape, (1, 1))

    def test_iterator_streamer_timeout(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        streamer = TextIteratorStreamer(tokenizer, timeout=0.001)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()

        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(Empty):
            streamer_text = ""
            for new_text in streamer:
                streamer_text += new_text
| 4 | 1 |
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
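# Note on the padding choice above: pad_to_multiple_of targets Tensor Core
# alignment; multiples of 8 for fp16/bf16 and 16 for fp8 are the commonly
# recommended values.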
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize Accelerator

    # New Code #
    # We pass in "all" to `log_with` to grab all available trackers in the environment
    # Note: If using a custom `Tracker` class, should be passed in here such as:
    # >>> log_with = ["all", MyCustomTrackerClassInstance()]
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    set_seed(seed)

    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # New Code #
    # We need to initialize the trackers we use. Overall configurations can also be stored
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        # New Code #
        # For our tracking example, we will log the total loss of each epoch
        if args.with_tracking:
            total_loss = 0
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            # New Code #
            if args.with_tracking:
                total_loss += loss.detach().float()
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True` (the default).
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # To actually log, we call `Accelerator.log`
        # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": eval_metric["accuracy"],
                    "f1": eval_metric["f1"],
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=epoch,
            )

    # New Code #
    # When a run is finished, you should call `accelerator.end_training()`
    # to close all of the open trackers
    if args.with_tracking:
        accelerator.end_training()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose "
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 "
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location on where to store experiment tracking logs and relevant project information",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
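    # Typical invocations (script path assumed; the flags are defined by the
    # parser above):
    #   python this_script.py --with_tracking
    #   accelerate launch this_script.py --mixed_precision fp16 --with_tracking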
| 4 |
'''simple docstring'''
import heapq
def greedy_min_vertex_cover(graph: dict) -> set[int]:
    queue: list[list] = []

    # For each node and its adjacency list, push the node and its rank onto the queue.
    # heapq implements a min-priority queue, so -1 * len(value) makes it behave
    # like a max-priority queue on the vertex degree.
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])

    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()

    # while the queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract the vertex with max rank from the queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)

        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if v has no adjacent nodes, skip it
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem,
            # remove argmax from elem's adjacency list and update its rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1

        # re-order the queue
        heapq.heapify(queue)
    return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(F'Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}')
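    # The greedy rule above repeatedly takes a remaining vertex of maximum degree;
    # the printed cover is valid, but as a greedy APX scheme it is not guaranteed
    # to be minimum.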
| 4 | 1 |
'''simple docstring'''
from __future__ import annotations
class IIRFilter:
    """N-order IIR filter."""

    def __init__(self, order: int) -> None:
        self.order = order
        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order
        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order

    def set_coefficients(self, a_coeffs: list[float], b_coeffs: list[float]) -> None:
        """Set the filter coefficients; a_coeffs may omit the leading 1.0."""
        if len(a_coeffs) < self.order:
            a_coeffs = [1.0, *a_coeffs]
        if len(a_coeffs) != self.order + 1:
            error_msg = (
                f"Expected a_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(a_coeffs)}"
            )
            raise ValueError(error_msg)
        if len(b_coeffs) != self.order + 1:
            error_msg = (
                f"Expected b_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(b_coeffs)}"
            )
            raise ValueError(error_msg)
        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs

    def process(self, sample: float) -> float:
        """Compute y[n] from x[n] and the stored input/output history."""
        result = 0.0
        # Start at index 1 and do index 0 at the end.
        for i in range(1, self.order + 1):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )
        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]
        self.input_history[0] = sample
        self.output_history[0] = result
        return result
| 4 |
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'transformer.encoder.layers.{i}.self_attn.out_proj.weight', F'encoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(F'transformer.encoder.layers.{i}.self_attn.out_proj.bias', F'encoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.weight', F'encoder.layers.{i}.fc1.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.bias', F'encoder.layers.{i}.fc1.bias'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.weight', F'encoder.layers.{i}.fc2.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.bias', F'encoder.layers.{i}.fc2.bias'))
rename_keys.append(
(F'transformer.encoder.layers.{i}.norm1.weight', F'encoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((F'transformer.encoder.layers.{i}.norm1.bias', F'encoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.weight', F'encoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.bias', F'encoder.layers.{i}.final_layer_norm.bias'))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'transformer.decoder.layers.{i}.self_attn.out_proj.weight', F'decoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.self_attn.out_proj.bias', F'decoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append(
(
F'transformer.decoder.layers.{i}.multihead_attn.out_proj.weight',
F'decoder.layers.{i}.encoder_attn.out_proj.weight',
)
)
rename_keys.append(
(
F'transformer.decoder.layers.{i}.multihead_attn.out_proj.bias',
F'decoder.layers.{i}.encoder_attn.out_proj.bias',
)
)
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.weight', F'decoder.layers.{i}.fc1.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.bias', F'decoder.layers.{i}.fc1.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.weight', F'decoder.layers.{i}.fc2.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.bias', F'decoder.layers.{i}.fc2.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm1.weight', F'decoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((F'transformer.decoder.layers.{i}.norm1.bias', F'decoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm2.weight', F'decoder.layers.{i}.encoder_attn_layer_norm.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm2.bias', F'decoder.layers.{i}.encoder_attn_layer_norm.bias')
)
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.weight', F'decoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.bias', F'decoder.layers.{i}.final_layer_norm.bias'))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.encoder.norm.weight""", """encoder.layernorm.weight"""),
("""transformer.encoder.norm.bias""", """encoder.layernorm.bias"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict):
    prefix = ""
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
def resize(image, checkpoint_url):
    width, height = image.size
    current_max_size = max(width, height)
    target_max_size = 800 if "detection" in checkpoint_url else 1000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))
    return resized_image
def normalize(image):
    image = F.to_tensor(image)
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return image
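# The mean/std used above are the standard ImageNet normalization statistics,
# which DETR-style checkpoints expect at inference time.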
@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")

    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)

    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict)

    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "model."
    for key in state_dict.copy().keys():
        if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val

    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone="resnet18",
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        ce_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.4,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
    )

    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        id2label = {0: "table", 1: "table rotated"}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        id2label = {
            0: "table",
            1: "table column",
            2: "table row",
            3: "table column header",
            4: "table projected row header",
            5: "table spanning cell",
        }
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    image_processor = DetrImageProcessor(
        format="coco_detection", max_size=800 if "detection" in checkpoint_url else 1000
    )
    model = TableTransformerForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    # verify our conversion
    filename = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
    file_path = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename=filename)
    image = Image.open(file_path).convert("RGB")
    pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)

    outputs = model(pixel_values)

    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]]
        )
        expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]])
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]]
        )
        expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]])

    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model to HF hub
        logger.info("Pushing model to the hub...")
        model_name = (
            "microsoft/table-transformer-detection"
            if "detection" in checkpoint_url
            else "microsoft/table-transformer-structure-recognition"
        )
        model.push_to_hub(model_name)
        image_processor.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""",
type=str,
choices=[
"""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""",
"""https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth""",
],
help="""URL of the Table Transformer checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 4 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_rembert""": ["""REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RemBertConfig""", """RemBertOnnxConfig"""]
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ = ["""RemBertTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ = ["""RemBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_rembert"] = [
        "REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RemBertForCausalLM",
        "RemBertForMaskedLM",
        "RemBertForMultipleChoice",
        "RemBertForQuestionAnswering",
        "RemBertForSequenceClassification",
        "RemBertForTokenClassification",
        "RemBertLayer",
        "RemBertModel",
        "RemBertPreTrainedModel",
        "load_tf_weights_in_rembert",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_rembert"] = [
        "TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRemBertForCausalLM",
        "TFRemBertForMaskedLM",
        "TFRemBertForMultipleChoice",
        "TFRemBertForQuestionAnswering",
        "TFRemBertForSequenceClassification",
        "TFRemBertForTokenClassification",
        "TFRemBertLayer",
        "TFRemBertModel",
        "TFRemBertPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
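# Note: this is the standard transformers lazy-import layout. Type checkers follow
# the eager imports in the TYPE_CHECKING branch, while at runtime the module is
# swapped for a _LazyModule that resolves names from _import_structure on first
# attribute access.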
| 4 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class InputExample:
    """A single training/test example for simple sequence classification."""

    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None


@dataclass(frozen=True)
class InputFeatures:
    """A single set of features of data."""

    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class HansDataset(Dataset):
    """
    This will be superseded by a framework-agnostic approach soon.
    """

    features: List[InputFeatures]

    def __init__(
        self,
        data_dir: str,
        tokenizer: PreTrainedTokenizer,
        task: str,
        max_seq_length: Optional[int] = None,
        overwrite_cache=False,
        evaluate: bool = False,
    ):
        processor = hans_processors[task]()

        cached_features_file = os.path.join(
            data_dir,
            "cached_{}_{}_{}_{}".format(
                "dev" if evaluate else "train",
                tokenizer.__class__.__name__,
                str(max_seq_length),
                task,
            ),
        )
        label_list = processor.get_labels()
        if tokenizer.__class__ in (
            RobertaTokenizer,
            RobertaTokenizerFast,
            XLMRobertaTokenizer,
            BartTokenizer,
            BartTokenizerFast,
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not overwrite_cache:
                logger.info(f"Loading features from cached file {cached_features_file}")
                self.features = torch.load(cached_features_file)
            else:
                logger.info(f"Creating features from dataset file at {data_dir}")
                examples = (
                    processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
                )

                logger.info("Training examples: %s", len(examples))
                self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(self.features, cached_features_file)

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i):
        return self.features[i]

    def get_labels(self):
        return self.label_list
if is_tf_available():
import tensorflow as tf
class TFHansDataset:
    """
    This will be superseded by a framework-agnostic approach soon.
    """

    features: List[InputFeatures]

    def __init__(
        self,
        data_dir: str,
        tokenizer: PreTrainedTokenizer,
        task: str,
        max_seq_length: Optional[int] = 128,
        overwrite_cache=False,
        evaluate: bool = False,
    ):
        processor = hans_processors[task]()
        label_list = processor.get_labels()
        if tokenizer.__class__ in (
            RobertaTokenizer,
            RobertaTokenizerFast,
            XLMRobertaTokenizer,
            BartTokenizer,
            BartTokenizerFast,
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list

        examples = processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
        self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)

        def gen():
            for ex_index, ex in tqdm.tqdm(enumerate(self.features), desc="convert examples to features"):
                if ex_index % 10000 == 0:
                    logger.info("Writing example %d of %d" % (ex_index, len(examples)))

                yield (
                    {
                        "example_id": 0,
                        "input_ids": ex.input_ids,
                        "attention_mask": ex.attention_mask,
                        "token_type_ids": ex.token_type_ids,
                    },
                    ex.label,
                )

        self.dataset = tf.data.Dataset.from_generator(
            gen,
            (
                {
                    "example_id": tf.int32,
                    "input_ids": tf.int32,
                    "attention_mask": tf.int32,
                    "token_type_ids": tf.int32,
                },
                tf.int64,
            ),
            (
                {
                    "example_id": tf.TensorShape([]),
                    "input_ids": tf.TensorShape([None, None]),
                    "attention_mask": tf.TensorShape([None, None]),
                    "token_type_ids": tf.TensorShape([None, None]),
                },
                tf.TensorShape([]),
            ),
        )

    def get_dataset(self):
        return self.dataset

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i):
        return self.features[i]

    def get_labels(self):
        return self.label_list
class HansProcessor(DataProcessor):
    """Processor for the HANS data set."""

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_train_set.txt")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_evaluation_set.txt")), "dev")

    def get_labels(self):
        """See base class."""
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for i, line in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            pairID = line[7][2:] if line[7].startswith("ex") else line[7]
            label = line[0]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, pairID=pairID))
        return examples
def hans_convert_examples_to_features(
    examples: List[InputExample],
    label_list: List[str],
    max_length: int,
    tokenizer: PreTrainedTokenizer,
):
    label_map = {label: i for i, label in enumerate(label_list)}

    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples), desc="convert examples to features"):
        if ex_index % 10000 == 0:
            logger.info("Writing example %d" % (ex_index))

        inputs = tokenizer(
            example.text_a,
            example.text_b,
            add_special_tokens=True,
            max_length=max_length,
            padding="max_length",
            truncation=True,
            return_overflowing_tokens=True,
        )

        label = label_map[example.label] if example.label in label_map else 0

        pairID = int(example.pairID)

        features.append(InputFeatures(**inputs, label=label, pairID=pairID))

    for i, example in enumerate(examples[:5]):
        logger.info("*** Example ***")
        logger.info(f"guid: {example}")
        logger.info(f"features: {features[i]}")

    return features
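# Note: in the loop above, labels missing from label_map fall back to 0, and
# pairID is kept as an int so each example can be matched back to its HANS
# heuristic subset at evaluation time.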
hans_tasks_num_labels = {
    "hans": 3,
}

hans_processors = {
    "hans": HansProcessor,
}
| 4 | 1 |
'''simple docstring'''
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
snake_case__ = {
"""facebook/maskformer-swin-base-ade""": (
"""https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"""
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
logger = logging.get_logger(__name__)
class MaskFormerConfig(PretrainedConfig):
"""simple docstring"""
    model_type = "maskformer"
    attribute_map = {"hidden_size": "mask_feature_size"}
    backbones_supported = ["resnet", "swin"]
    decoders_supported = ["detr"]
    def __init__(
        self,
        fpn_feature_size: int = 256,
        mask_feature_size: int = 256,
        no_object_weight: float = 0.1,
        use_auxiliary_loss: bool = False,
        backbone_config: Optional[Dict] = None,
        decoder_config: Optional[Dict] = None,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        dice_weight: float = 1.0,
        cross_entropy_weight: float = 1.0,
        mask_weight: float = 20.0,
        output_auxiliary_logits: Optional[bool] = None,
        **kwargs,
    ):
        if backbone_config is None:
            # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
            backbone_config = SwinConfig(
                image_size=384,
                in_channels=3,
                patch_size=4,
                embed_dim=128,
                depths=[2, 2, 18, 2],
                num_heads=[4, 8, 16, 32],
                window_size=12,
                drop_path_rate=0.3,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )

        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. "
                f"Supported model types: {','.join(self.backbones_supported)}"
            )

        if decoder_config is None:
            # fall back to https://huggingface.co/facebook/detr-resnet-50
            decoder_config = DetrConfig()
        else:
            # verify that the decoder is supported
            decoder_type = (
                decoder_config.pop("model_type") if isinstance(decoder_config, dict) else decoder_config.model_type
            )
            if decoder_type not in self.decoders_supported:
                raise ValueError(
                    f"Transformer Decoder {decoder_type} not supported, please use one of"
                    f" {','.join(self.decoders_supported)}"
                )
            if isinstance(decoder_config, dict):
                config_class = CONFIG_MAPPING[decoder_type]
                decoder_config = config_class.from_dict(decoder_config)

        self.backbone_config = backbone_config
        self.decoder_config = decoder_config
        # main feature dimension for the model
        self.fpn_feature_size = fpn_feature_size
        self.mask_feature_size = mask_feature_size
        # initializer
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        # Hungarian matcher && loss
        self.cross_entropy_weight = cross_entropy_weight
        self.dice_weight = dice_weight
        self.mask_weight = mask_weight
        self.use_auxiliary_loss = use_auxiliary_loss
        self.no_object_weight = no_object_weight
        self.output_auxiliary_logits = output_auxiliary_logits

        self.num_attention_heads = self.decoder_config.encoder_attention_heads
        self.num_hidden_layers = self.decoder_config.num_hidden_layers
        super().__init__(**kwargs)
@classmethod
    def from_backbone_and_decoder_configs(
        cls, backbone_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ):
        """Instantiate a MaskFormerConfig from a backbone config and a DETR decoder config."""
        return cls(
            backbone_config=backbone_config,
            decoder_config=decoder_config,
            **kwargs,
        )
    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["decoder_config"] = self.decoder_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
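    # Usage note (describing the intended fallbacks documented in __init__ above):
    # constructing MaskFormerConfig() with no backbone_config/decoder_config
    # defaults to a Swin backbone and a DETR decoder.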
| 4 |
'''simple docstring'''
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
snake_case__ = datasets.utils.logging.get_logger(__name__)
@dataclass
class JsonConfig(datasets.BuilderConfig):
    """BuilderConfig for JSON."""

    features: Optional[datasets.Features] = None
    encoding: str = "utf-8"
    encoding_errors: Optional[str] = None
    field: Optional[str] = None
    use_threads: bool = True  # deprecated
    block_size: Optional[int] = None  # deprecated
    chunksize: int = 10 << 20  # 10MB
    newlines_in_values: Optional[bool] = None
class Json(datasets.ArrowBasedBuilder):
"""simple docstring"""
    BUILDER_CONFIG_CLASS = JsonConfig
    def _info(self):
        if self.config.block_size is not None:
            logger.warning("The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead")
            self.config.chunksize = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                "The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore."
            )
        if self.config.newlines_in_values is not None:
            raise ValueError("The JSON loader parameter `newlines_in_values` is no longer supported")
        return datasets.DatasetInfo(features=self.config.features)
def _a ( self : Any , _lowerCamelCase : List[str] ):
"""simple docstring"""
if not self.config.data_files:
raise ValueError(f'At least one data file must be specified, but got data_files={self.config.data_files}' )
A_ : int = dl_manager.download_and_extract(self.config.data_files )
if isinstance(_lowerCamelCase , (str, list, tuple) ):
A_ : Union[str, Any] = data_files
if isinstance(_lowerCamelCase , _lowerCamelCase ):
A_ : List[str] = [files]
A_ : List[Any] = [dl_manager.iter_files(_lowerCamelCase ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )]
A_ : Tuple = []
for split_name, files in data_files.items():
if isinstance(_lowerCamelCase , _lowerCamelCase ):
A_ : int = [files]
A_ : Union[str, Any] = [dl_manager.iter_files(_lowerCamelCase ) for file in files]
splits.append(datasets.SplitGenerator(name=_lowerCamelCase , gen_kwargs={'''files''': files} ) )
return splits
def _a ( self : int , _lowerCamelCase : pa.Table ):
"""simple docstring"""
if self.config.features is not None:
# adding missing columns
for column_name in set(self.config.features ) - set(pa_table.column_names ):
A_ : Optional[int] = self.config.features.arrow_schema.field(_lowerCamelCase ).type
A_ : Optional[int] = pa_table.append_column(_lowerCamelCase , pa.array([None] * len(_lowerCamelCase ) , type=_lowerCamelCase ) )
# more expensive cast to support nested structures with keys in a different order
# allows str <-> int/float or str to Audio for example
A_ : str = table_cast(_lowerCamelCase , self.config.features.arrow_schema )
return pa_table
def _a ( self : List[str] , _lowerCamelCase : int ):
"""simple docstring"""
for file_idx, file in enumerate(itertools.chain.from_iterable(_lowerCamelCase ) ):
# If the file is one json object and if we need to look at the list of items in one specific field
if self.config.field is not None:
with open(_lowerCamelCase , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
A_ : int = json.load(_lowerCamelCase )
# We keep only the field we are interested in
A_ : List[str] = dataset[self.config.field]
# We accept two formats: a list of dicts or a dict of lists
if isinstance(_lowerCamelCase , (list, tuple) ):
A_ : int = set().union(*[row.keys() for row in dataset] )
A_ : List[str] = {col: [row.get(_lowerCamelCase ) for row in dataset] for col in keys}
else:
A_ : Tuple = dataset
A_ : Dict = pa.Table.from_pydict(_lowerCamelCase )
yield file_idx, self._cast_table(_lowerCamelCase )
# If the file has one json object per line
else:
with open(_lowerCamelCase , '''rb''' ) as f:
A_ : int = 0
# Use block_size equal to the chunk size divided by 32 to leverage multithreading
# Set a default minimum value of 16kB if the chunk size is really small
A_ : int = max(self.config.chunksize // 32 , 16 << 10 )
A_ : int = (
self.config.encoding_errors if self.config.encoding_errors is not None else '''strict'''
)
while True:
A_ : Any = f.read(self.config.chunksize )
if not batch:
break
# Finish current line
try:
batch += f.readline()
except (AttributeError, io.UnsupportedOperation):
batch += readline(_lowerCamelCase )
# PyArrow only accepts utf-8 encoded bytes
if self.config.encoding != "utf-8":
A_ : Optional[Any] = batch.decode(self.config.encoding , errors=_lowerCamelCase ).encode('''utf-8''' )
try:
while True:
try:
A_ : List[Any] = paj.read_json(
io.BytesIO(_lowerCamelCase ) , read_options=paj.ReadOptions(block_size=_lowerCamelCase ) )
break
except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
if (
isinstance(_lowerCamelCase , pa.ArrowInvalid )
and "straddling" not in str(_lowerCamelCase )
or block_size > len(_lowerCamelCase )
):
raise
else:
# Increase the block size in case it was too small.
# The block size will be reset for the next file.
logger.debug(
f'Batch of {len(_lowerCamelCase )} bytes couldn\'t be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.' )
block_size *= 2
except pa.ArrowInvalid as e:
try:
with open(
_lowerCamelCase , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
A_ : Optional[Any] = json.load(_lowerCamelCase )
except json.JSONDecodeError:
logger.error(f'Failed to read file \'{file}\' with error {type(_lowerCamelCase )}: {e}' )
raise e
# If possible, parse the file as a list of json objects and exit the loop
if isinstance(_lowerCamelCase , _lowerCamelCase ): # list is the only sequence type supported in JSON
try:
A_ : Optional[int] = set().union(*[row.keys() for row in dataset] )
A_ : Tuple = {col: [row.get(_lowerCamelCase ) for row in dataset] for col in keys}
A_ : int = pa.Table.from_pydict(_lowerCamelCase )
except (pa.ArrowInvalid, AttributeError) as e:
logger.error(f'Failed to read file \'{file}\' with error {type(_lowerCamelCase )}: {e}' )
raise ValueError(f'Not able to read records in the JSON file at {file}.' ) from None
yield file_idx, self._cast_table(_lowerCamelCase )
break
else:
logger.error(f'Failed to read file \'{file}\' with error {type(_lowerCamelCase )}: {e}' )
raise ValueError(
f'Not able to read records in the JSON file at {file}. '
f'You should probably indicate the field of the JSON file containing your records. '
f'This JSON file contains the following fields: {str(list(dataset.keys() ) )}. '
f'Select the correct one and provide it as `field=\'XXX\'` to the dataset loading method. ' ) from None
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(_lowerCamelCase )
batch_idx += 1
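# Minimal standalone sketch of the pyarrow JSON-lines parsing that the builder
# above wraps with chunked reads and block-size retries:
import io
import pyarrow.json as paj
payload = b'{"col": 1}\n{"col": 2}\n'
table = paj.read_json(io.BytesIO(payload))  # one JSON object per line
assert table.num_rows == 2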
| 4 | 1 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
snake_case__ = logging.getLogger(__name__)
@dataclass(frozen=a__ )
class UpperCamelCase_ :
"""simple docstring"""
_lowerCAmelCase = 42
_lowerCAmelCase = 42
_lowerCAmelCase = None
_lowerCAmelCase = None
_lowerCAmelCase = None
@dataclass(frozen=a__ )
class UpperCamelCase_ :
"""simple docstring"""
_lowerCAmelCase = 42
_lowerCAmelCase = None
_lowerCAmelCase = None
_lowerCAmelCase = None
_lowerCAmelCase = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class UpperCamelCase_ (a__ ):
"""simple docstring"""
_lowerCAmelCase = 42
def __init__( self : Optional[int] , _lowerCamelCase : str , _lowerCamelCase : PreTrainedTokenizer , _lowerCamelCase : str , _lowerCamelCase : Optional[int] = None , _lowerCamelCase : List[Any]=False , _lowerCamelCase : bool = False , ):
"""simple docstring"""
A_ : Optional[int] = hans_processors[task]()
A_ : int = os.path.join(
_lowerCamelCase , '''cached_{}_{}_{}_{}'''.format(
'''dev''' if evaluate else '''train''' , tokenizer.__class__.__name__ , str(_lowerCamelCase ) , _lowerCamelCase , ) , )
A_ : Dict = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
A_ ,A_ : List[str] = label_list[2], label_list[1]
A_ : Optional[int] = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
A_ : str = cached_features_file + '''.lock'''
with FileLock(_lowerCamelCase ):
if os.path.exists(_lowerCamelCase ) and not overwrite_cache:
logger.info(f'Loading features from cached file {cached_features_file}' )
A_ : List[str] = torch.load(_lowerCamelCase )
else:
logger.info(f'Creating features from dataset file at {data_dir}' )
A_ : Optional[int] = (
processor.get_dev_examples(_lowerCamelCase ) if evaluate else processor.get_train_examples(_lowerCamelCase )
)
logger.info('''Training examples: %s''' , len(_lowerCamelCase ) )
A_ : Optional[int] = hans_convert_examples_to_features(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
logger.info('''Saving features into cached file %s''' , _lowerCamelCase )
torch.save(self.features , _lowerCamelCase )
def __len__( self : List[str] ):
"""simple docstring"""
return len(self.features )
def __getitem__( self : List[str] , _lowerCamelCase : Optional[int] ):
"""simple docstring"""
return self.features[i]
def _a ( self : str ):
"""simple docstring"""
return self.label_list
if is_tf_available():
import tensorflow as tf
class UpperCamelCase_ :
"""simple docstring"""
_lowerCAmelCase = 42
def __init__( self : Optional[int] , _lowerCamelCase : str , _lowerCamelCase : PreTrainedTokenizer , _lowerCamelCase : str , _lowerCamelCase : Optional[int] = 128 , _lowerCamelCase : Dict=False , _lowerCamelCase : bool = False , ):
"""simple docstring"""
A_ : Optional[int] = hans_processors[task]()
A_ : Optional[int] = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
A_ ,A_ : Union[str, Any] = label_list[2], label_list[1]
A_ : Tuple = label_list
A_ : Optional[int] = processor.get_dev_examples(_lowerCamelCase ) if evaluate else processor.get_train_examples(_lowerCamelCase )
A_ : Tuple = hans_convert_examples_to_features(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='''convert examples to features''' ):
if ex_index % 10000 == 0:
logger.info('''Writing example %d of %d''' % (ex_index, len(_lowerCamelCase )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
A_ : List[Any] = tf.data.Dataset.from_generator(
_lowerCamelCase , (
{
'''example_id''': tf.int32,
'''input_ids''': tf.int32,
'''attention_mask''': tf.int32,
'''token_type_ids''': tf.int32,
},
tf.int64,
) , (
{
'''example_id''': tf.TensorShape([] ),
'''input_ids''': tf.TensorShape([None, None] ),
'''attention_mask''': tf.TensorShape([None, None] ),
'''token_type_ids''': tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) , )
def _a ( self : Any ):
"""simple docstring"""
return self.dataset
def __len__( self : Dict ):
"""simple docstring"""
return len(self.features )
def __getitem__( self : Optional[int] , _lowerCamelCase : List[str] ):
"""simple docstring"""
return self.features[i]
def _a ( self : Tuple ):
"""simple docstring"""
return self.label_list
class UpperCamelCase_ (a__ ):
"""simple docstring"""
def _a ( self : List[str] , _lowerCamelCase : Union[str, Any] ):
"""simple docstring"""
return self._create_examples(self._read_tsv(os.path.join(_lowerCamelCase , '''heuristics_train_set.txt''' ) ) , '''train''' )
def _a ( self : List[str] , _lowerCamelCase : Tuple ):
"""simple docstring"""
return self._create_examples(self._read_tsv(os.path.join(_lowerCamelCase , '''heuristics_evaluation_set.txt''' ) ) , '''dev''' )
def _a ( self : Any ):
"""simple docstring"""
return ["contradiction", "entailment", "neutral"]
def _a ( self : Optional[Any] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Any ):
"""simple docstring"""
A_ : Tuple = []
for i, line in enumerate(_lowerCamelCase ):
if i == 0:
continue
A_ : str = '''%s-%s''' % (set_type, line[0])
A_ : Optional[Any] = line[5]
A_ : Union[str, Any] = line[6]
A_ : List[str] = line[7][2:] if line[7].startswith('''ex''' ) else line[7]
A_ : str = line[0]
examples.append(InputExample(guid=_lowerCamelCase , text_a=_lowerCamelCase , text_b=_lowerCamelCase , label=_lowerCamelCase , pairID=_lowerCamelCase ) )
return examples
def snake_case__ ( lowerCamelCase__ : List[InputExample] , lowerCamelCase__ : List[str] , lowerCamelCase__ : int , lowerCamelCase__ : PreTrainedTokenizer , ) -> List[InputFeatures]:
A_ : Union[str, Any] = {label: i for i, label in enumerate(lowerCamelCase__ )}
A_ : Optional[Any] = []
for ex_index, example in tqdm.tqdm(enumerate(lowerCamelCase__ ) , desc='''convert examples to features''' ):
if ex_index % 1_0_0_0_0 == 0:
logger.info('''Writing example %d''' % (ex_index) )
A_ : Optional[int] = tokenizer(
example.text_a , example.text_b , add_special_tokens=lowerCamelCase__ , max_length=lowerCamelCase__ , padding='''max_length''' , truncation=lowerCamelCase__ , return_overflowing_tokens=lowerCamelCase__ , )
A_ : List[str] = label_map[example.label] if example.label in label_map else 0
A_ : Tuple = int(example.pairID )
features.append(InputFeatures(**lowerCamelCase__ , label=lowerCamelCase__ , pairID=lowerCamelCase__ ) )
for i, example in enumerate(examples[:5] ):
logger.info('''*** Example ***''' )
logger.info(f'guid: {example}' )
logger.info(f'features: {features[i]}' )
return features
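# Tiny illustration of the label mapping built at the top of the converter
# above; labels missing from the map fall back to index 0.
_demo_label_list = ["contradiction", "entailment", "neutral"]
_demo_label_map = {label: i for i, label in enumerate(_demo_label_list)}
assert _demo_label_map["entailment"] == 1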
snake_case__ = {
"""hans""": 3,
}
snake_case__ = {
"""hans""": HansProcessor,
}
| 4 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
snake_case__ = logging.get_logger(__name__)
snake_case__ = {
"""microsoft/swin-tiny-patch4-window7-224""": (
"""https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"""
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class UpperCamelCase_ (a__, a__ ):
"""simple docstring"""
_lowerCAmelCase = 'swin'
_lowerCAmelCase = {
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self : Any , _lowerCamelCase : Optional[Any]=224 , _lowerCamelCase : List[str]=4 , _lowerCamelCase : Optional[Any]=3 , _lowerCamelCase : Tuple=96 , _lowerCamelCase : List[Any]=[2, 2, 6, 2] , _lowerCamelCase : List[str]=[3, 6, 12, 24] , _lowerCamelCase : List[Any]=7 , _lowerCamelCase : Optional[int]=4.0 , _lowerCamelCase : List[str]=True , _lowerCamelCase : List[str]=0.0 , _lowerCamelCase : Any=0.0 , _lowerCamelCase : Dict=0.1 , _lowerCamelCase : List[str]="gelu" , _lowerCamelCase : Tuple=False , _lowerCamelCase : Dict=0.02 , _lowerCamelCase : Optional[Any]=1E-5 , _lowerCamelCase : Any=32 , _lowerCamelCase : Tuple=None , _lowerCamelCase : Any=None , **_lowerCamelCase : str , ):
"""simple docstring"""
super().__init__(**_lowerCamelCase )
A_ : Optional[int] = image_size
A_ : Optional[int] = patch_size
A_ : Optional[int] = num_channels
A_ : Any = embed_dim
A_ : List[Any] = depths
A_ : Any = len(_lowerCamelCase )
A_ : List[Any] = num_heads
A_ : Tuple = window_size
A_ : Tuple = mlp_ratio
A_ : Dict = qkv_bias
A_ : List[str] = hidden_dropout_prob
A_ : List[str] = attention_probs_dropout_prob
A_ : Any = drop_path_rate
A_ : List[Any] = hidden_act
A_ : Tuple = use_absolute_embeddings
A_ : int = layer_norm_eps
A_ : Optional[Any] = initializer_range
A_ : Union[str, Any] = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
A_ : str = int(embed_dim * 2 ** (len(_lowerCamelCase ) - 1) )
A_ : str = ['''stem'''] + [f'stage{idx}' for idx in range(1 , len(_lowerCamelCase ) + 1 )]
A_ ,A_ : Optional[Any] = get_aligned_output_features_output_indices(
out_features=_lowerCamelCase , out_indices=_lowerCamelCase , stage_names=self.stage_names )
class UpperCamelCase_ (a__ ):
"""simple docstring"""
_lowerCAmelCase = version.parse('1.11' )
@property
def _a ( self : str ):
"""simple docstring"""
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def _a ( self : Union[str, Any] ):
"""simple docstring"""
return 1E-4
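# Sanity check of the derived `hidden_size` above: the channel dimension after
# the last stage doubles once per stage beyond the first, for the defaults used
# in this config (embed_dim=96, four stages).
_embed_dim, _depths = 96, [2, 2, 6, 2]
assert int(_embed_dim * 2 ** (len(_depths) - 1)) == 768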
| 4 | 1 |
'''simple docstring'''
import logging
import os
from .state import PartialState
class UpperCamelCase_ (logging.LoggerAdapter ):
"""simple docstring"""
@staticmethod
def _a ( _lowerCamelCase : Union[str, Any] ):
"""simple docstring"""
A_ : str = PartialState()
return not main_process_only or (main_process_only and state.is_main_process)
def _a ( self : List[Any] , _lowerCamelCase : Dict , _lowerCamelCase : str , *_lowerCamelCase : Tuple , **_lowerCamelCase : str ):
"""simple docstring"""
if PartialState._shared_state == {}:
raise RuntimeError(
'''You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.''' )
A_ : Dict = kwargs.pop('''main_process_only''' , _lowerCamelCase )
A_ : List[str] = kwargs.pop('''in_order''' , _lowerCamelCase )
if self.isEnabledFor(_lowerCamelCase ):
if self._should_log(_lowerCamelCase ):
A_ ,A_ : Union[str, Any] = self.process(_lowerCamelCase , _lowerCamelCase )
self.logger.log(_lowerCamelCase , _lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase )
elif in_order:
A_ : Any = PartialState()
for i in range(state.num_processes ):
if i == state.process_index:
A_ ,A_ : str = self.process(_lowerCamelCase , _lowerCamelCase )
self.logger.log(_lowerCamelCase , _lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase )
state.wait_for_everyone()
def snake_case__ ( lowerCamelCase__ : str , lowerCamelCase__ : str = None ) -> str:
if log_level is None:
A_ : int = os.environ.get('''ACCELERATE_LOG_LEVEL''' , lowerCamelCase__ )
A_ : int = logging.getLogger(lowerCamelCase__ )
if log_level is not None:
logger.setLevel(log_level.upper() )
logger.root.setLevel(log_level.upper() )
return MultiProcessAdapter(lowerCamelCase__ , {} )
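# Hedged usage sketch mirroring the upstream `accelerate.logging` API that the
# code above implements; an `Accelerator()` (or `PartialState()`) must exist
# first, otherwise `log` raises the RuntimeError shown above.
from accelerate import Accelerator
from accelerate.logging import get_logger
accelerator = Accelerator()  # initializes PartialState
logger = get_logger(__name__, log_level="INFO")
logger.info("visible on the main process only", main_process_only=True)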
| 4 |
'''simple docstring'''
from __future__ import annotations
def snake_case__ ( lowerCamelCase__ : list[int] , lowerCamelCase__ : int ) -> list[int]:
    # Two-pointer search; assumes `nums` is sorted in ascending order.
A_ : int = 0
A_ : str = len(lowerCamelCase__ ) - 1
while i < j:
if nums[i] + nums[j] == target:
return [i, j]
elif nums[i] + nums[j] < target:
A_ : Tuple = i + 1
else:
A_ : List[str] = j - 1
return []
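# Worked trace for two_pointer([2, 7, 11, 15], 9) on the sorted input:
#   i=0, j=3 -> 2 + 15 = 17 > 9, so j moves left
#   i=0, j=2 -> 2 + 11 = 13 > 9, so j moves left
#   i=0, j=1 -> 2 + 7  = 9,  so [0, 1] is returned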
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'{two_pointer([2, 7, 11, 15], 9) = }')
| 4 | 1 |
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class UpperCamelCase_ :
"""simple docstring"""
def __init__( self : List[str] , _lowerCamelCase : List[str] , _lowerCamelCase : int=3 , _lowerCamelCase : Dict=7 , _lowerCamelCase : str=True , _lowerCamelCase : Union[str, Any]=True , _lowerCamelCase : Tuple=False , _lowerCamelCase : str=True , _lowerCamelCase : Tuple=99 , _lowerCamelCase : str=32 , _lowerCamelCase : Dict=5 , _lowerCamelCase : Union[str, Any]=4 , _lowerCamelCase : Dict=37 , _lowerCamelCase : Any="gelu" , _lowerCamelCase : int=0.1 , _lowerCamelCase : Tuple=0.1 , _lowerCamelCase : Union[str, Any]=512 , _lowerCamelCase : Optional[Any]=16 , _lowerCamelCase : Union[str, Any]=2 , _lowerCamelCase : List[Any]=0.02 , _lowerCamelCase : Any=3 , _lowerCamelCase : Optional[int]=4 , _lowerCamelCase : Any=None , ):
"""simple docstring"""
A_ : List[Any] = parent
A_ : Optional[int] = batch_size
A_ : Optional[int] = seq_length
A_ : Dict = is_training
A_ : Optional[int] = use_input_mask
A_ : Any = use_token_type_ids
A_ : Any = use_labels
A_ : Optional[int] = vocab_size
A_ : Optional[Any] = hidden_size
A_ : Union[str, Any] = num_hidden_layers
A_ : str = num_attention_heads
A_ : List[Any] = intermediate_size
A_ : str = hidden_act
A_ : Any = hidden_dropout_prob
A_ : int = attention_probs_dropout_prob
A_ : Union[str, Any] = max_position_embeddings
A_ : Optional[Any] = type_vocab_size
A_ : Optional[int] = type_sequence_label_size
A_ : Dict = initializer_range
A_ : Any = num_labels
A_ : int = num_choices
A_ : Optional[int] = scope
def _a ( self : List[str] ):
"""simple docstring"""
A_ : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A_ : List[str] = None
if self.use_input_mask:
A_ : Any = random_attention_mask([self.batch_size, self.seq_length] )
A_ : int = None
A_ : str = None
A_ : Union[str, Any] = None
A_ : Dict = None
if self.use_labels:
A_ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A_ : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A_ : str = ids_tensor([self.batch_size] , self.num_choices )
A_ : List[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _a ( self : List[Any] ):
"""simple docstring"""
return FalconConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowerCamelCase , initializer_range=self.initializer_range , pad_token_id=1 , new_decoder_architecture=_lowerCamelCase , )
def _a ( self : Dict , _lowerCamelCase : Tuple , _lowerCamelCase : Optional[Any] , _lowerCamelCase : List[Any] , _lowerCamelCase : Optional[int] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Dict ):
"""simple docstring"""
A_ : Dict = FalconModel(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A_ : Dict = model(_lowerCamelCase , attention_mask=_lowerCamelCase )
A_ : Any = model(_lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self : int , _lowerCamelCase : Dict , _lowerCamelCase : Any , _lowerCamelCase : Dict , _lowerCamelCase : str , _lowerCamelCase : Any , _lowerCamelCase : Any , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Optional[int] , _lowerCamelCase : List[str] , ):
"""simple docstring"""
A_ : Union[str, Any] = True
A_ : Any = FalconModel(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A_ : Optional[int] = model(
_lowerCamelCase , attention_mask=_lowerCamelCase , encoder_hidden_states=_lowerCamelCase , encoder_attention_mask=_lowerCamelCase , )
A_ : Optional[int] = model(
_lowerCamelCase , attention_mask=_lowerCamelCase , encoder_hidden_states=_lowerCamelCase , )
A_ : Tuple = model(_lowerCamelCase , attention_mask=_lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self : Union[str, Any] , _lowerCamelCase : Dict , _lowerCamelCase : Any , _lowerCamelCase : str , _lowerCamelCase : Tuple , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : int , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : List[Any] , ):
"""simple docstring"""
A_ : str = FalconForCausalLM(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A_ : str = model(_lowerCamelCase , attention_mask=_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self : List[str] , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Optional[int] , _lowerCamelCase : int , _lowerCamelCase : List[str] , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : str , _lowerCamelCase : str , _lowerCamelCase : Any , ):
"""simple docstring"""
A_ : List[Any] = True
A_ : Optional[int] = True
A_ : List[str] = FalconForCausalLM(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
# first forward pass
A_ : List[str] = model(
_lowerCamelCase , attention_mask=_lowerCamelCase , encoder_hidden_states=_lowerCamelCase , encoder_attention_mask=_lowerCamelCase , use_cache=_lowerCamelCase , )
A_ : Optional[int] = outputs.past_key_values
# create hypothetical multiple next tokens and extend to next_input_ids
A_ : Any = ids_tensor((self.batch_size, 3) , config.vocab_size )
A_ : List[str] = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and attention mask
A_ : List[str] = torch.cat([input_ids, next_tokens] , dim=-1 )
A_ : List[str] = torch.cat([input_mask, next_mask] , dim=-1 )
A_ : str = model(
_lowerCamelCase , attention_mask=_lowerCamelCase , encoder_hidden_states=_lowerCamelCase , encoder_attention_mask=_lowerCamelCase , output_hidden_states=_lowerCamelCase , )['''hidden_states'''][0]
A_ : Union[str, Any] = model(
_lowerCamelCase , attention_mask=_lowerCamelCase , encoder_hidden_states=_lowerCamelCase , encoder_attention_mask=_lowerCamelCase , past_key_values=_lowerCamelCase , output_hidden_states=_lowerCamelCase , )['''hidden_states'''][0]
# select random slice
A_ : Union[str, Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
A_ : Optional[Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
A_ : List[Any] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_lowerCamelCase , _lowerCamelCase , atol=1E-3 ) )
def _a ( self : int ):
"""simple docstring"""
config_and_inputs = self.prepare_config_and_inputs()
config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels = config_and_inputs
A_ : Union[str, Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase_ (a__, a__, a__, unittest.TestCase ):
"""simple docstring"""
_lowerCAmelCase = (
(
FalconModel,
FalconForCausalLM,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconForQuestionAnswering,
)
if is_torch_available()
else ()
)
_lowerCAmelCase = (FalconForCausalLM,) if is_torch_available() else ()
_lowerCAmelCase = (
{
'feature-extraction': FalconModel,
'text-classification': FalconForSequenceClassification,
'text-generation': FalconForCausalLM,
'question-answering': FalconForQuestionAnswering,
'token-classification': FalconForTokenClassification,
'zero-shot': FalconForSequenceClassification,
}
if is_torch_available()
else {}
)
_lowerCAmelCase = False
_lowerCAmelCase = False
def _a ( self : List[str] ):
"""simple docstring"""
A_ : str = FalconModelTester(self )
A_ : List[Any] = ConfigTester(self , config_class=_lowerCamelCase , hidden_size=37 )
def _a ( self : Any ):
"""simple docstring"""
self.config_tester.run_common_tests()
def _a ( self : int ):
"""simple docstring"""
A_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCamelCase )
def _a ( self : Optional[Any] ):
"""simple docstring"""
A_ ,*A_ : str = self.model_tester.prepare_config_and_inputs()
for alibi in [True, False]:
A_ : Dict = alibi
self.model_tester.create_and_check_model(_lowerCamelCase , *_lowerCamelCase )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A_ ,A_ : int = self.model_tester.prepare_config_and_inputs_for_common()
A_ : List[Any] = 3
A_ : Any = input_dict['''input_ids''']
A_ : Tuple = input_ids.ne(1 ).to(_lowerCamelCase )
A_ : List[str] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
A_ : Dict = FalconForSequenceClassification(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A_ : Optional[int] = model(_lowerCamelCase , attention_mask=_lowerCamelCase , labels=_lowerCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _a ( self : List[Any] ):
"""simple docstring"""
A_ ,A_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
A_ : Dict = 3
A_ : int = '''single_label_classification'''
A_ : Dict = input_dict['''input_ids''']
A_ : Dict = input_ids.ne(1 ).to(_lowerCamelCase )
A_ : Dict = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
A_ : List[str] = FalconForSequenceClassification(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A_ : List[Any] = model(_lowerCamelCase , attention_mask=_lowerCamelCase , labels=_lowerCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A_ ,A_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
A_ : Dict = input_dict['''input_ids''']
A_ : List[Any] = FalconForCausalLM(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A_ : Union[str, Any] = model(_lowerCamelCase , use_cache=_lowerCamelCase )
A_ : Union[str, Any] = input_ids.shape[0]
A_ : Tuple = model._convert_to_rw_cache(result.past_key_values )
A_ : List[str] = model._convert_cache_to_standard_format(_lowerCamelCase , _lowerCamelCase )
for layer in range(len(_lowerCamelCase ) ):
for tensor_idx in range(2 ):
self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3 )
self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4 )
self.assertTrue(
torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx] ) )
def _a ( self : Optional[int] ):
"""simple docstring"""
A_ ,A_ : int = self.model_tester.prepare_config_and_inputs_for_common()
A_ : Tuple = 3
A_ : str = '''multi_label_classification'''
A_ : Union[str, Any] = input_dict['''input_ids''']
A_ : List[Any] = input_ids.ne(1 ).to(_lowerCamelCase )
A_ : Tuple = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
A_ : List[str] = FalconForSequenceClassification(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A_ : Optional[int] = model(_lowerCamelCase , attention_mask=_lowerCamelCase , labels=_lowerCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
for model_class in self.all_generative_model_classes:
A_ ,A_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
# If it doesn't support cache, pass the test
if not hasattr(_lowerCamelCase , '''use_cache''' ):
return
A_ : Tuple = model_class(_lowerCamelCase ).to(_lowerCamelCase )
if "use_cache" not in inputs:
A_ : str = True
A_ : Tuple = model(**_lowerCamelCase )
# If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
if "past_key_values" not in outputs:
return
A_ : List[Any] = (
getattr(_lowerCamelCase , '''decoder_layers''' , _lowerCamelCase )
or getattr(_lowerCamelCase , '''num_decoder_layers''' , _lowerCamelCase )
or config.num_hidden_layers
)
A_ : List[Any] = getattr(_lowerCamelCase , '''num_kv_heads''' , config.num_attention_heads )
A_ : Dict = getattr(_lowerCamelCase , '''d_model''' , config.hidden_size )
A_ : Optional[int] = embed_dim // num_attention_heads
A_ : List[Any] = outputs['''past_key_values''']
self.assertEqual(len(_lowerCamelCase ) , _lowerCamelCase )
A_ ,A_ : int = inputs['''input_ids'''].shape
for i in range(_lowerCamelCase ):
if config.new_decoder_architecture:
A_ : int = config.num_attention_heads
elif config.multi_query:
A_ : List[Any] = 1
self.assertEqual(len(past_kv[0] ) , 2 ) # K V for the decoder = 2
self.assertEqual(
past_kv[i][0].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
self.assertEqual(
past_kv[i][1].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
@require_torch
class UpperCamelCase_ (unittest.TestCase ):
"""simple docstring"""
@slow
def _a ( self : int ):
"""simple docstring"""
A_ : List[Any] = AutoTokenizer.from_pretrained('''Rocketknight1/falcon-rw-1b''' )
A_ : Tuple = FalconForCausalLM.from_pretrained('''Rocketknight1/falcon-rw-1b''' )
model.eval()
model.to(_lowerCamelCase )
A_ : List[Any] = tokenizer('''My favorite food is''' , return_tensors='''pt''' ).to(_lowerCamelCase )
A_ : Optional[Any] = (
'''My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday.'''
)
A_ : Tuple = model.generate(**_lowerCamelCase , do_sample=_lowerCamelCase , max_new_tokens=19 )
A_ : int = tokenizer.batch_decode(_lowerCamelCase )[0]
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
@slow
def _a ( self : Union[str, Any] ):
"""simple docstring"""
for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
A_ : str = AutoTokenizer.from_pretrained(_lowerCamelCase )
A_ : str = FalconForCausalLM.from_pretrained(_lowerCamelCase )
model.eval()
model.to(_lowerCamelCase )
A_ : int = tokenizer('''My favorite food is''' , return_tensors='''pt''' ).to(_lowerCamelCase )
# We just test that these run without errors - the models are randomly initialized
# and so the actual text outputs will be garbage
model.generate(**_lowerCamelCase , do_sample=_lowerCamelCase , max_new_tokens=4 )
model.generate(**_lowerCamelCase , do_sample=_lowerCamelCase , max_new_tokens=4 )
model.generate(**_lowerCamelCase , num_beams=2 , max_new_tokens=4 )
@slow
def _a ( self : Optional[int] ):
"""simple docstring"""
with torch.no_grad():
for repo in [
"Rocketknight1/falcon-rw-1b",
"Rocketknight1/tiny-random-falcon-7b",
"Rocketknight1/tiny-random-falcon-40b",
]:
A_ : Optional[Any] = AutoTokenizer.from_pretrained(_lowerCamelCase )
A_ : Optional[Any] = FalconForCausalLM.from_pretrained(_lowerCamelCase )
model.eval()
model.to(device=_lowerCamelCase )
A_ : Tuple = tokenizer('''My favorite food is''' , return_tensors='''pt''' ).to(_lowerCamelCase )
# Test results are the same with and without cache
A_ : str = model.generate(**_lowerCamelCase , do_sample=_lowerCamelCase , max_new_tokens=20 , use_cache=_lowerCamelCase )
A_ : int = model.generate(**_lowerCamelCase , do_sample=_lowerCamelCase , max_new_tokens=20 , use_cache=_lowerCamelCase )
self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0 )
| 4 |
'''simple docstring'''
def snake_case__ ( lowerCamelCase__ : list[int] , lowerCamelCase__ : list[int] , lowerCamelCase__ : int ) -> bool:
return not any(
neighbour == 1 and colored_vertices[i] == color
for i, neighbour in enumerate(lowerCamelCase__ ) )
def snake_case__ ( lowerCamelCase__ : list[list[int]] , lowerCamelCase__ : int , lowerCamelCase__ : list[int] , lowerCamelCase__ : int ) -> bool:
# Base Case
if index == len(lowerCamelCase__ ):
return True
# Recursive Step
for i in range(lowerCamelCase__ ):
if valid_coloring(graph[index] , lowerCamelCase__ , lowerCamelCase__ ):
# Color current vertex
A_ : int = i
# Validate coloring
if util_color(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , index + 1 ):
return True
# Backtrack
A_ : str = -1
return False
def snake_case__ ( lowerCamelCase__ : list[list[int]] , lowerCamelCase__ : int ) -> list[int]:
A_ : List[str] = [-1] * len(lowerCamelCase__ )
if util_color(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , 0 ):
return colored_vertices
return []
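# Usage sketch (function names follow the in-file references): a triangle
# graph is not 2-colorable, but 3 colors succeed via backtracking.
if __name__ == "__main__":
    graph = [[0, 1, 1], [1, 0, 1], [1, 1, 0]]
    assert color(graph, 2) == []
    assert color(graph, 3) == [0, 1, 2]  # first valid assignment found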
| 4 | 1 |
'''simple docstring'''
from ... import PretrainedConfig
snake_case__ = {
"""sijunhe/nezha-cn-base""": """https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json""",
}
class UpperCamelCase_ (a__ ):
"""simple docstring"""
_lowerCAmelCase = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
_lowerCAmelCase = 'nezha'
def __init__( self : Tuple , _lowerCamelCase : int=21128 , _lowerCamelCase : Optional[Any]=768 , _lowerCamelCase : Dict=12 , _lowerCamelCase : List[str]=12 , _lowerCamelCase : int=3072 , _lowerCamelCase : List[str]="gelu" , _lowerCamelCase : Optional[int]=0.1 , _lowerCamelCase : Any=0.1 , _lowerCamelCase : List[Any]=512 , _lowerCamelCase : Optional[Any]=64 , _lowerCamelCase : Any=2 , _lowerCamelCase : int=0.02 , _lowerCamelCase : List[Any]=1E-12 , _lowerCamelCase : Optional[Any]=0.1 , _lowerCamelCase : Tuple=0 , _lowerCamelCase : Dict=2 , _lowerCamelCase : List[str]=3 , _lowerCamelCase : Union[str, Any]=True , **_lowerCamelCase : Optional[Any] , ):
"""simple docstring"""
super().__init__(pad_token_id=_lowerCamelCase , bos_token_id=_lowerCamelCase , eos_token_id=_lowerCamelCase , **_lowerCamelCase )
A_ : Dict = vocab_size
A_ : int = hidden_size
A_ : str = num_hidden_layers
A_ : Optional[int] = num_attention_heads
A_ : List[Any] = hidden_act
A_ : Optional[Any] = intermediate_size
A_ : int = hidden_dropout_prob
A_ : Union[str, Any] = attention_probs_dropout_prob
A_ : Optional[int] = max_position_embeddings
A_ : List[str] = max_relative_position
A_ : Union[str, Any] = type_vocab_size
A_ : Dict = initializer_range
A_ : Tuple = layer_norm_eps
A_ : Optional[Any] = classifier_dropout
A_ : int = use_cache
| 4 |
'''simple docstring'''
from __future__ import annotations
from PIL import Image
# Define glider example
snake_case__ = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
snake_case__ = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def snake_case__ ( lowerCamelCase__ : list[list[int]] ) -> list[list[int]]:
A_ : str = []
for i in range(len(lowerCamelCase__ ) ):
A_ : Optional[Any] = []
for j in range(len(cells[i] ) ):
# Get the number of live neighbours
A_ : Optional[int] = 0
if i > 0 and j > 0:
neighbour_count += cells[i - 1][j - 1]
if i > 0:
neighbour_count += cells[i - 1][j]
if i > 0 and j < len(cells[i] ) - 1:
neighbour_count += cells[i - 1][j + 1]
if j > 0:
neighbour_count += cells[i][j - 1]
if j < len(cells[i] ) - 1:
neighbour_count += cells[i][j + 1]
if i < len(lowerCamelCase__ ) - 1 and j > 0:
neighbour_count += cells[i + 1][j - 1]
if i < len(lowerCamelCase__ ) - 1:
neighbour_count += cells[i + 1][j]
if i < len(lowerCamelCase__ ) - 1 and j < len(cells[i] ) - 1:
neighbour_count += cells[i + 1][j + 1]
# Rules of the game of life (excerpt from Wikipedia):
# 1. Any live cell with two or three live neighbours survives.
# 2. Any dead cell with three live neighbours becomes a live cell.
# 3. All other live cells die in the next generation.
# Similarly, all other dead cells stay dead.
A_ : List[str] = cells[i][j] == 1
if (
(alive and 2 <= neighbour_count <= 3)
or not alive
and neighbour_count == 3
):
next_generation_row.append(1 )
else:
next_generation_row.append(0 )
next_generation.append(lowerCamelCase__ )
return next_generation
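# Sanity check for the rules above: a vertical blinker becomes horizontal
# after one generation:
#   new_generation(BLINKER) == [[0, 0, 0], [1, 1, 1], [0, 0, 0]]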
def snake_case__ ( lowerCamelCase__ : list[list[int]] , lowerCamelCase__ : int ) -> list[Image.Image]:
A_ : List[Any] = []
for _ in range(lowerCamelCase__ ):
# Create output image
A_ : Optional[int] = Image.new('''RGB''' , (len(cells[0] ), len(lowerCamelCase__ )) )
A_ : int = img.load()
# Save cells to image
for x in range(len(lowerCamelCase__ ) ):
for y in range(len(cells[0] ) ):
A_ : Optional[Any] = 2_5_5 - cells[y][x] * 2_5_5
A_ : str = (colour, colour, colour)
# Save image
images.append(lowerCamelCase__ )
A_ : Optional[int] = new_generation(lowerCamelCase__ )
return images
if __name__ == "__main__":
snake_case__ = generate_images(GLIDER, 16)
images[0].save("""out.gif""", save_all=True, append_images=images[1:])
| 4 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
snake_case__ = {
"""configuration_xlm""": ["""XLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLMConfig""", """XLMOnnxConfig"""],
"""tokenization_xlm""": ["""XLMTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ = [
"""XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLMForMultipleChoice""",
"""XLMForQuestionAnswering""",
"""XLMForQuestionAnsweringSimple""",
"""XLMForSequenceClassification""",
"""XLMForTokenClassification""",
"""XLMModel""",
"""XLMPreTrainedModel""",
"""XLMWithLMHeadModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ = [
"""TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLMForMultipleChoice""",
"""TFXLMForQuestionAnsweringSimple""",
"""TFXLMForSequenceClassification""",
"""TFXLMForTokenClassification""",
"""TFXLMMainLayer""",
"""TFXLMModel""",
"""TFXLMPreTrainedModel""",
"""TFXLMWithLMHeadModel""",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
snake_case__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
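# Hedged illustration of the `_LazyModule` pattern above: importing the package
# module is cheap, and the heavy torch/TF submodules only load on first
# attribute access (requires `transformers` to be installed).
# import transformers.models.xlm as xlm  # fast: nothing heavy imported yet
# xlm.XLMConfig                          # resolves configuration_xlm on demand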
| 4 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class UpperCamelCase_ (unittest.TestCase ):
"""simple docstring"""
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A_ : Any = tempfile.mkdtemp()
A_ : List[Any] = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
A_ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
A_ : Tuple = {
'''do_resize''': True,
'''size''': 20,
'''do_center_crop''': True,
'''crop_size''': 18,
'''do_normalize''': True,
'''image_mean''': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
'''image_std''': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
}
A_ : List[Any] = os.path.join(self.tmpdirname , _lowerCamelCase )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(_lowerCamelCase , _lowerCamelCase )
def _a ( self : Dict , **_lowerCamelCase : Tuple ):
"""simple docstring"""
return BertTokenizer.from_pretrained(self.tmpdirname , **_lowerCamelCase )
def _a ( self : Optional[int] , **_lowerCamelCase : Optional[int] ):
"""simple docstring"""
return BertTokenizerFast.from_pretrained(self.tmpdirname , **_lowerCamelCase )
def _a ( self : Optional[Any] , **_lowerCamelCase : Tuple ):
"""simple docstring"""
return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **_lowerCamelCase )
def _a ( self : Tuple ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def _a ( self : int ):
"""simple docstring"""
A_ : Union[str, Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
A_ : Any = [Image.fromarray(np.moveaxis(_lowerCamelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _a ( self : int ):
"""simple docstring"""
A_ : Tuple = self.get_tokenizer()
A_ : Tuple = self.get_rust_tokenizer()
A_ : Dict = self.get_image_processor()
A_ : List[Any] = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
processor_slow.save_pretrained(self.tmpdirname )
A_ : str = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=_lowerCamelCase )
A_ : Any = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
processor_fast.save_pretrained(self.tmpdirname )
A_ : List[Any] = AlignProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , _lowerCamelCase )
self.assertIsInstance(processor_fast.tokenizer , _lowerCamelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , _lowerCamelCase )
self.assertIsInstance(processor_fast.image_processor , _lowerCamelCase )
def _a ( self : List[Any] ):
"""simple docstring"""
A_ : List[str] = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
A_ : Optional[int] = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
A_ : Tuple = self.get_image_processor(do_normalize=_lowerCamelCase , padding_value=1.0 )
A_ : List[str] = AlignProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_lowerCamelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _lowerCamelCase )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A_ : Dict = self.get_image_processor()
A_ : Any = self.get_tokenizer()
A_ : List[str] = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
A_ : Any = self.prepare_image_inputs()
A_ : List[Any] = image_processor(_lowerCamelCase , return_tensors='''np''' )
A_ : str = processor(images=_lowerCamelCase , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _a ( self : Dict ):
"""simple docstring"""
A_ : str = self.get_image_processor()
A_ : List[str] = self.get_tokenizer()
A_ : Optional[int] = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
A_ : int = '''lower newer'''
A_ : str = processor(text=_lowerCamelCase )
A_ : Dict = tokenizer(_lowerCamelCase , padding='''max_length''' , max_length=64 )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _a ( self : str ):
"""simple docstring"""
A_ : Optional[int] = self.get_image_processor()
A_ : Optional[Any] = self.get_tokenizer()
A_ : List[str] = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
A_ : List[Any] = '''lower newer'''
A_ : Optional[int] = self.prepare_image_inputs()
A_ : List[Any] = processor(text=_lowerCamelCase , images=_lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(_lowerCamelCase ):
processor()
def _a ( self : List[str] ):
"""simple docstring"""
A_ : Optional[Any] = self.get_image_processor()
A_ : Optional[int] = self.get_tokenizer()
A_ : List[Any] = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
A_ : str = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
A_ : str = processor.batch_decode(_lowerCamelCase )
A_ : Union[str, Any] = tokenizer.batch_decode(_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
def _a ( self : Tuple ):
"""simple docstring"""
A_ : str = self.get_image_processor()
A_ : Tuple = self.get_tokenizer()
A_ : Any = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
A_ : str = '''lower newer'''
A_ : List[str] = self.prepare_image_inputs()
A_ : Tuple = processor(text=_lowerCamelCase , images=_lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 4 | 1 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
snake_case__ = logging.get_logger(__name__)
snake_case__ = {
"""Intel/dpt-large""": """https://huggingface.co/Intel/dpt-large/resolve/main/config.json""",
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class UpperCamelCase_ (a__ ):
"""simple docstring"""
_lowerCAmelCase = 'dpt'
def __init__( self : int , _lowerCamelCase : Union[str, Any]=768 , _lowerCamelCase : Optional[Any]=12 , _lowerCamelCase : Optional[int]=12 , _lowerCamelCase : Tuple=3072 , _lowerCamelCase : Optional[Any]="gelu" , _lowerCamelCase : Tuple=0.0 , _lowerCamelCase : Union[str, Any]=0.0 , _lowerCamelCase : Any=0.02 , _lowerCamelCase : str=1E-12 , _lowerCamelCase : Dict=384 , _lowerCamelCase : int=16 , _lowerCamelCase : str=3 , _lowerCamelCase : Optional[int]=False , _lowerCamelCase : List[Any]=True , _lowerCamelCase : Any=[2, 5, 8, 11] , _lowerCamelCase : Dict="project" , _lowerCamelCase : Dict=[4, 2, 1, 0.5] , _lowerCamelCase : Tuple=[96, 192, 384, 768] , _lowerCamelCase : Dict=256 , _lowerCamelCase : Tuple=-1 , _lowerCamelCase : Union[str, Any]=False , _lowerCamelCase : Optional[int]=True , _lowerCamelCase : Tuple=0.4 , _lowerCamelCase : str=255 , _lowerCamelCase : int=0.1 , _lowerCamelCase : Dict=[1, 1024, 24, 24] , _lowerCamelCase : List[Any]=[0, 1] , _lowerCamelCase : str=None , **_lowerCamelCase : Optional[int] , ):
"""simple docstring"""
super().__init__(**_lowerCamelCase )
A_ : Union[str, Any] = hidden_size
A_ : Tuple = is_hybrid
if self.is_hybrid:
if backbone_config is None:
logger.info('''Initializing the config with a `BiT` backbone.''' )
A_ : Optional[Any] = {
'''global_padding''': '''same''',
'''layer_type''': '''bottleneck''',
'''depths''': [3, 4, 9],
'''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
'''embedding_dynamic_padding''': True,
}
A_ : List[str] = BitConfig(**_lowerCamelCase )
elif isinstance(_lowerCamelCase , _lowerCamelCase ):
logger.info('''Initializing the config with a `BiT` backbone.''' )
A_ : Optional[Any] = BitConfig(**_lowerCamelCase )
elif isinstance(_lowerCamelCase , _lowerCamelCase ):
A_ : List[str] = backbone_config
else:
raise ValueError(
f'backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.' )
A_ : Any = backbone_featmap_shape
A_ : int = neck_ignore_stages
if readout_type != "project":
raise ValueError('''Readout type must be \'project\' when using `DPT-hybrid` mode.''' )
else:
A_ : Tuple = None
A_ : Dict = None
A_ : Optional[Any] = []
A_ : Optional[int] = num_hidden_layers
A_ : Optional[int] = num_attention_heads
A_ : Any = intermediate_size
A_ : Tuple = hidden_act
A_ : Union[str, Any] = hidden_dropout_prob
A_ : str = attention_probs_dropout_prob
A_ : Union[str, Any] = initializer_range
A_ : int = layer_norm_eps
A_ : Tuple = image_size
A_ : Optional[Any] = patch_size
A_ : Dict = num_channels
A_ : str = qkv_bias
A_ : Tuple = backbone_out_indices
if readout_type not in ["ignore", "add", "project"]:
raise ValueError('''Readout_type must be one of [\'ignore\', \'add\', \'project\']''' )
A_ : Any = readout_type
A_ : Any = reassemble_factors
A_ : Any = neck_hidden_sizes
A_ : str = fusion_hidden_size
A_ : List[str] = head_in_index
A_ : Optional[int] = use_batch_norm_in_fusion_residual
# auxiliary head attributes (semantic segmentation)
A_ : int = use_auxiliary_head
A_ : Optional[int] = auxiliary_loss_weight
A_ : Tuple = semantic_loss_ignore_index
A_ : List[Any] = semantic_classifier_dropout
def _a ( self : str ):
"""simple docstring"""
A_ : int = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
A_ : Any = self.backbone_config.to_dict()
A_ : Any = self.__class__.model_type
return output
| 4 |
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
snake_case__ = logging.get_logger(__name__)
snake_case__ = """▁"""
snake_case__ = {
"""vocab_file""": """vocab.json""",
"""spm_file""": """sentencepiece.bpe.model""",
}
snake_case__ = {
"""vocab_file""": {
"""facebook/s2t-small-librispeech-asr""": (
"""https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json"""
),
},
"""spm_file""": {
"""facebook/s2t-small-librispeech-asr""": (
"""https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model"""
)
},
}
snake_case__ = {
"""facebook/s2t-small-librispeech-asr""": 10_24,
}
snake_case__ = ["""pt""", """fr""", """ru""", """nl""", """ro""", """it""", """es""", """de"""]
snake_case__ = {"""mustc""": MUSTC_LANGS}
class UpperCamelCase_ (a__ ):
"""simple docstring"""
_lowerCAmelCase = VOCAB_FILES_NAMES
_lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCAmelCase = MAX_MODEL_INPUT_SIZES
_lowerCAmelCase = ['input_ids', 'attention_mask']
_lowerCAmelCase = []
def __init__( self : Optional[int] , _lowerCamelCase : List[str] , _lowerCamelCase : List[str] , _lowerCamelCase : str="<s>" , _lowerCamelCase : Union[str, Any]="</s>" , _lowerCamelCase : Dict="<pad>" , _lowerCamelCase : str="<unk>" , _lowerCamelCase : Union[str, Any]=False , _lowerCamelCase : int=False , _lowerCamelCase : Any=None , _lowerCamelCase : Any=None , _lowerCamelCase : Optional[Dict[str, Any]] = None , **_lowerCamelCase : Optional[int] , ):
"""simple docstring"""
A_ : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_lowerCamelCase , eos_token=_lowerCamelCase , unk_token=_lowerCamelCase , pad_token=_lowerCamelCase , do_upper_case=_lowerCamelCase , do_lower_case=_lowerCamelCase , tgt_lang=_lowerCamelCase , lang_codes=_lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCamelCase , )
A_ : Optional[int] = do_upper_case
A_ : Tuple = do_lower_case
A_ : Tuple = load_json(_lowerCamelCase )
A_ : Tuple = {v: k for k, v in self.encoder.items()}
A_ : List[Any] = spm_file
A_ : List[str] = load_spm(_lowerCamelCase , self.sp_model_kwargs )
if lang_codes is not None:
A_ : Any = lang_codes
A_ : Optional[Any] = LANGUAGES[lang_codes]
A_ : Optional[Any] = [f'<lang:{lang}>' for lang in self.langs]
A_ : Union[str, Any] = {lang: self.sp_model.PieceToId(f'<lang:{lang}>' ) for lang in self.langs}
A_ : Optional[int] = self.lang_tokens
A_ : int = tgt_lang if tgt_lang is not None else self.langs[0]
self.set_tgt_lang_special_tokens(self._tgt_lang )
else:
A_ : Dict = {}
@property
def _a ( self : Tuple ):
"""simple docstring"""
return len(self.encoder )
@property
def _a ( self : int ):
"""simple docstring"""
return self._tgt_lang
@tgt_lang.setter
def _a ( self : List[str] , _lowerCamelCase : Any ):
"""simple docstring"""
A_ : int = new_tgt_lang
self.set_tgt_lang_special_tokens(_lowerCamelCase )
def _a ( self : Tuple , _lowerCamelCase : str ):
"""simple docstring"""
A_ : List[str] = self.lang_code_to_id[tgt_lang]
A_ : Optional[Any] = [lang_code_id]
def _a ( self : Optional[Any] , _lowerCamelCase : str ):
"""simple docstring"""
return self.sp_model.encode(_lowerCamelCase , out_type=_lowerCamelCase )
def _a ( self : List[Any] , _lowerCamelCase : int ):
"""simple docstring"""
return self.encoder.get(_lowerCamelCase , self.encoder[self.unk_token] )
def _a ( self : int , _lowerCamelCase : int ):
"""simple docstring"""
return self.decoder.get(_lowerCamelCase , self.unk_token )
    def _a ( self : int , tokens : List[str] ):
        """simple docstring"""
        current_sub_tokens = []
        out_string = ''''''
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                decoded = self.sp_model.decode(current_sub_tokens )
                out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
        decoded = self.sp_model.decode(current_sub_tokens )
        out_string += decoded.upper() if self.do_upper_case else decoded
        return out_string.strip()
def _a ( self : int , _lowerCamelCase : Dict , _lowerCamelCase : Any=None ):
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id]
def _a ( self : List[Any] , _lowerCamelCase : List[int] , _lowerCamelCase : Optional[List[int]] = None , _lowerCamelCase : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowerCamelCase , token_ids_a=_lowerCamelCase , already_has_special_tokens=_lowerCamelCase )
        prefix_ones = [1] * len(self.prefix_tokens )
        suffix_ones = [1]
if token_ids_a is None:
return prefix_ones + ([0] * len(_lowerCamelCase )) + suffix_ones
return prefix_ones + ([0] * len(_lowerCamelCase )) + ([0] * len(_lowerCamelCase )) + suffix_ones
def _a ( self : Dict ):
"""simple docstring"""
        vocab = self.encoder.copy()
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Union[str, Any] ):
"""simple docstring"""
        state = self.__dict__.copy()
        state['''sp_model'''] = None
return state
    def __setstate__( self : List[str] , d : Dict ):
        """simple docstring"""
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            self.sp_model_kwargs = {}
        self.sp_model = load_spm(self.spm_file , self.sp_model_kwargs )
    def _a ( self : Optional[Any] , save_directory : str , filename_prefix : Optional[str] = None ):
        """simple docstring"""
        save_dir = Path(save_directory )
        assert save_dir.is_dir(), f'{save_directory} should be a directory'
        vocab_save_path = save_dir / (
            (filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''vocab_file''']
        )
        spm_save_path = save_dir / (
            (filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''spm_file''']
        )
        save_json(self.encoder , vocab_save_path )
        if os.path.abspath(self.spm_file ) != os.path.abspath(spm_save_path ) and os.path.isfile(self.spm_file ):
            copyfile(self.spm_file , spm_save_path )
        elif not os.path.isfile(self.spm_file ):
            with open(spm_save_path , '''wb''' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (str(vocab_save_path ), str(spm_save_path ))
def load_spm( path : str , sp_model_kwargs : Dict[str, Any] ) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs )
    spm.Load(str(path ) )
    return spm
def load_json( path : str ) -> Union[Dict, List]:
    with open(path , '''r''' ) as f:
        return json.load(f )
def save_json( data : Any , path : str ) -> None:
    with open(path , '''w''' ) as f:
        json.dump(data , f , indent=2 )
| 4 | 1 |
'''simple docstring'''
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
EXCLUDE_EXAMPLES = [
"""cross_validation.py""",
"""gradient_accumulation.py""",
"""local_sgd.py""",
"""multi_process_metrics.py""",
"""memory.py""",
"""automatic_gradient_accumulation.py""",
"""fsdp_with_peak_mem_tracking.py""",
"""deepspeed_with_config_support.py""",
"""megatron_lm_gpt_pretraining.py""",
]
class UpperCamelCase_ (unittest.TestCase ):
"""simple docstring"""
    def one_complete_example( self : str , complete_file_name : str , parser_only : bool , secondary_filename : str = None , special_strings : list = None ):
        """simple docstring"""
        self.maxDiff = None
        by_feature_path = os.path.abspath(os.path.join('''examples''' , '''by_feature''' ) )
        examples_path = os.path.abspath('''examples''' )
        for item in os.listdir(by_feature_path ):
            if item not in EXCLUDE_EXAMPLES:
                item_path = os.path.join(by_feature_path , item )
                if os.path.isfile(item_path ) and ".py" in item_path:
                    with self.subTest(
                        tested_script=complete_file_name , feature_script=item , tested_section='''main()''' if parser_only else '''training_function()''' , ):
                        diff = compare_against_test(
                            os.path.join(examples_path , item ) , item_path , parser_only , secondary_filename )
                        diff = '''\n'''.join(diff )
                        if special_strings is not None:
                            for string in special_strings:
                                diff = diff.replace(string , '''''' )
                        self.assertEqual(diff , '''''' )
def _a ( self : Tuple ):
"""simple docstring"""
        self.one_complete_example('''complete_nlp_example.py''' , True )
        self.one_complete_example('''complete_nlp_example.py''' , False )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
        cv_path = os.path.abspath(os.path.join('''examples''' , '''cv_example.py''' ) )
        special_strings = [
''' ''' * 16 + '''{\n\n''',
''' ''' * 20 + '''"accuracy": eval_metric["accuracy"],\n\n''',
''' ''' * 20 + '''"f1": eval_metric["f1"],\n\n''',
''' ''' * 20 + '''"train_loss": total_loss.item() / len(train_dataloader),\n\n''',
''' ''' * 20 + '''"epoch": epoch,\n\n''',
''' ''' * 16 + '''},\n\n''',
''' ''' * 16 + '''step=epoch,\n''',
''' ''' * 12,
''' ''' * 8 + '''for step, batch in enumerate(active_dataloader):\n''',
]
        self.one_complete_example('''complete_cv_example.py''' , True , cv_path , special_strings )
        self.one_complete_example('''complete_cv_example.py''' , False , cv_path , special_strings )
@mock.patch.dict(os.environ, {'TESTING_MOCKED_DATALOADERS': '1'} )
class UpperCamelCase_ (a__ ):
"""simple docstring"""
_lowerCAmelCase = False
@classmethod
def _a ( cls : int ):
"""simple docstring"""
super().setUpClass()
        cls._tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls._tmpdir , '''default_config.yml''' )
        write_basic_config(save_location=cls.configPath )
        cls._launch_args = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath]
@classmethod
def _a ( cls : Optional[Any] ):
"""simple docstring"""
super().tearDownClass()
shutil.rmtree(cls._tmpdir )
def _a ( self : str ):
"""simple docstring"""
        testargs = f'\n    examples/by_feature/checkpointing.py\n    --checkpointing_steps epoch\n    --output_dir {self.tmpdir}\n    '.split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''epoch_0''' ) ) )
def _a ( self : List[str] ):
"""simple docstring"""
        testargs = f'\n    examples/by_feature/checkpointing.py\n    --checkpointing_steps 1\n    --output_dir {self.tmpdir}\n    '.split()
        run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''step_2''' ) ) )
def _a ( self : Tuple ):
"""simple docstring"""
        testargs = f'\n    examples/by_feature/checkpointing.py\n    --resume_from_checkpoint {os.path.join(self.tmpdir , "epoch_0" )}\n    '.split()
        output = run_command(self._launch_args + testargs , return_stdout=True )
        self.assertNotIn('''epoch 0:''' , output )
        self.assertIn('''epoch 1:''' , output )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
        testargs = f'\n    examples/by_feature/checkpointing.py\n    --resume_from_checkpoint {os.path.join(self.tmpdir , "step_2" )}\n    '.split()
        output = run_command(self._launch_args + testargs , return_stdout=True )
        if torch.cuda.is_available():
            num_processes = torch.cuda.device_count()
        else:
            num_processes = 1
        if num_processes > 1:
            self.assertNotIn('''epoch 0:''' , output )
            self.assertIn('''epoch 1:''' , output )
        else:
            self.assertIn('''epoch 0:''' , output )
            self.assertIn('''epoch 1:''' , output )
@slow
def _a ( self : Dict ):
"""simple docstring"""
        testargs = '''
            examples/by_feature/cross_validation.py
            --num_folds 2
            '''.split()
        with mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''0'''} ):
            output = run_command(self._launch_args + testargs , return_stdout=True )
            results = re.findall('''({.+})''' , output )
            results = [r for r in results if '''accuracy''' in r][-1]
            results = ast.literal_eval(results )
            self.assertGreaterEqual(results['''accuracy'''] , 0.75 )
def _a ( self : Dict ):
"""simple docstring"""
        testargs = ['''examples/by_feature/multi_process_metrics.py''']
run_command(self._launch_args + testargs )
@require_trackers
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def _a ( self : List[Any] ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
            testargs = f'\n    examples/by_feature/tracking.py\n    --with_tracking\n    --project_dir {tmpdir}\n    '.split()
            run_command(self._launch_args + testargs )
            self.assertTrue(os.path.exists(os.path.join(tmpdir , '''tracking''' ) ) )
def _a ( self : Dict ):
"""simple docstring"""
        testargs = ['''examples/by_feature/gradient_accumulation.py''']
run_command(self._launch_args + testargs )
def _a ( self : Dict ):
"""simple docstring"""
        testargs = ['''examples/by_feature/local_sgd.py''']
run_command(self._launch_args + testargs )
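# Illustrative addition (not from the original tests): every test above prefixes its
# script-specific arguments with the class-level launcher args, so the subprocess
# command executed by run_command looks roughly like this; the config path is
# hypothetical.
if __name__ == "__main__":
    launch_args = ["accelerate", "launch", "--config_file", "/tmp/default_config.yml"]
    testargs = ["examples/by_feature/local_sgd.py"]
    print(" ".join(launch_args + testargs))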
| 4 |
'''simple docstring'''
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
is_python_no_less_than_3_10 = sys.version_info >= (3, 10)
def list_field( default : Union[str, Any]=None , metadata : str=None ) -> List[Any]:
    return field(default_factory=lambda: default , metadata=metadata )
@dataclass
class BasicExample:
    """simple docstring"""
    foo: int
    bar: float
    baz: str
    flag: bool
@dataclass
class WithDefaultExample:
    """simple docstring"""
    foo: int = 4_2
    baz: str = field(default='toto', metadata={'help': 'help message'} )
@dataclass
class WithDefaultBoolExample:
    """simple docstring"""
    foo: bool = False
    baz: bool = True
    opt: Optional[bool] = None
class BasicEnum(Enum ):
    """simple docstring"""
    titi = '''titi'''
    toto = '''toto'''
class MixedTypeEnum(Enum ):
    """simple docstring"""
    titi = '''titi'''
    toto = '''toto'''
    fourtytwo = 4_2
@dataclass
class EnumExample:
    """simple docstring"""
    foo: BasicEnum = "toto"
    def __post_init__( self : Optional[Any] ):
        """simple docstring"""
        self.foo = BasicEnum(self.foo )
@dataclass
class MixedTypeEnumExample:
    """simple docstring"""
    foo: MixedTypeEnum = "toto"
    def __post_init__( self : Tuple ):
        """simple docstring"""
        self.foo = MixedTypeEnum(self.foo )
@dataclass
class OptionalExample:
    """simple docstring"""
    foo: Optional[int] = None
    bar: Optional[float] = field(default=None , metadata={'help': 'help message'} )
    baz: Optional[str] = None
    ces: Optional[List[str]] = list_field(default=[] )
    des: Optional[List[int]] = list_field(default=[] )
@dataclass
class ListExample:
    """simple docstring"""
    foo_int: List[int] = list_field(default=[] )
    bar_int: List[int] = list_field(default=[1, 2, 3] )
    foo_str: List[str] = list_field(default=['Hallo', 'Bonjour', 'Hello'] )
    foo_float: List[float] = list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class RequiredExample:
    """simple docstring"""
    required_list: List[int] = field()
    required_str: str = field()
    required_enum: BasicEnum = field()
    def __post_init__( self : Tuple ):
        """simple docstring"""
        self.required_enum = BasicEnum(self.required_enum )
@dataclass
class StringLiteralAnnotationExample:
    """simple docstring"""
    foo: int
    required_enum: "BasicEnum" = field()
    opt: "Optional[bool]" = None
    baz: "str" = field(default='toto', metadata={'help': 'help message'} )
    foo_str: "List[str]" = list_field(default=['Hallo', 'Bonjour', 'Hello'] )
if is_python_no_less_than_3_10:
    @dataclass
    class WithDefaultBoolExamplePep604:
        """simple docstring"""
        foo: bool = False
        baz: bool = True
        opt: bool | None = None
    @dataclass
    class OptionalExamplePep604:
        """simple docstring"""
        foo: int | None = None
        bar: float | None = field(default=None , metadata={'help': 'help message'} )
        baz: str | None = None
        ces: list[str] | None = list_field(default=[] )
        des: list[int] | None = list_field(default=[] )
class UpperCamelCase_ (unittest.TestCase ):
"""simple docstring"""
def _a ( self : List[str] , _lowerCamelCase : argparse.ArgumentParser , _lowerCamelCase : argparse.ArgumentParser ):
"""simple docstring"""
self.assertEqual(len(a._actions ) , len(b._actions ) )
for x, y in zip(a._actions , b._actions ):
            xx = {k: v for k, v in vars(x ).items() if k != '''container'''}
            yy = {k: v for k, v in vars(y ).items() if k != '''container'''}
            # Choices with mixed type have custom function as "type"
            # So we need to compare results directly for equality
            if xx.get('''choices''' , None ) and yy.get('''choices''' , None ):
                for expected_choice in yy["choices"] + xx["choices"]:
                    self.assertEqual(xx['''type'''](expected_choice ) , yy['''type'''](expected_choice ) )
                del xx["type"], yy["type"]
            self.assertEqual(xx , yy )
def _a ( self : Optional[int] ):
"""simple docstring"""
        parser = HfArgumentParser(BasicExample )
        expected = argparse.ArgumentParser()
        expected.add_argument('''--foo''' , type=int , required=True )
        expected.add_argument('''--bar''' , type=float , required=True )
        expected.add_argument('''--baz''' , type=str , required=True )
        expected.add_argument('''--flag''' , type=string_to_bool , default=False , const=True , nargs='''?''' )
        self.argparsersEqual(parser , expected )
        args = ['''--foo''', '''1''', '''--baz''', '''quux''', '''--bar''', '''0.5''']
        (example,) = parser.parse_args_into_dataclasses(args , look_for_args_file=False )
        self.assertFalse(example.flag )
def _a ( self : Dict ):
"""simple docstring"""
        parser = HfArgumentParser(WithDefaultExample )
        expected = argparse.ArgumentParser()
        expected.add_argument('''--foo''' , default=42 , type=int )
        expected.add_argument('''--baz''' , default='''toto''' , type=str , help='''help message''' )
        self.argparsersEqual(parser , expected )
def _a ( self : Dict ):
"""simple docstring"""
        expected = argparse.ArgumentParser()
        expected.add_argument('''--foo''' , type=string_to_bool , default=False , const=True , nargs='''?''' )
        expected.add_argument('''--baz''' , type=string_to_bool , default=True , const=True , nargs='''?''' )
        # A boolean no_* argument always has to come after its "default: True" regular counter-part
        # and its default must be set to False
        expected.add_argument('''--no_baz''' , action='''store_false''' , default=False , dest='''baz''' )
        expected.add_argument('''--opt''' , type=string_to_bool , default=None )
        dataclass_types = [WithDefaultBoolExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(WithDefaultBoolExamplePep604 )
        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type )
            self.argparsersEqual(parser , expected )
            args = parser.parse_args([] )
            self.assertEqual(args , Namespace(foo=False , baz=True , opt=None ) )
            args = parser.parse_args(['''--foo''', '''--no_baz'''] )
            self.assertEqual(args , Namespace(foo=True , baz=False , opt=None ) )
            args = parser.parse_args(['''--foo''', '''--baz'''] )
            self.assertEqual(args , Namespace(foo=True , baz=True , opt=None ) )
            args = parser.parse_args(['''--foo''', '''True''', '''--baz''', '''True''', '''--opt''', '''True'''] )
            self.assertEqual(args , Namespace(foo=True , baz=True , opt=True ) )
            args = parser.parse_args(['''--foo''', '''False''', '''--baz''', '''False''', '''--opt''', '''False'''] )
            self.assertEqual(args , Namespace(foo=False , baz=False , opt=False ) )
def _a ( self : List[Any] ):
"""simple docstring"""
        parser = HfArgumentParser(MixedTypeEnumExample )
        expected = argparse.ArgumentParser()
        expected.add_argument(
            '''--foo''' , default='''toto''' , choices=['''titi''', '''toto''', 42] , type=make_choice_type_function(['''titi''', '''toto''', 42] ) , )
        self.argparsersEqual(parser , expected )
        args = parser.parse_args([] )
        self.assertEqual(args.foo , '''toto''' )
        enum_ex = parser.parse_args_into_dataclasses([] )[0]
        self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
        args = parser.parse_args(['''--foo''', '''titi'''] )
        self.assertEqual(args.foo , '''titi''' )
        enum_ex = parser.parse_args_into_dataclasses(['''--foo''', '''titi'''] )[0]
        self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
        args = parser.parse_args(['''--foo''', '''42'''] )
        self.assertEqual(args.foo , 42 )
        enum_ex = parser.parse_args_into_dataclasses(['''--foo''', '''42'''] )[0]
        self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
def _a ( self : Optional[int] ):
"""simple docstring"""
        @dataclass
        class WithLiteralExample:
            """simple docstring"""
            foo: Literal["titi", "toto", 42] = "toto"
        parser = HfArgumentParser(WithLiteralExample )
        expected = argparse.ArgumentParser()
        expected.add_argument(
            '''--foo''' , default='''toto''' , choices=('''titi''', '''toto''', 42) , type=make_choice_type_function(['''titi''', '''toto''', 42] ) , )
        self.argparsersEqual(parser , expected )
        args = parser.parse_args([] )
        self.assertEqual(args.foo , '''toto''' )
        args = parser.parse_args(['''--foo''', '''titi'''] )
        self.assertEqual(args.foo , '''titi''' )
        args = parser.parse_args(['''--foo''', '''42'''] )
        self.assertEqual(args.foo , 42 )
def _a ( self : Dict ):
"""simple docstring"""
        parser = HfArgumentParser(ListExample )
        expected = argparse.ArgumentParser()
        expected.add_argument('''--foo_int''' , nargs='''+''' , default=[] , type=int )
        expected.add_argument('''--bar_int''' , nargs='''+''' , default=[1, 2, 3] , type=int )
        expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=str )
        expected.add_argument('''--foo_float''' , nargs='''+''' , default=[0.1, 0.2, 0.3] , type=float )
        self.argparsersEqual(parser , expected )
        args = parser.parse_args([] )
        self.assertEqual(
            args , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['''Hallo''', '''Bonjour''', '''Hello'''] , foo_float=[0.1, 0.2, 0.3] ) , )
        args = parser.parse_args('''--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'''.split() )
        self.assertEqual(args , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['''a''', '''b''', '''c'''] , foo_float=[0.1, 0.7] ) )
def _a ( self : Dict ):
"""simple docstring"""
        expected = argparse.ArgumentParser()
        expected.add_argument('''--foo''' , default=None , type=int )
        expected.add_argument('''--bar''' , default=None , type=float , help='''help message''' )
        expected.add_argument('''--baz''' , default=None , type=str )
        expected.add_argument('''--ces''' , nargs='''+''' , default=[] , type=str )
        expected.add_argument('''--des''' , nargs='''+''' , default=[] , type=int )
        dataclass_types = [OptionalExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(OptionalExamplePep604 )
        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type )
            self.argparsersEqual(parser , expected )
            args = parser.parse_args([] )
            self.assertEqual(args , Namespace(foo=None , bar=None , baz=None , ces=[] , des=[] ) )
            args = parser.parse_args('''--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'''.split() )
            self.assertEqual(args , Namespace(foo=12 , bar=3.14 , baz='''42''' , ces=['''a''', '''b''', '''c'''] , des=[1, 2, 3] ) )
def _a ( self : List[Any] ):
"""simple docstring"""
        parser = HfArgumentParser(RequiredExample )
        expected = argparse.ArgumentParser()
        expected.add_argument('''--required_list''' , nargs='''+''' , type=int , required=True )
        expected.add_argument('''--required_str''' , type=str , required=True )
        expected.add_argument(
            '''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=True , )
        self.argparsersEqual(parser , expected )
def _a ( self : Optional[Any] ):
"""simple docstring"""
        parser = HfArgumentParser(StringLiteralAnnotationExample )
        expected = argparse.ArgumentParser()
        expected.add_argument('''--foo''' , type=int , required=True )
        expected.add_argument(
            '''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=True , )
        expected.add_argument('''--opt''' , type=string_to_bool , default=None )
        expected.add_argument('''--baz''' , default='''toto''' , type=str , help='''help message''' )
        expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=str )
        self.argparsersEqual(parser , expected )
def _a ( self : Tuple ):
"""simple docstring"""
        parser = HfArgumentParser(BasicExample )
        args_dict = {
            '''foo''': 12,
            '''bar''': 3.14,
            '''baz''': '''42''',
            '''flag''': True,
        }
        parsed_args = parser.parse_dict(args_dict )[0]
        example = BasicExample(**args_dict )
        self.assertEqual(parsed_args , example )
def _a ( self : List[str] ):
"""simple docstring"""
        parser = HfArgumentParser(BasicExample )
        args_dict_extra = {
            '''foo''': 12,
            '''bar''': 3.14,
            '''baz''': '''42''',
            '''flag''': True,
            '''extra''': 42,
        }
        self.assertRaises(ValueError , parser.parse_dict , args_dict_extra , allow_extra_keys=False )
def _a ( self : Optional[Any] ):
"""simple docstring"""
        parser = HfArgumentParser(BasicExample )
        args_dict_for_json = {
            '''foo''': 12,
            '''bar''': 3.14,
            '''baz''': '''42''',
            '''flag''': True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir , '''temp_json''' )
            os.mkdir(temp_local_path )
            with open(temp_local_path + '''.json''' , '''w+''' ) as f:
                json.dump(args_dict_for_json , f )
            parsed_args = parser.parse_json_file(Path(temp_local_path + '''.json''' ) )[0]
            example = BasicExample(**args_dict_for_json )
            self.assertEqual(parsed_args , example )
def _a ( self : int ):
"""simple docstring"""
        parser = HfArgumentParser(BasicExample )
        args_dict_for_yaml = {
            '''foo''': 12,
            '''bar''': 3.14,
            '''baz''': '''42''',
            '''flag''': True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir , '''temp_yaml''' )
            os.mkdir(temp_local_path )
            with open(temp_local_path + '''.yaml''' , '''w+''' ) as f:
                yaml.dump(args_dict_for_yaml , f )
            parsed_args = parser.parse_yaml_file(Path(temp_local_path + '''.yaml''' ) )[0]
            example = BasicExample(**args_dict_for_yaml )
            self.assertEqual(parsed_args , example )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
        parser = HfArgumentParser(TrainingArguments )
        self.assertIsNotNone(parser )
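# A minimal usage sketch (an addition, separate from the test suite): HfArgumentParser
# maps dataclass fields to CLI flags; `DemoArgs` is hypothetical.
if __name__ == "__main__":
    @dataclass
    class DemoArgs:
        foo: int = 1
        baz: str = "toto"
    demo_parser = HfArgumentParser(DemoArgs)
    (demo_args,) = demo_parser.parse_args_into_dataclasses(["--foo", "2"])
    assert demo_args.foo == 2 and demo_args.baz == "toto"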
| 4 | 1 |
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
snake_case__ = abspath(join(dirname(dirname(__file__)), """src"""))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def pytest_addoption( parser ):
    from diffusers.utils.testing_utils import pytest_addoption_shared
    pytest_addoption_shared(parser )
def pytest_terminal_summary( terminalreporter ):
    from diffusers.utils.testing_utils import pytest_terminal_summary_main
    make_reports = terminalreporter.config.getoption('''--make-reports''' )
    if make_reports:
        pytest_terminal_summary_main(terminalreporter , id=make_reports )
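# Usage note (added for clarity, not part of the original conftest): with the hooks
# above, a summary report is produced when pytest is invoked with the shared option,
# e.g. `python -m pytest tests/ --make-reports=<run_name>`; the report handling itself
# lives in `pytest_terminal_summary_main`.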
| 4 |
'''simple docstring'''
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
snake_case__ = get_tests_dir("""fixtures""")
class UpperCamelCase_ (unittest.TestCase ):
"""simple docstring"""
def _a ( self : List[str] ):
"""simple docstring"""
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        image_processor = ViTImageProcessor.from_pretrained('''hf-internal-testing/tiny-random-vit''' )
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch('''requests.Session.request''' , return_value=response_mock ) as mock_head:
            image_processor = ViTImageProcessor.from_pretrained('''hf-internal-testing/tiny-random-vit''' )
# This check we did call the fake head request
mock_head.assert_called()
def _a ( self : Tuple ):
"""simple docstring"""
A_ : Tuple = ViTImageProcessor.from_pretrained(
'''https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json''' )
def _a ( self : Dict ):
"""simple docstring"""
with self.assertRaises(_lowerCamelCase ):
# config is in subfolder, the following should not work without specifying the subfolder
A_ : Any = AutoImageProcessor.from_pretrained('''hf-internal-testing/stable-diffusion-all-variants''' )
A_ : Tuple = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/stable-diffusion-all-variants''' , subfolder='''feature_extractor''' )
self.assertIsNotNone(_lowerCamelCase )
@is_staging_test
class UpperCamelCase_ (unittest.TestCase ):
"""simple docstring"""
@classmethod
def _a ( cls : Tuple ):
"""simple docstring"""
        cls._token = TOKEN
HfFolder.save_token(_lowerCamelCase )
@classmethod
def _a ( cls : str ):
"""simple docstring"""
try:
delete_repo(token=cls._token , repo_id='''test-image-processor''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-image-processor-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-image-processor''' )
except HTTPError:
pass
def _a ( self : List[Any] ):
"""simple docstring"""
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR )
        image_processor.push_to_hub('''test-image-processor''' , use_auth_token=self._token )
        new_image_processor = ViTImageProcessor.from_pretrained(f'{USER}/test-image-processor' )
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v , getattr(new_image_processor , k ) )
        # Reset repo
        delete_repo(token=self._token , repo_id='''test-image-processor''' )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir , repo_id='''test-image-processor''' , push_to_hub=True , use_auth_token=self._token )
            new_image_processor = ViTImageProcessor.from_pretrained(f'{USER}/test-image-processor' )
            for k, v in image_processor.__dict__.items():
                self.assertEqual(v , getattr(new_image_processor , k ) )
def _a ( self : Optional[Any] ):
"""simple docstring"""
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR )
        image_processor.push_to_hub('''valid_org/test-image-processor''' , use_auth_token=self._token )
        new_image_processor = ViTImageProcessor.from_pretrained('''valid_org/test-image-processor''' )
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v , getattr(new_image_processor , k ) )
        # Reset repo
        delete_repo(token=self._token , repo_id='''valid_org/test-image-processor''' )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir , repo_id='''valid_org/test-image-processor-org''' , push_to_hub=True , use_auth_token=self._token )
            new_image_processor = ViTImageProcessor.from_pretrained('''valid_org/test-image-processor-org''' )
            for k, v in image_processor.__dict__.items():
                self.assertEqual(v , getattr(new_image_processor , k ) )
def _a ( self : Optional[Any] ):
"""simple docstring"""
CustomImageProcessor.register_for_auto_class()
        image_processor = CustomImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR )
image_processor.push_to_hub('''test-dynamic-image-processor''' , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
image_processor.auto_map , {'''AutoImageProcessor''': '''custom_image_processing.CustomImageProcessor'''} , )
        new_image_processor = AutoImageProcessor.from_pretrained(
            f'{USER}/test-dynamic-image-processor' , trust_remote_code=True )
# Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
self.assertEqual(new_image_processor.__class__.__name__ , '''CustomImageProcessor''' )
| 4 | 1 |
'''simple docstring'''
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class UpperCamelCase_ (a__ ):
"""simple docstring"""
_lowerCAmelCase = ''
_lowerCAmelCase = 'hf-legacy' # "hf://"" is reserved for hffs
    def __init__( self : Optional[int] , repo_info : Optional[DatasetInfo] = None , token : Optional[str] = None , **kwargs , ):
        """simple docstring"""
        super().__init__(self , **kwargs )
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None
def _a ( self : List[str] ):
"""simple docstring"""
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    '''name''': hf_file.rfilename,
                    '''size''': None,
                    '''type''': '''file''',
                }
self.dir_cache.update(
{
str(_lowerCamelCase ): {'''name''': str(_lowerCamelCase ), '''size''': None, '''type''': '''directory'''}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
    def _a ( self : Union[str, Any] , path : str , mode : str = "rb" , **kwargs , ):
        """simple docstring"""
        if not isinstance(self.repo_info , DatasetInfo ):
            raise NotImplementedError(f'Open is only implemented for dataset repositories, but got {self.repo_info}' )
        url = hf_hub_url(self.repo_info.id , path , revision=self.repo_info.sha )
        return fsspec.open(
            url , mode=mode , headers=get_authentication_headers_for_url(url , use_auth_token=self.token ) , client_kwargs={'''trust_env''': True} , ).open()
def _a ( self : Optional[int] , _lowerCamelCase : Union[str, Any] , **_lowerCamelCase : int ):
"""simple docstring"""
self._get_dirs()
        path = self._strip_protocol(_lowerCamelCase )
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path )
    def _a ( self : Tuple , path , detail=False , **kwargs ):
        """simple docstring"""
        self._get_dirs()
        path = PurePosixPath(path.strip('''/''' ) )
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip('''/''' ) )
            root = p.parent
            if root == path:
                paths[str(p )] = f
        out = list(paths.values() )
        if detail:
            return out
        else:
            return sorted(f['''name'''] for f in out )
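# Small illustrative sketch (an addition): ls() above matches cache entries whose
# PurePosixPath parent equals the query path, which is how implicit directories fall
# out of a flat sibling list; the file names below are made up.
if __name__ == "__main__":
    flat_files = ["README.md", "data/train.csv", "data/test.csv"]
    query = PurePosixPath("data")
    children = sorted(p for p in flat_files if PurePosixPath(p).parent == query)
    print(children)  # ['data/test.csv', 'data/train.csv']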
| 4 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class UpperCamelCase_ (a__ ):
"""simple docstring"""
_lowerCAmelCase = (
'This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image.'
'It takes two arguments named `image` which should be the original image, and `label` which should be a text '
'describing the elements what should be identified in the segmentation mask. The tool returns the mask.'
)
_lowerCAmelCase = 'CIDAS/clipseg-rd64-refined'
_lowerCAmelCase = 'image_segmenter'
_lowerCAmelCase = CLIPSegForImageSegmentation
_lowerCAmelCase = ['image', 'text']
_lowerCAmelCase = ['image']
def __init__( self : Optional[int] , *_lowerCamelCase : Optional[int] , **_lowerCamelCase : Union[str, Any] ):
"""simple docstring"""
requires_backends(self , ['''vision'''] )
super().__init__(*_lowerCamelCase , **_lowerCamelCase )
    def encode( self : List[str] , image : "Image" , label : str ):
        """simple docstring"""
        return self.pre_processor(text=[label] , images=[image] , padding=True , return_tensors='''pt''' )
    def forward( self : Union[str, Any] , inputs : Optional[int] ):
        """simple docstring"""
        with torch.no_grad():
            logits = self.model(**inputs ).logits
        return logits
    def decode( self : List[str] , outputs : Optional[int] ):
        """simple docstring"""
        array = outputs.cpu().detach().numpy()
        array[array < 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8 ) )
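# Hypothetical usage sketch (an addition, not part of the tool): PipelineTool instances
# are callable, so end to end this preprocesses, forwards, and thresholds logits into a
# binary PIL mask. The image path is an assumption and the checkpoint downloads on
# first use.
if __name__ == "__main__":
    segmenter = UpperCamelCase_()  # the image-segmentation tool defined above
    mask = segmenter(image=Image.open("cat.png"), label="cat")
    mask.save("cat_mask.png")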
| 4 | 1 |
'''simple docstring'''
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class BitModelTester:
"""simple docstring"""
    def __init__( self : Optional[Any] , parent , batch_size=3 , image_size=32 , num_channels=3 , embeddings_size=10 , hidden_sizes=[8, 16, 32, 64] , depths=[1, 1, 2, 1] , is_training=True , use_labels=True , hidden_act="relu" , num_labels=3 , scope=None , out_features=["stage2", "stage3", "stage4"] , out_indices=[2, 3, 4] , num_groups=1 , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes )
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups
def _a ( self : Optional[int] ):
"""simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels
def _a ( self : Union[str, Any] ):
"""simple docstring"""
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
    def _a ( self : List[Any] , config , pixel_values , labels ):
        """simple docstring"""
        model = BitModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
    def _a ( self : Optional[int] , config , pixel_values , labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = BitForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def _a ( self : Any , config , pixel_values , labels ):
        """simple docstring"""
        model = BitBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
        self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
        # verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , 1 )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , 1 )
        self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def _a ( self : List[Any] ):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class UpperCamelCase_ (a__, a__, unittest.TestCase ):
"""simple docstring"""
_lowerCAmelCase = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
_lowerCAmelCase = (
{'feature-extraction': BitModel, 'image-classification': BitForImageClassification}
if is_torch_available()
else {}
)
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
def _a ( self : Optional[Any] ):
"""simple docstring"""
        self.model_tester = BitModelTester(self )
        self.config_tester = ConfigTester(self , config_class=BitConfig , has_text_modality=False )
def _a ( self : Optional[Any] ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _a ( self : List[Any] ):
"""simple docstring"""
return
@unittest.skip(reason='''Bit does not output attentions''' )
def _a ( self : str ):
"""simple docstring"""
pass
@unittest.skip(reason='''Bit does not use inputs_embeds''' )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
pass
@unittest.skip(reason='''Bit does not support input and output embeddings''' )
def _a ( self : Any ):
"""simple docstring"""
pass
def _a ( self : List[Any] ):
"""simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
def _a ( self : Optional[Any] ):
"""simple docstring"""
A_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCamelCase )
def _a ( self : Optional[Any] ):
"""simple docstring"""
A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_lowerCamelCase )
def _a ( self : Tuple ):
"""simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config=config )
            for name, module in model.named_modules():
                if isinstance(module , (nn.BatchNorm2d, nn.GroupNorm) ):
                    self.assertTrue(
                        torch.all(module.weight == 1 ) , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
                    self.assertTrue(
                        torch.all(module.bias == 0 ) , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
def _a ( self : int ):
"""simple docstring"""
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states ) , expected_num_stages + 1 )
            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ['''preactivation''', '''bottleneck''']
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict['''output_hidden_states'''] = True
                check_hidden_states_output(inputs_dict , config , model_class )
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict , config , model_class )
@unittest.skip(reason='''Bit does not use feedforward chunking''' )
def _a ( self : Tuple ):
"""simple docstring"""
pass
def _a ( self : str ):
"""simple docstring"""
A_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCamelCase )
@slow
def _a ( self : Union[str, Any] ):
"""simple docstring"""
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BitModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img( ) -> Optional[int]:
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_torch
@require_vision
class UpperCamelCase_ (unittest.TestCase ):
"""simple docstring"""
@cached_property
def _a ( self : List[Any] ):
"""simple docstring"""
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def _a ( self : Dict ):
"""simple docstring"""
        model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''pt''' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([[-0.6526, -0.5263, -1.4398]] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
@require_torch
class UpperCamelCase_ (a__, unittest.TestCase ):
"""simple docstring"""
_lowerCAmelCase = (BitBackbone,) if is_torch_available() else ()
_lowerCAmelCase = BitConfig
_lowerCAmelCase = False
def _a ( self : List[str] ):
"""simple docstring"""
        self.model_tester = BitModelTester(self )
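# Hypothetical quick-start (an addition, outside the tests): a default BitConfig builds
# a randomly initialized BiT backbone whose forward pass can be smoke-tested on CPU.
if __name__ == "__main__":
    smoke_config = BitConfig()
    smoke_model = BitModel(smoke_config).eval()
    with torch.no_grad():
        smoke_out = smoke_model(torch.randn(1, 3, 64, 64))
    print(smoke_out.last_hidden_state.shape)  # expected: torch.Size([1, 2048, 2, 2])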
| 4 |
'''simple docstring'''
from collections.abc import Sequence
def max_subarray_sum( arr : Sequence[float] , allow_empty_subarrays : bool = False ) -> float:
    if not arr:
        return 0
    max_sum = 0 if allow_empty_subarrays else float('''-inf''' )
    curr_sum = 0.0
    for num in arr:
        curr_sum = max(0 if allow_empty_subarrays else num , curr_sum + num )
        max_sum = max(max_sum , curr_sum )
    return max_sum
if __name__ == "__main__":
from doctest import testmod
testmod()
    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(F'{max_subarray_sum(nums) = }')
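    # Added illustration (not in the original): with an all-negative input the default
    # mode returns the largest single element, while allow_empty_subarrays=True admits
    # the empty subarray and returns 0.
    assert max_subarray_sum([-5, -2, -8]) == -2
    assert max_subarray_sum([-5, -2, -8], allow_empty_subarrays=True) == 0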
| 4 | 1 |
'''simple docstring'''
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
class CursorInfo(ctypes.Structure ):
    """simple docstring"""
    _fields_ = [('size', ctypes.c_int), ('visible', ctypes.c_byte)]
def hide_cursor( ) -> Tuple:
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11 )
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle , ctypes.byref(ci ) )
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle , ctypes.byref(ci ) )
    elif os.name == "posix":
        sys.stdout.write('''\033[?25l''' )
        sys.stdout.flush()
def show_cursor( ) -> int:
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11 )
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle , ctypes.byref(ci ) )
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle , ctypes.byref(ci ) )
    elif os.name == "posix":
        sys.stdout.write('''\033[?25h''' )
        sys.stdout.flush()
@contextmanager
def hide( ) -> Dict:
try:
hide_cursor()
yield
finally:
show_cursor()
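# Usage sketch (an addition; the context-manager name `hide` follows the upstream
# accelerate helper and is an assumption here): the cursor is restored even if the
# wrapped block raises.
if __name__ == "__main__":
    import time
    with hide():
        time.sleep(1)  # cursor is hidden while this runs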
| 4 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case__ = logging.get_logger(__name__)
snake_case__ = {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"""
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class UpperCamelCase_ (a__ ):
"""simple docstring"""
_lowerCAmelCase = 'speech_to_text_2'
_lowerCAmelCase = ['past_key_values']
_lowerCAmelCase = {'num_attention_heads': 'decoder_attention_heads', 'hidden_size': 'd_model'}
    def __init__( self : Optional[Any] , vocab_size=10000 , decoder_layers=6 , decoder_ffn_dim=2048 , decoder_attention_heads=4 , decoder_layerdrop=0.0 , use_cache=True , activation_function="relu" , d_model=256 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , decoder_start_token_id=2 , scale_embedding=True , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , max_target_positions=1024 , **kwargs , ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , **kwargs , )
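# Illustrative addition: like any PretrainedConfig subclass, this config round-trips
# through save_pretrained/from_pretrained; the directory below is temporary.
if __name__ == "__main__":
    import tempfile
    demo_config = UpperCamelCase_()
    with tempfile.TemporaryDirectory() as tmp_dir:
        demo_config.save_pretrained(tmp_dir)
        reloaded = UpperCamelCase_.from_pretrained(tmp_dir)
    print(reloaded.d_model)  # 256 with the defaults above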
| 4 | 1 |
'''simple docstring'''
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCamelCase_ (a__, unittest.TestCase ):
"""simple docstring"""
_lowerCAmelCase = DebertaTokenizer
_lowerCAmelCase = True
_lowerCAmelCase = DebertaTokenizerFast
def _a ( self : int ):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''[UNK]''',
]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
        self.special_tokens_map = {'''unk_token''': '''[UNK]'''}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '''\n''' )
        with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(merges ) )
def _a ( self : Dict , **_lowerCamelCase : Dict ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **_lowerCamelCase )
def _a ( self : Optional[int] , _lowerCamelCase : Tuple ):
"""simple docstring"""
        input_text = '''lower newer'''
        output_text = '''lower newer'''
return input_text, output_text
def _a ( self : List[Any] ):
"""simple docstring"""
        tokenizer = self.get_tokenizer()
        text = '''lower newer'''
        bpe_tokens = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
def _a ( self : int ):
"""simple docstring"""
        tokenizer = self.get_tokenizer()
        tokd = tokenizer('''Hello''' , '''World''' )
        expected_token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd['''token_type_ids'''] , expected_token_type_ids )
@slow
def _a ( self : Tuple ):
"""simple docstring"""
        tokenizer = self.tokenizer_class.from_pretrained('''microsoft/deberta-base''' )
        text = tokenizer.encode('''sequence builders''' , add_special_tokens=False )
        text_2 = tokenizer.encode('''multi-sequence build''' , add_special_tokens=False )
        encoded_text_from_decode = tokenizer.encode(
            '''sequence builders''' , add_special_tokens=True , add_prefix_space=False )
        encoded_pair_from_decode = tokenizer.encode(
            '''sequence builders''' , '''multi-sequence build''' , add_special_tokens=True , add_prefix_space=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_2 )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
@slow
def _a ( self : Dict ):
"""simple docstring"""
        tokenizer_classes = [self.tokenizer_class]
if self.test_rust_tokenizer:
tokenizer_classes.append(self.rust_tokenizer_class )
for tokenizer_class in tokenizer_classes:
            tokenizer = tokenizer_class.from_pretrained('''microsoft/deberta-base''' )
            sequences = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
            encoding = tokenizer(sequences , padding=True )
            decoded_sequences = [tokenizer.decode(seq , skip_special_tokens=True ) for seq in encoding['''input_ids''']]
# fmt: off
            expected_encoding = {
'''input_ids''': [
[1, 2118, 11126, 565, 35, 83, 25191, 163, 18854, 13, 12156, 12, 16101, 25376, 13807, 9, 22205, 27893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2118, 11126, 565, 24536, 80, 43797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3724, 1538, 33183, 11303, 43797, 1938, 4, 870, 24165, 29105, 5, 739, 32644, 33183, 11303, 36173, 88, 80, 650, 7821, 45940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 13171, 31, 5, 1836, 9, 32644, 33183, 11303, 4, 2]
],
'''token_type_ids''': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
            expected_decoded_sequence = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
            self.assertDictEqual(encoding.data , expected_encoding )
            for expected, decoded in zip(expected_decoded_sequence , decoded_sequences ):
                self.assertEqual(expected , decoded )
| 4 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""microsoft/table-transformer-detection""": (
"""https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"""
),
}
class TableTransformerConfig(PretrainedConfig ):
    """simple docstring"""
    model_type = 'table-transformer'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'hidden_size': 'd_model',
        'num_attention_heads': 'encoder_attention_heads',
    }
    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        """simple docstring"""
        if backbone_config is not None and use_timm_backbone:
            raise ValueError('''You can't specify both `backbone_config` and `use_timm_backbone`.''' )
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
                backbone_config = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
            elif isinstance(backbone_config , dict ):
                backbone_model_type = backbone_config.get('''model_type''' )
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config )
            # set timm attributes to None
            backbone, use_pretrained_backbone, dilation = None, None, None
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )
    @property
    def num_attention_heads( self ):
        """simple docstring"""
        return self.encoder_attention_heads
    @property
    def hidden_size( self ):
        """simple docstring"""
        return self.d_model
class TableTransformerOnnxConfig(OnnxConfig ):
    """simple docstring"""
    torch_onnx_minimum_version = version.parse('1.11' )
    @property
    def inputs( self ):
        """simple docstring"""
        return OrderedDict(
            [
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
                ('''pixel_mask''', {0: '''batch'''}),
            ] )
    @property
    def atol_for_validation( self ):
        """simple docstring"""
        return 1E-5
    @property
    def default_onnx_opset( self ):
        """simple docstring"""
        return 12
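# --- Illustrative usage sketch (editor addition, not part of the original module) ---
# Builds a default config and inspects the ONNX export metadata. `OnnxConfig(config)`
# is the standard `transformers.onnx` constructor signature; adjust if your version differs.
if __name__ == "__main__":
    config = TableTransformerConfig()
    onnx_config = TableTransformerOnnxConfig(config)
    print(onnx_config.inputs)               # dynamic axes for pixel_values / pixel_mask
    print(onnx_config.atol_for_validation)  # 1e-5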
| 4 | 1 |
'''simple docstring'''
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
snake_case__ = """▁"""
snake_case__ = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
class BertGenerationTokenizationTest(TokenizerTesterMixin, unittest.TestCase ):
    """simple docstring"""
    tokenizer_class = BertGenerationTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp( self : Dict ):
        """simple docstring"""
        super().setUp()
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )
    def test_convert_token_and_id( self : Tuple ):
        """simple docstring"""
        token = '''<s>'''
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab( self : Optional[Any] ):
        """simple docstring"""
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '''<unk>''' )
        self.assertEqual(vocab_keys[1] , '''<s>''' )
        self.assertEqual(vocab_keys[-1] , '''<pad>''' )
        self.assertEqual(len(vocab_keys ) , 1002 )
    def test_vocab_size( self : Dict ):
        """simple docstring"""
        self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
    def test_full_tokenizer( self : Tuple ):
        """simple docstring"""
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokens = tokenizer.tokenize('''This is a test''' )
        self.assertListEqual(tokens , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [285, 46, 10, 170, 382] , )
        tokens = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
    def big_tokenizer( self : List[str] ):
        """simple docstring"""
        return BertGenerationTokenizer.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
@slow
    def test_tokenization_base_easy_symbols( self : str ):
        """simple docstring"""
        symbols = '''Hello World!'''
        original_tokenizer_encodings = [18536, 2260, 101]
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
@slow
    def test_tokenization_base_hard_symbols( self : int ):
        """simple docstring"""
        symbols = (
            '''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
            ''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
        )
        original_tokenizer_encodings = [
871,
419,
358,
946,
991,
2521,
452,
358,
1357,
387,
7751,
3536,
112,
985,
456,
126,
865,
938,
5400,
5734,
458,
1368,
467,
786,
2462,
5246,
1159,
633,
865,
4519,
457,
582,
852,
2557,
427,
916,
508,
405,
34324,
497,
391,
408,
11342,
1244,
385,
100,
938,
985,
456,
574,
362,
12597,
3200,
3129,
1172,
]
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
@require_torch
@slow
    def test_torch_encode_plus_sent_to_model( self : Optional[int] ):
        """simple docstring"""
        import torch
        from transformers import BertGenerationConfig, BertGenerationEncoder
        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys() )[:10]
        sequence = ''' '''.join(first_ten_tokens )
        encoded_sequence = self.big_tokenizer.encode_plus(sequence , return_tensors='''pt''' , return_token_type_ids=False )
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + ''' ''' + sequence] , return_tensors='''pt''' , return_token_type_ids=False )
        config = BertGenerationConfig()
        model = BertGenerationEncoder(config )
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
        with torch.no_grad():
            model(**encoded_sequence )
            model(**batch_encoded_sequence )
@slow
    def test_tokenizer_integration( self : List[Any] ):
        """simple docstring"""
        # fmt: off
A_ : List[str] = {'''input_ids''': [[39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114], [448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_lowerCamelCase , model_name='''google/bert_for_seq_generation_L-24_bbc_encoder''' , revision='''c817d1fd1be2ffa69431227a1fe320544943d4db''' , )
| 4 |
'''simple docstring'''
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class BitModelTester:
    """simple docstring"""
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[8, 16, 32, 64],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        num_groups=1,
    ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths )
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups
    def prepare_config_and_inputs( self : Optional[int] ):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self : Union[str, Any] ):
"""simple docstring"""
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
    def create_and_check_model( self , config , pixel_values , labels ):
        """simple docstring"""
        model = BitModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = BitForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_backbone( self , config , pixel_values , labels ):
        """simple docstring"""
        model = BitBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
        self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
        # verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , 1 )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , 1 )
        self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
    def prepare_config_and_inputs_for_common( self : List[Any] ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class BitModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {'feature-extraction': BitModel, 'image-classification': BitForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp( self : Optional[Any] ):
        """simple docstring"""
        self.model_tester = BitModelTester(self )
        self.config_tester = ConfigTester(self , config_class=BitConfig , has_text_modality=False )
    def test_config( self : Optional[Any] ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties( self : List[Any] ):
"""simple docstring"""
return
@unittest.skip(reason='''Bit does not output attentions''' )
def _a ( self : str ):
"""simple docstring"""
pass
@unittest.skip(reason='''Bit does not use inputs_embeds''' )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
pass
@unittest.skip(reason='''Bit does not support input and output embeddings''' )
def _a ( self : Any ):
"""simple docstring"""
pass
    def test_forward_signature( self : List[Any] ):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self : Optional[Any] ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_backbone( self : Optional[Any] ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs )
    def test_initialization( self : Tuple ):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config=config )
            for name, module in model.named_modules():
                if isinstance(module , (nn.BatchNorm2d, nn.GroupNorm) ):
                    self.assertTrue(
                        torch.all(module.weight == 1 ) , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
                    self.assertTrue(
                        torch.all(module.bias == 0 ) , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
    def test_hidden_states_output( self : int ):
        """simple docstring"""
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states ) , expected_num_stages + 1 )
            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ['''preactivation''', '''bottleneck''']
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict['''output_hidden_states'''] = True
                check_hidden_states_output(inputs_dict , config , model_class )
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict , config , model_class )
@unittest.skip(reason='''Bit does not use feedforward chunking''' )
def _a ( self : Tuple ):
"""simple docstring"""
pass
    def test_for_image_classification( self : str ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@slow
    def test_model_from_pretrained( self : Union[str, Any] ):
        """simple docstring"""
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BitModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_torch
@require_vision
class BitModelIntegrationTest(unittest.TestCase ):
    """simple docstring"""
    @cached_property
    def default_image_processor( self : List[Any] ):
        """simple docstring"""
        return (
            BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
        )
@slow
    def test_inference_image_classification_head( self : Dict ):
        """simple docstring"""
        model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''pt''' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([[-0.6526, -0.5263, -1.4398]] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
@require_torch
class BitBackboneTest(BackboneTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (BitBackbone,) if is_torch_available() else ()
    config_class = BitConfig
    has_attentions = False
    def setUp( self : List[str] ):
        """simple docstring"""
        self.model_tester = BitModelTester(self )
| 4 | 1 |
'''simple docstring'''
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset(IterableDataset ):
    """simple docstring"""
    def __init__( self : Union[str, Any] , p_stop : float = 0.01 , max_length : int = 1000 ):
        """simple docstring"""
        self.p_stop = p_stop
        self.max_length = max_length
    def __iter__( self : Optional[Any] ):
        """simple docstring"""
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop
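# Added note: RandomIterableDataset yields 0, 1, 2, ... and stops at a random point
# (or at max_length), so e.g. list(RandomIterableDataset(p_stop=0.5, max_length=4))
# might be [0] on one run and [0, 1, 2, 3] on another; the shard tests below reseed
# `random` to make this reproducible.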
class UpperCamelCase_ (unittest.TestCase ):
"""simple docstring"""
    def check_batch_sampler_shards( self , batch_sampler , expected , split_batches=False , even_batches=True ):
        """simple docstring"""
        batch_sampler_shards = [
            BatchSamplerShard(batch_sampler , 2 , i , split_batches=split_batches , even_batches=even_batches )
            for i in range(2 )
        ]
        batch_sampler_lists = [list(batch_sampler_shard ) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            self.assertListEqual([len(shard ) for shard in batch_sampler_shards] , [len(e ) for e in expected] )
        self.assertListEqual(batch_sampler_lists , expected )
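    # Added note: BatchSamplerShard(sampler, num_processes, process_index) deals the
    # sampler's batches out across processes; with even_batches=True it cycles back to
    # the beginning so every shard sees the same number of batches — that looping is
    # exactly what the `expected` lists in the tests below encode.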
    def test_batch_sampler_shards_with_no_splits( self : List[str] ):
"""simple docstring"""
A_ : Tuple = BatchSampler(range(24 ) , batch_size=3 , drop_last=_lowerCamelCase )
A_ : Optional[int] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase )
A_ : List[Any] = BatchSampler(range(24 ) , batch_size=3 , drop_last=_lowerCamelCase )
# Expected shouldn't change
self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
A_ : Optional[int] = BatchSampler(range(21 ) , batch_size=3 , drop_last=_lowerCamelCase )
A_ : Any = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
]
self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase )
A_ : str = BatchSampler(range(21 ) , batch_size=3 , drop_last=_lowerCamelCase )
A_ : Tuple = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
A_ : Dict = BatchSampler(range(22 ) , batch_size=3 , drop_last=_lowerCamelCase )
A_ : Dict = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
]
self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase )
A_ : Union[str, Any] = BatchSampler(range(22 ) , batch_size=3 , drop_last=_lowerCamelCase )
A_ : List[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase )
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
A_ : Any = BatchSampler(range(20 ) , batch_size=3 , drop_last=_lowerCamelCase )
A_ : Dict = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
]
self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase )
A_ : List[Any] = BatchSampler(range(20 ) , batch_size=3 , drop_last=_lowerCamelCase )
A_ : List[str] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase )
# Check the shards when the dataset is very small.
A_ : str = BatchSampler(range(2 ) , batch_size=3 , drop_last=_lowerCamelCase )
A_ : Union[str, Any] = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase )
A_ : int = BatchSampler(range(2 ) , batch_size=3 , drop_last=_lowerCamelCase )
A_ : Union[str, Any] = [[], []]
self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase )
    def test_batch_sampler_shards_with_splits( self : str ):
"""simple docstring"""
A_ : Dict = BatchSampler(range(24 ) , batch_size=4 , drop_last=_lowerCamelCase )
A_ : List[Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , split_batches=_lowerCamelCase )
A_ : List[str] = BatchSampler(range(24 ) , batch_size=4 , drop_last=_lowerCamelCase )
# Expected shouldn't change
self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , split_batches=_lowerCamelCase )
# Check the shards when the dataset is not a round multiple of batch size.
A_ : List[Any] = BatchSampler(range(22 ) , batch_size=4 , drop_last=_lowerCamelCase )
A_ : Any = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
]
self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , split_batches=_lowerCamelCase )
A_ : Any = BatchSampler(range(22 ) , batch_size=4 , drop_last=_lowerCamelCase )
A_ : List[str] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , split_batches=_lowerCamelCase )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
A_ : Dict = BatchSampler(range(21 ) , batch_size=4 , drop_last=_lowerCamelCase )
A_ : Tuple = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
]
self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , split_batches=_lowerCamelCase )
A_ : Optional[Any] = BatchSampler(range(21 ) , batch_size=4 , drop_last=_lowerCamelCase )
A_ : Optional[int] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , split_batches=_lowerCamelCase )
# Check the shards when the dataset is very small.
A_ : Optional[Any] = BatchSampler(range(2 ) , batch_size=4 , drop_last=_lowerCamelCase )
A_ : Tuple = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , split_batches=_lowerCamelCase )
A_ : str = BatchSampler(range(2 ) , batch_size=4 , drop_last=_lowerCamelCase )
A_ : Any = [[], []]
self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , split_batches=_lowerCamelCase )
    def test_batch_sampler_shards_with_no_splits_no_even( self : Optional[int] ):
"""simple docstring"""
A_ : Any = BatchSampler(range(24 ) , batch_size=3 , drop_last=_lowerCamelCase )
A_ : Optional[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , even_batches=_lowerCamelCase )
A_ : Tuple = BatchSampler(range(24 ) , batch_size=3 , drop_last=_lowerCamelCase )
# Expected shouldn't change
self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , even_batches=_lowerCamelCase )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
A_ : Tuple = BatchSampler(range(21 ) , batch_size=3 , drop_last=_lowerCamelCase )
A_ : List[str] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , even_batches=_lowerCamelCase )
A_ : List[str] = BatchSampler(range(21 ) , batch_size=3 , drop_last=_lowerCamelCase )
A_ : Dict = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , even_batches=_lowerCamelCase )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
A_ : Optional[int] = BatchSampler(range(22 ) , batch_size=3 , drop_last=_lowerCamelCase )
A_ : Optional[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
]
self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , even_batches=_lowerCamelCase )
A_ : List[str] = BatchSampler(range(22 ) , batch_size=3 , drop_last=_lowerCamelCase )
A_ : Any = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , even_batches=_lowerCamelCase )
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
A_ : Tuple = BatchSampler(range(20 ) , batch_size=3 , drop_last=_lowerCamelCase )
A_ : str = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , even_batches=_lowerCamelCase )
A_ : Optional[int] = BatchSampler(range(20 ) , batch_size=3 , drop_last=_lowerCamelCase )
A_ : Dict = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , even_batches=_lowerCamelCase )
# Check the shards when the dataset is very small.
A_ : str = BatchSampler(range(2 ) , batch_size=3 , drop_last=_lowerCamelCase )
A_ : Optional[Any] = [[[0, 1]], []]
self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , even_batches=_lowerCamelCase )
A_ : List[str] = BatchSampler(range(2 ) , batch_size=3 , drop_last=_lowerCamelCase )
A_ : int = [[], []]
self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , even_batches=_lowerCamelCase )
    def test_batch_sampler_shards_with_splits_no_even( self : Dict ):
"""simple docstring"""
A_ : List[str] = BatchSampler(range(24 ) , batch_size=4 , drop_last=_lowerCamelCase )
A_ : Optional[int] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , split_batches=_lowerCamelCase , even_batches=_lowerCamelCase )
A_ : Union[str, Any] = BatchSampler(range(24 ) , batch_size=4 , drop_last=_lowerCamelCase )
# Expected shouldn't change
self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , split_batches=_lowerCamelCase , even_batches=_lowerCamelCase )
# Check the shards when the dataset is not a round multiple of batch size.
A_ : Optional[int] = BatchSampler(range(22 ) , batch_size=4 , drop_last=_lowerCamelCase )
A_ : int = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , split_batches=_lowerCamelCase , even_batches=_lowerCamelCase )
A_ : int = BatchSampler(range(22 ) , batch_size=4 , drop_last=_lowerCamelCase )
A_ : List[str] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , split_batches=_lowerCamelCase , even_batches=_lowerCamelCase )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
A_ : Optional[Any] = BatchSampler(range(21 ) , batch_size=4 , drop_last=_lowerCamelCase )
A_ : List[str] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , split_batches=_lowerCamelCase , even_batches=_lowerCamelCase )
A_ : List[str] = BatchSampler(range(21 ) , batch_size=4 , drop_last=_lowerCamelCase )
A_ : Optional[int] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , split_batches=_lowerCamelCase , even_batches=_lowerCamelCase )
# Check the shards when the dataset is very small.
A_ : Union[str, Any] = BatchSampler(range(2 ) , batch_size=4 , drop_last=_lowerCamelCase )
A_ : str = [[[0, 1]], []]
self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , split_batches=_lowerCamelCase , even_batches=_lowerCamelCase )
A_ : Any = BatchSampler(range(2 ) , batch_size=4 , drop_last=_lowerCamelCase )
A_ : Union[str, Any] = [[], []]
self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , split_batches=_lowerCamelCase , even_batches=_lowerCamelCase )
    def test_batch_sampler_with_varying_batch_size( self : Tuple ):
        """simple docstring"""
        batch_sampler = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
        batch_sampler_shards = [BatchSamplerShard(batch_sampler , 2 , i , even_batches=False ) for i in range(2 )]
        self.assertEqual(len(batch_sampler_shards[0] ) , 3 )
        self.assertEqual(len(batch_sampler_shards[1] ) , 2 )
        self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] )
        self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] )
    def check_iterable_dataset_shards( self , dataset , seed , batch_size , drop_last=False , num_processes=2 , split_batches=False ):
        """simple docstring"""
        random.seed(seed )
        reference = list(dataset )
        iterable_dataset_shards = [
            IterableDatasetShard(
                dataset , batch_size=batch_size , drop_last=drop_last , num_processes=num_processes , process_index=i , split_batches=split_batches , )
            for i in range(num_processes )
        ]
        iterable_dataset_lists = []
        for iterable_dataset_shard in iterable_dataset_shards:
            # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
            random.seed(seed )
            iterable_dataset_lists.append(list(iterable_dataset_shard ) )
        shard_batch_size = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shard should have the same length, a round multiple of shard_batch_size
        first_list = iterable_dataset_lists[0]
        for l in iterable_dataset_lists[1:]:
            self.assertEqual(len(l ) , len(first_list ) )
            self.assertTrue(len(l ) % shard_batch_size == 0 )
        observed = []
        for idx in range(0 , len(first_list ) , shard_batch_size ):
            for l in iterable_dataset_lists:
                observed += l[idx : idx + shard_batch_size]
        if not drop_last:
            while len(reference ) < len(observed ):
                reference += reference
        self.assertListEqual(observed , reference[: len(observed )] )
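    # Added note: when drop_last=False, IterableDatasetShard keeps the shards even by
    # wrapping around to the start of the dataset, which is why `reference` is repeated
    # above before the final comparison.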
    def test_iterable_dataset_shard( self : Optional[int] ):
        """simple docstring"""
        seed = 42
        dataset = RandomIterableDataset()
        self.check_iterable_dataset_shards(dataset , seed , batch_size=4 , drop_last=False , split_batches=False )
        self.check_iterable_dataset_shards(dataset , seed , batch_size=4 , drop_last=True , split_batches=False )
        self.check_iterable_dataset_shards(dataset , seed , batch_size=4 , drop_last=False , split_batches=True )
        self.check_iterable_dataset_shards(dataset , seed , batch_size=4 , drop_last=True , split_batches=True )
        # Edge case with a very small dataset
        dataset = RandomIterableDataset(max_length=2 )
        self.check_iterable_dataset_shards(dataset , seed , batch_size=4 , drop_last=False , split_batches=False )
        self.check_iterable_dataset_shards(dataset , seed , batch_size=4 , drop_last=True , split_batches=False )
        self.check_iterable_dataset_shards(dataset , seed , batch_size=4 , drop_last=False , split_batches=True )
        self.check_iterable_dataset_shards(dataset , seed , batch_size=4 , drop_last=True , split_batches=True )
    def test_skip_batch_sampler( self : List[str] ):
        """simple docstring"""
        batch_sampler = BatchSampler(range(16 ) , batch_size=4 , drop_last=False )
        new_batch_sampler = SkipBatchSampler(batch_sampler , 2 )
        self.assertListEqual(list(new_batch_sampler ) , [[8, 9, 10, 11], [12, 13, 14, 15]] )
    def test_skip_data_loader( self : List[Any] ):
        """simple docstring"""
        dataloader = SkipDataLoader(list(range(16 ) ) , batch_size=4 , skip_batches=2 )
        self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
    def test_skip_first_batches( self : List[Any] ):
        """simple docstring"""
        dataloader = DataLoader(list(range(16 ) ) , batch_size=4 )
        new_dataloader = skip_first_batches(dataloader , num_batches=2 )
        self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
    def test_end_of_dataloader( self : Optional[int] ):
        """simple docstring"""
        dataloader = DataLoaderShard(list(range(16 ) ) , batch_size=4 )
        for idx, _ in enumerate(dataloader ):
            self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader ):
            self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
    def test_end_of_dataloader_dispatcher( self : Tuple ):
        """simple docstring"""
        Accelerator()
        dataloader = DataLoaderDispatcher(range(16 ) , batch_size=4 )
        for idx, _ in enumerate(dataloader ):
            self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader ):
            self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
| 4 |
'''simple docstring'''
import pprint
import requests
snake_case__ = """https://zenquotes.io/api"""
def snake_case__ ( ) -> list:
return requests.get(API_ENDPOINT_URL + '''/today''' ).json()
def snake_case__ ( ) -> list:
return requests.get(API_ENDPOINT_URL + '''/random''' ).json()
if __name__ == "__main__":
snake_case__ = random_quotes()
pprint.pprint(response)
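# Added note: per the zenquotes.io documentation, both endpoints return a JSON list
# of quote objects shaped like [{"q": "<quote>", "a": "<author>", "h": "<html>"}].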
| 4 | 1 |
'''simple docstring'''
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
is_python_no_less_than_3_10 = sys.version_info >= (3, 10)
def list_field(default=None , metadata=None ):
    return field(default_factory=lambda: default , metadata=metadata )
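# Added note: `list_field` wraps `dataclasses.field(default_factory=...)` so that a
# declaration like `xs: List[int] = list_field(default=[1, 2, 3])` gives each instance
# its own fresh list, avoiding Python's shared-mutable-default pitfall.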
@dataclass
class BasicExample:
    foo: int
    bar: float
    baz: str
    flag: bool
@dataclass
class WithDefaultExample:
    foo: int = 42
    baz: str = field(default="toto", metadata={"help": "help message"})
@dataclass
class WithDefaultBoolExample:
    foo: bool = False
    baz: bool = True
    opt: Optional[bool] = None
class BasicEnum(Enum):
    titi = "titi"
    toto = "toto"
class MixedTypeEnum(Enum):
    titi = "titi"
    toto = "toto"
    fourtytwo = 42
@dataclass
class EnumExample:
    foo: BasicEnum = "toto"
    def __post_init__(self ):
        self.foo = BasicEnum(self.foo )
@dataclass
class MixedTypeEnumExample:
    foo: MixedTypeEnum = "toto"
    def __post_init__(self ):
        self.foo = MixedTypeEnum(self.foo )
@dataclass
class OptionalExample:
    foo: Optional[int] = None
    bar: Optional[float] = field(default=None, metadata={"help": "help message"})
    baz: Optional[str] = None
    ces: Optional[List[str]] = list_field(default=[])
    des: Optional[List[int]] = list_field(default=[])
@dataclass
class ListExample:
    foo_int: List[int] = list_field(default=[])
    bar_int: List[int] = list_field(default=[1, 2, 3])
    foo_str: List[str] = list_field(default=["Hallo", "Bonjour", "Hello"])
    foo_float: List[float] = list_field(default=[0.1, 0.2, 0.3])
@dataclass
class RequiredExample:
    required_list: List[int] = field()
    required_str: str = field()
    required_enum: BasicEnum = field()
    def __post_init__(self ):
        self.required_enum = BasicEnum(self.required_enum )
@dataclass
class StringLiteralAnnotationExample:
    foo: int
    required_enum: "BasicEnum" = field()
    opt: "Optional[bool]" = None
    baz: "str" = field(default="toto", metadata={"help": "help message"})
    foo_str: "List[str]" = list_field(default=["Hallo", "Bonjour", "Hello"])
if is_python_no_less_than_3_10:
    @dataclass
    class WithDefaultBoolExamplePep604:
        foo: bool = False
        baz: bool = True
        opt: bool | None = None
    @dataclass
    class OptionalExamplePep604:
        foo: int | None = None
        bar: float | None = field(default=None, metadata={"help": "help message"})
        baz: str | None = None
        ces: list[str] | None = list_field(default=[])
        des: list[int] | None = list_field(default=[])
class HfArgumentParserTest(unittest.TestCase ):
"""simple docstring"""
    def argparsersEqual( self , a : argparse.ArgumentParser , b : argparse.ArgumentParser ):
        """simple docstring"""
        self.assertEqual(len(a._actions ) , len(b._actions ) )
        for x, y in zip(a._actions , b._actions ):
            xx = {k: v for k, v in vars(x ).items() if k != '''container'''}
            yy = {k: v for k, v in vars(y ).items() if k != '''container'''}
            # Choices with mixed type have custom function as "type"
            # So we need to compare results directly for equality
            if xx.get('''choices''' , None ) and yy.get('''choices''' , None ):
                for expected_choice in yy["choices"] + xx["choices"]:
                    self.assertEqual(xx['''type'''](expected_choice ) , yy['''type'''](expected_choice ) )
                del xx["type"], yy["type"]
            self.assertEqual(xx , yy )
    def test_basic( self : Optional[int] ):
        """simple docstring"""
        parser = HfArgumentParser(BasicExample )
        expected = argparse.ArgumentParser()
        expected.add_argument('''--foo''' , type=int , required=True )
        expected.add_argument('''--bar''' , type=float , required=True )
        expected.add_argument('''--baz''' , type=str , required=True )
        expected.add_argument('''--flag''' , type=string_to_bool , default=False , const=True , nargs='''?''' )
        self.argparsersEqual(parser , expected )
        args = ['''--foo''', '''1''', '''--baz''', '''quux''', '''--bar''', '''0.5''']
        (example,) = parser.parse_args_into_dataclasses(args , look_for_args_file=False )
        self.assertFalse(example.flag )
    def test_with_default( self : Dict ):
        """simple docstring"""
        parser = HfArgumentParser(WithDefaultExample )
        expected = argparse.ArgumentParser()
        expected.add_argument('''--foo''' , default=42 , type=int )
        expected.add_argument('''--baz''' , default='''toto''' , type=str , help='''help message''' )
        self.argparsersEqual(parser , expected )
    def test_with_default_bool( self : Dict ):
        """simple docstring"""
        expected = argparse.ArgumentParser()
        expected.add_argument('''--foo''' , type=string_to_bool , default=False , const=True , nargs='''?''' )
        expected.add_argument('''--baz''' , type=string_to_bool , default=True , const=True , nargs='''?''' )
        # A boolean no_* argument always has to come after its "default: True" regular counter-part
        # and its default must be set to False
        expected.add_argument('''--no_baz''' , action='''store_false''' , default=False , dest='''baz''' )
        expected.add_argument('''--opt''' , type=string_to_bool , default=None )
        dataclass_types = [WithDefaultBoolExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(WithDefaultBoolExamplePep604 )
        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type )
            self.argparsersEqual(parser , expected )
            args = parser.parse_args([] )
            self.assertEqual(args , Namespace(foo=False , baz=True , opt=None ) )
            args = parser.parse_args(['''--foo''', '''--no_baz'''] )
            self.assertEqual(args , Namespace(foo=True , baz=False , opt=None ) )
            args = parser.parse_args(['''--foo''', '''--baz'''] )
            self.assertEqual(args , Namespace(foo=True , baz=True , opt=None ) )
            args = parser.parse_args(['''--foo''', '''True''', '''--baz''', '''True''', '''--opt''', '''True'''] )
            self.assertEqual(args , Namespace(foo=True , baz=True , opt=True ) )
            args = parser.parse_args(['''--foo''', '''False''', '''--baz''', '''False''', '''--opt''', '''False'''] )
            self.assertEqual(args , Namespace(foo=False , baz=False , opt=False ) )
    def test_with_enum( self : List[Any] ):
        """simple docstring"""
        parser = HfArgumentParser(MixedTypeEnumExample )
        expected = argparse.ArgumentParser()
        expected.add_argument(
            '''--foo''' , default='''toto''' , choices=['''titi''', '''toto''', 42] , type=make_choice_type_function(['''titi''', '''toto''', 42] ) , )
        self.argparsersEqual(parser , expected )
        args = parser.parse_args([] )
        self.assertEqual(args.foo , '''toto''' )
        enum_ex = parser.parse_args_into_dataclasses([] )[0]
        self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
        args = parser.parse_args(['''--foo''', '''titi'''] )
        self.assertEqual(args.foo , '''titi''' )
        enum_ex = parser.parse_args_into_dataclasses(['''--foo''', '''titi'''] )[0]
        self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
        args = parser.parse_args(['''--foo''', '''42'''] )
        self.assertEqual(args.foo , 42 )
        enum_ex = parser.parse_args_into_dataclasses(['''--foo''', '''42'''] )[0]
        self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
    def test_with_literal( self : Optional[int] ):
        """simple docstring"""
        @dataclass
        class LiteralExample:
            foo: Literal["titi", "toto", 42] = "toto"
        parser = HfArgumentParser(LiteralExample )
        expected = argparse.ArgumentParser()
        expected.add_argument(
            '''--foo''' , default='''toto''' , choices=('''titi''', '''toto''', 42) , type=make_choice_type_function(['''titi''', '''toto''', 42] ) , )
        self.argparsersEqual(parser , expected )
        args = parser.parse_args([] )
        self.assertEqual(args.foo , '''toto''' )
        args = parser.parse_args(['''--foo''', '''titi'''] )
        self.assertEqual(args.foo , '''titi''' )
        args = parser.parse_args(['''--foo''', '''42'''] )
        self.assertEqual(args.foo , 42 )
    def test_with_list( self : Dict ):
        """simple docstring"""
        parser = HfArgumentParser(ListExample )
        expected = argparse.ArgumentParser()
        expected.add_argument('''--foo_int''' , nargs='''+''' , default=[] , type=int )
        expected.add_argument('''--bar_int''' , nargs='''+''' , default=[1, 2, 3] , type=int )
        expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=str )
        expected.add_argument('''--foo_float''' , nargs='''+''' , default=[0.1, 0.2, 0.3] , type=float )
        self.argparsersEqual(parser , expected )
        args = parser.parse_args([] )
        self.assertEqual(
            args , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['''Hallo''', '''Bonjour''', '''Hello'''] , foo_float=[0.1, 0.2, 0.3] ) , )
        args = parser.parse_args('''--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'''.split() )
        self.assertEqual(args , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['''a''', '''b''', '''c'''] , foo_float=[0.1, 0.7] ) )
    def test_with_optional( self : Dict ):
        """simple docstring"""
        expected = argparse.ArgumentParser()
        expected.add_argument('''--foo''' , default=None , type=int )
        expected.add_argument('''--bar''' , default=None , type=float , help='''help message''' )
        expected.add_argument('''--baz''' , default=None , type=str )
        expected.add_argument('''--ces''' , nargs='''+''' , default=[] , type=str )
        expected.add_argument('''--des''' , nargs='''+''' , default=[] , type=int )
        dataclass_types = [OptionalExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(OptionalExamplePep604 )
        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type )
            self.argparsersEqual(parser , expected )
            args = parser.parse_args([] )
            self.assertEqual(args , Namespace(foo=None , bar=None , baz=None , ces=[] , des=[] ) )
            args = parser.parse_args('''--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'''.split() )
            self.assertEqual(args , Namespace(foo=12 , bar=3.14 , baz='''42''' , ces=['''a''', '''b''', '''c'''] , des=[1, 2, 3] ) )
    def test_with_required( self : List[Any] ):
        """simple docstring"""
        parser = HfArgumentParser(RequiredExample )
        expected = argparse.ArgumentParser()
        expected.add_argument('''--required_list''' , nargs='''+''' , type=int , required=True )
        expected.add_argument('''--required_str''' , type=str , required=True )
        expected.add_argument(
            '''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=True , )
        self.argparsersEqual(parser , expected )
    def test_with_string_literal_annotation( self : Optional[Any] ):
        """simple docstring"""
        parser = HfArgumentParser(StringLiteralAnnotationExample )
        expected = argparse.ArgumentParser()
        expected.add_argument('''--foo''' , type=int , required=True )
        expected.add_argument(
            '''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=True , )
        expected.add_argument('''--opt''' , type=string_to_bool , default=None )
        expected.add_argument('''--baz''' , default='''toto''' , type=str , help='''help message''' )
        expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=str )
        self.argparsersEqual(parser , expected )
    def test_parse_dict( self : Tuple ):
        """simple docstring"""
        parser = HfArgumentParser(BasicExample )
        args_dict = {
            '''foo''': 12,
            '''bar''': 3.14,
            '''baz''': '''42''',
            '''flag''': True,
        }
        parsed_args = parser.parse_dict(args_dict )[0]
        args = BasicExample(**args_dict )
        self.assertEqual(parsed_args , args )
    def test_parse_dict_extra_key( self : List[str] ):
        """simple docstring"""
        parser = HfArgumentParser(BasicExample )
        args_dict = {
            '''foo''': 12,
            '''bar''': 3.14,
            '''baz''': '''42''',
            '''flag''': True,
            '''extra''': 42,
        }
        self.assertRaises(ValueError , parser.parse_dict , args_dict , allow_extra_keys=False )
    def test_parse_json( self : Optional[Any] ):
        """simple docstring"""
        parser = HfArgumentParser(BasicExample )
        args_dict_for_json = {
            '''foo''': 12,
            '''bar''': 3.14,
            '''baz''': '''42''',
            '''flag''': True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir , '''temp_json''' )
            os.mkdir(temp_local_path )
            with open(temp_local_path + '''.json''' , '''w+''' ) as f:
                json.dump(args_dict_for_json , f )
            parsed_args = parser.parse_json_file(Path(temp_local_path + '''.json''' ) )[0]
            args = BasicExample(**args_dict_for_json )
        self.assertEqual(parsed_args , args )
    def test_parse_yaml( self : int ):
        """simple docstring"""
        parser = HfArgumentParser(BasicExample )
        args_dict_for_yaml = {
            '''foo''': 12,
            '''bar''': 3.14,
            '''baz''': '''42''',
            '''flag''': True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir , '''temp_yaml''' )
            os.mkdir(temp_local_path )
            with open(temp_local_path + '''.yaml''' , '''w+''' ) as f:
                yaml.dump(args_dict_for_yaml , f )
            parsed_args = parser.parse_yaml_file(Path(temp_local_path + '''.yaml''' ) )[0]
            args = BasicExample(**args_dict_for_yaml )
        self.assertEqual(parsed_args , args )
    def test_integration_training_args( self : Union[str, Any] ):
        """simple docstring"""
        parser = HfArgumentParser(TrainingArguments )
        self.assertIsNotNone(parser )
| 4 |
'''simple docstring'''
from __future__ import annotations
class IIRFilter:
    """N-order IIR filter."""

    def __init__(self, order: int):
        self.order = order
        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order
        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order

    def set_coefficients(self, a_coeffs: list[float], b_coeffs: list[float]) -> None:
        if len(a_coeffs) < self.order:
            a_coeffs = [1.0, *a_coeffs]
        if len(a_coeffs) != self.order + 1:
            raise ValueError(
                f'Expected a_coeffs to have {self.order + 1} elements '
                f'for {self.order}-order filter, got {len(a_coeffs)}'
            )
        if len(b_coeffs) != self.order + 1:
            raise ValueError(
                f'Expected b_coeffs to have {self.order + 1} elements '
                f'for {self.order}-order filter, got {len(b_coeffs)}'
            )
        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs

    def process(self, sample: float) -> float:
        result = 0.0
        # Start at index 1 and do index 0 at the end.
        for i in range(1, self.order + 1):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )
        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
        # Shift the input/output history by one sample and store the new values.
        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]
        self.input_history[0] = sample
        self.output_history[0] = result
        return result
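# Illustrative usage sketch (editor addition; the coefficient values below are
# made up for demonstration and do not correspond to a designed filter):
if __name__ == "__main__":
    demo_filter = IIRFilter(2)
    demo_filter.set_coefficients([1.0, -1.8, 0.81], [0.1, 0.2, 0.1])
    print([round(demo_filter.process(x), 4) for x in (1.0, 0.0, 0.0, 0.0)])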
| 4 | 1 |
'''simple docstring'''
def one_pence() -> int:
    return 1


def two_pence(x: int) -> int:
    return 0 if x < 0 else two_pence(x - 2) + one_pence()


def five_pence(x: int) -> int:
    return 0 if x < 0 else five_pence(x - 5) + two_pence(x)


def ten_pence(x: int) -> int:
    return 0 if x < 0 else ten_pence(x - 1_0) + five_pence(x)


def twenty_pence(x: int) -> int:
    return 0 if x < 0 else twenty_pence(x - 2_0) + ten_pence(x)


def fifty_pence(x: int) -> int:
    return 0 if x < 0 else fifty_pence(x - 5_0) + twenty_pence(x)


def one_pound(x: int) -> int:
    return 0 if x < 0 else one_pound(x - 1_0_0) + fifty_pence(x)


def two_pound(x: int) -> int:
    return 0 if x < 0 else two_pound(x - 2_0_0) + one_pound(x)


def solution(x: int = 2_0_0) -> int:
    return two_pound(x)
if __name__ == "__main__":
print(solution(int(input().strip())))
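# Worked check (editor addition): for the classic 200-pence target the
# recursion above counts 73682 coin combinations.
# >>> solution(2_0_0)
# 73682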
| 4 |
'''simple docstring'''
class Node:
    """A binary search tree node."""

    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None

    def insert(self, val):
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
        else:
            self.val = val


def inorder(root, res) -> None:
    # Recursive in-order traversal: left subtree, node, right subtree.
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)


def tree_sort(arr: list) -> list:
    # Build BST
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # Traverse BST in order.
    res: list = []
    inorder(root, res)
    return res
if __name__ == "__main__":
print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
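# Worked check (editor addition): the driver above builds a BST from
# [10, 1, 3, 2, 9, 14, 13] and reads it back in order, printing
# [1, 2, 3, 9, 10, 13, 14].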
| 4 | 1 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
snake_case__ = logging.get_logger(__name__)
class UpperCamelCase_ (a__ ):
"""simple docstring"""
_lowerCAmelCase = 'upernet'
def __init__( self : int , _lowerCamelCase : str=None , _lowerCamelCase : Union[str, Any]=512 , _lowerCamelCase : Optional[int]=0.02 , _lowerCamelCase : str=[1, 2, 3, 6] , _lowerCamelCase : Any=True , _lowerCamelCase : Tuple=0.4 , _lowerCamelCase : List[str]=384 , _lowerCamelCase : Tuple=256 , _lowerCamelCase : Optional[Any]=1 , _lowerCamelCase : Tuple=False , _lowerCamelCase : int=255 , **_lowerCamelCase : Tuple , ):
"""simple docstring"""
super().__init__(**_lowerCamelCase )
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
A_ : Optional[Any] = CONFIG_MAPPING['''resnet'''](out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] )
elif isinstance(_lowerCamelCase , _lowerCamelCase ):
A_ : Optional[Any] = backbone_config.get('''model_type''' )
A_ : List[Any] = CONFIG_MAPPING[backbone_model_type]
A_ : Tuple = config_class.from_dict(_lowerCamelCase )
A_ : int = backbone_config
A_ : str = hidden_size
A_ : List[str] = initializer_range
A_ : Union[str, Any] = pool_scales
A_ : List[Any] = use_auxiliary_head
A_ : Dict = auxiliary_loss_weight
A_ : Tuple = auxiliary_in_channels
A_ : List[str] = auxiliary_channels
A_ : str = auxiliary_num_convs
A_ : List[Any] = auxiliary_concat_input
A_ : Optional[int] = loss_ignore_index
def _a ( self : Any ):
"""simple docstring"""
A_ : List[str] = copy.deepcopy(self.__dict__ )
A_ : List[str] = self.backbone_config.to_dict()
A_ : Tuple = self.__class__.model_type
return output
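# Illustrative sketch (editor addition): this mirrors transformers' UperNetConfig,
# so with no `backbone_config` the default ResNet backbone is configured.
# config = UperNetConfig()
# assert config.backbone_config.model_type == "resnet"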
| 4 |
'''simple docstring'''
def heaps(arr: list) -> list:
    # Generate all permutations of `arr` with Heap's algorithm.
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(k: int, arr: list):
        if k == 1:
            res.append(tuple(arr[:]))
            return

        generate(k - 1, arr)

        for i in range(k - 1):
            if k % 2 == 0:  # k is even
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else:  # k is odd
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    arr = [int(item) for item in user_input.split(""",""")]
print(heaps(arr))
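# Worked check (editor addition): heaps([1, 2, 3]) yields all 3! = 6 permutations
# in Heap's order: [(1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)].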
| 4 | 1 |
'''simple docstring'''
def heaps(arr: list) -> list:
    # Generate all permutations of `arr` with Heap's algorithm.
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(k: int, arr: list):
        if k == 1:
            res.append(tuple(arr[:]))
            return

        generate(k - 1, arr)

        for i in range(k - 1):
            if k % 2 == 0:  # k is even
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else:  # k is odd
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    arr = [int(item) for item in user_input.split(""",""")]
print(heaps(arr))
| 4 |
'''simple docstring'''
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class UpperCamelCase_ (unittest.TestCase ):
"""simple docstring"""
def _a ( self : Dict ):
"""simple docstring"""
A_ : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
A_ : Tuple = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowerCamelCase )
A_ : Dict = -1
A_ : List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCamelCase )
A_ : Any = model.generate(_lowerCamelCase , max_new_tokens=10 , do_sample=_lowerCamelCase )
A_ : List[str] = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
A_ : List[str] = TextStreamer(_lowerCamelCase )
model.generate(_lowerCamelCase , max_new_tokens=10 , do_sample=_lowerCamelCase , streamer=_lowerCamelCase )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
A_ : Dict = cs.out[:-1]
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
def _a ( self : Tuple ):
"""simple docstring"""
A_ : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
A_ : List[str] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowerCamelCase )
A_ : Dict = -1
A_ : List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCamelCase )
A_ : Optional[int] = model.generate(_lowerCamelCase , max_new_tokens=10 , do_sample=_lowerCamelCase )
A_ : str = tokenizer.decode(greedy_ids[0] )
A_ : int = TextIteratorStreamer(_lowerCamelCase )
A_ : List[Any] = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
A_ : List[Any] = Thread(target=model.generate , kwargs=_lowerCamelCase )
thread.start()
A_ : List[Any] = ''''''
for new_text in streamer:
streamer_text += new_text
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
def _a ( self : int ):
"""simple docstring"""
A_ : List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
A_ : List[Any] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowerCamelCase )
A_ : List[str] = -1
A_ : Any = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCamelCase )
A_ : Tuple = model.generate(_lowerCamelCase , max_new_tokens=10 , do_sample=_lowerCamelCase )
A_ : Tuple = greedy_ids[:, input_ids.shape[1] :]
A_ : Tuple = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
A_ : Any = TextStreamer(_lowerCamelCase , skip_prompt=_lowerCamelCase )
model.generate(_lowerCamelCase , max_new_tokens=10 , do_sample=_lowerCamelCase , streamer=_lowerCamelCase )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
A_ : Any = cs.out[:-1]
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
def _a ( self : List[Any] ):
"""simple docstring"""
A_ : List[Any] = AutoTokenizer.from_pretrained('''distilgpt2''' )
A_ : Tuple = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(_lowerCamelCase )
A_ : List[Any] = -1
A_ : Union[str, Any] = torch.ones((1, 5) , device=_lowerCamelCase ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
A_ : List[Any] = TextStreamer(_lowerCamelCase , skip_special_tokens=_lowerCamelCase )
model.generate(_lowerCamelCase , max_new_tokens=1 , do_sample=_lowerCamelCase , streamer=_lowerCamelCase )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
A_ : List[str] = cs.out[:-1] # Remove the final "\n"
A_ : List[Any] = tokenizer(_lowerCamelCase , return_tensors='''pt''' )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A_ : str = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
A_ : str = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowerCamelCase )
A_ : Union[str, Any] = -1
A_ : Union[str, Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCamelCase )
A_ : List[str] = TextIteratorStreamer(_lowerCamelCase , timeout=0.0_01 )
A_ : str = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
A_ : List[str] = Thread(target=model.generate , kwargs=_lowerCamelCase )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(_lowerCamelCase ):
A_ : str = ''''''
for new_text in streamer:
streamer_text += new_text
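# Illustrative sketch (editor addition): the non-test shape of the iterator
# pattern exercised above — generation runs on a worker thread while the main
# thread drains the streamer.
# streamer = TextIteratorStreamer(tokenizer)
# Thread(target=model.generate, kwargs={"input_ids": input_ids, "streamer": streamer, "max_new_tokens": 10}).start()
# text = "".join(streamer)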
| 4 | 1 |
'''simple docstring'''
snake_case__ = frozenset(
[
"""prompt""",
"""height""",
"""width""",
"""guidance_scale""",
"""negative_prompt""",
"""prompt_embeds""",
"""negative_prompt_embeds""",
"""cross_attention_kwargs""",
]
)
snake_case__ = frozenset(["""prompt""", """negative_prompt"""])
snake_case__ = frozenset([])
snake_case__ = frozenset(["""image"""])
snake_case__ = frozenset(
[
"""image""",
"""height""",
"""width""",
"""guidance_scale""",
]
)
snake_case__ = frozenset(["""image"""])
snake_case__ = frozenset(
[
"""prompt""",
"""image""",
"""height""",
"""width""",
"""guidance_scale""",
"""negative_prompt""",
"""prompt_embeds""",
"""negative_prompt_embeds""",
]
)
snake_case__ = frozenset(["""prompt""", """image""", """negative_prompt"""])
snake_case__ = frozenset(
[
# Text guided image variation with an image mask
"""prompt""",
"""image""",
"""mask_image""",
"""height""",
"""width""",
"""guidance_scale""",
"""negative_prompt""",
"""prompt_embeds""",
"""negative_prompt_embeds""",
]
)
snake_case__ = frozenset(["""prompt""", """image""", """mask_image""", """negative_prompt"""])
snake_case__ = frozenset(
[
# image variation with an image mask
"""image""",
"""mask_image""",
"""height""",
"""width""",
"""guidance_scale""",
]
)
snake_case__ = frozenset(["""image""", """mask_image"""])
snake_case__ = frozenset(
[
"""example_image""",
"""image""",
"""mask_image""",
"""height""",
"""width""",
"""guidance_scale""",
]
)
snake_case__ = frozenset(["""example_image""", """image""", """mask_image"""])
snake_case__ = frozenset(["""class_labels"""])
snake_case__ = frozenset(["""class_labels"""])
snake_case__ = frozenset(["""batch_size"""])
snake_case__ = frozenset([])
snake_case__ = frozenset(["""batch_size"""])
snake_case__ = frozenset([])
snake_case__ = frozenset(
[
"""prompt""",
"""audio_length_in_s""",
"""guidance_scale""",
"""negative_prompt""",
"""prompt_embeds""",
"""negative_prompt_embeds""",
"""cross_attention_kwargs""",
]
)
snake_case__ = frozenset(["""prompt""", """negative_prompt"""])
snake_case__ = frozenset(["""input_tokens"""])
snake_case__ = frozenset(["""input_tokens"""])
| 4 |
'''simple docstring'''
import heapq
def greedy_min_vertex_cover(graph: dict) -> set[int]:
    queue: list[list] = []

    # for each node, add the node and the rank of the node to the queue;
    # using the heapq module the queue is filled like a priority queue.
    # heapq implements a min-priority queue, so -1 * len(v) is used to turn
    # it into a max-priority queue on the node degree.
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])

    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()

    # while the queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract the vertex with max rank from the queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)

        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if v has no adjacent nodes, skip it
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem,
            # remove argmax from elem's adjacency list and update its rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1

        # re-order the queue
        heapq.heapify(queue)
    return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
snake_case__ = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(F'Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}')
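# Worked check (editor addition): for the example graph above the greedy
# strategy picks {0, 1, 2, 4}.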
| 4 | 1 |
'''simple docstring'''
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
snake_case__ = logging.get_logger(__name__)
def snake_case__ ( lowerCamelCase__ : str ) -> Any:
A_ : Dict = SwinConfig.from_pretrained(
'''microsoft/swin-tiny-patch4-window7-224''' , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] )
A_ : int = MaskFormerConfig(backbone_config=lowerCamelCase__ )
A_ : int = '''huggingface/label-files'''
if "ade20k-full" in model_name:
# this should be ok
A_ : str = 8_4_7
A_ : int = '''maskformer-ade20k-full-id2label.json'''
elif "ade" in model_name:
# this should be ok
A_ : str = 1_5_0
A_ : List[Any] = '''ade20k-id2label.json'''
elif "coco-stuff" in model_name:
# this should be ok
A_ : List[str] = 1_7_1
A_ : List[str] = '''maskformer-coco-stuff-id2label.json'''
elif "coco" in model_name:
# TODO
A_ : int = 1_3_3
A_ : Optional[int] = '''coco-panoptic-id2label.json'''
elif "cityscapes" in model_name:
# this should be ok
A_ : Optional[int] = 1_9
A_ : List[Any] = '''cityscapes-id2label.json'''
elif "vistas" in model_name:
# this should be ok
A_ : Optional[Any] = 6_5
A_ : Dict = '''mapillary-vistas-id2label.json'''
A_ : str = json.load(open(hf_hub_download(lowerCamelCase__ , lowerCamelCase__ , repo_type='''dataset''' ) , '''r''' ) )
A_ : Tuple = {int(k): v for k, v in idalabel.items()}
return config
def snake_case__ ( lowerCamelCase__ : Optional[Any] ) -> Optional[int]:
A_ : Optional[int] = []
# stem
# fmt: off
rename_keys.append(('''backbone.patch_embed.proj.weight''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.patch_embed.proj.bias''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.patch_embed.norm.weight''', '''model.pixel_level_module.encoder.model.embeddings.norm.weight''') )
rename_keys.append(('''backbone.patch_embed.norm.bias''', '''model.pixel_level_module.encoder.model.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'backbone.layers.{i}.blocks.{j}.norm1.weight', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((f'backbone.layers.{i}.blocks.{j}.norm1.bias', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((f'backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((f'backbone.layers.{i}.blocks.{j}.attn.relative_position_index', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((f'backbone.layers.{i}.blocks.{j}.attn.proj.weight', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((f'backbone.layers.{i}.blocks.{j}.attn.proj.bias', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((f'backbone.layers.{i}.blocks.{j}.norm2.weight', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((f'backbone.layers.{i}.blocks.{j}.norm2.bias', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((f'backbone.layers.{i}.blocks.{j}.mlp.fc1.weight', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((f'backbone.layers.{i}.blocks.{j}.mlp.fc1.bias', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((f'backbone.layers.{i}.blocks.{j}.mlp.fc2.weight', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((f'backbone.layers.{i}.blocks.{j}.mlp.fc2.bias', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((f'backbone.layers.{i}.downsample.reduction.weight', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((f'backbone.layers.{i}.downsample.norm.weight', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((f'backbone.layers.{i}.downsample.norm.bias', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append((f'backbone.norm{i}.weight', f'model.pixel_level_module.encoder.hidden_states_norms.{i}.weight') )
rename_keys.append((f'backbone.norm{i}.bias', f'model.pixel_level_module.encoder.hidden_states_norms.{i}.bias') )
# FPN
rename_keys.append(('''sem_seg_head.layer_4.weight''', '''model.pixel_level_module.decoder.fpn.stem.0.weight''') )
rename_keys.append(('''sem_seg_head.layer_4.norm.weight''', '''model.pixel_level_module.decoder.fpn.stem.1.weight''') )
rename_keys.append(('''sem_seg_head.layer_4.norm.bias''', '''model.pixel_level_module.decoder.fpn.stem.1.bias''') )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((f'sem_seg_head.adapter_{source_index}.weight', f'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight') )
rename_keys.append((f'sem_seg_head.adapter_{source_index}.norm.weight', f'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight') )
rename_keys.append((f'sem_seg_head.adapter_{source_index}.norm.bias', f'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias') )
rename_keys.append((f'sem_seg_head.layer_{source_index}.weight', f'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight') )
rename_keys.append((f'sem_seg_head.layer_{source_index}.norm.weight', f'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight') )
rename_keys.append((f'sem_seg_head.layer_{source_index}.norm.bias', f'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias') )
rename_keys.append(('''sem_seg_head.mask_features.weight''', '''model.pixel_level_module.decoder.mask_projection.weight''') )
rename_keys.append(('''sem_seg_head.mask_features.bias''', '''model.pixel_level_module.decoder.mask_projection.bias''') )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight', f'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight') )
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias', f'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias') )
# cross-attention out projection
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight', f'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight') )
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias', f'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias') )
# MLP 1
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight', f'model.transformer_module.decoder.layers.{idx}.fc1.weight') )
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias', f'model.transformer_module.decoder.layers.{idx}.fc1.bias') )
# MLP 2
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight', f'model.transformer_module.decoder.layers.{idx}.fc2.weight') )
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias', f'model.transformer_module.decoder.layers.{idx}.fc2.bias') )
# layernorm 1 (self-attention layernorm)
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight', f'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight') )
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias', f'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias') )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight', f'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight') )
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias', f'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias') )
# layernorm 3 (final layernorm)
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight', f'model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight') )
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias', f'model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias') )
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.weight''', '''model.transformer_module.decoder.layernorm.weight''') )
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.bias''', '''model.transformer_module.decoder.layernorm.bias''') )
# heads on top
rename_keys.append(('''sem_seg_head.predictor.query_embed.weight''', '''model.transformer_module.queries_embedder.weight''') )
rename_keys.append(('''sem_seg_head.predictor.input_proj.weight''', '''model.transformer_module.input_projection.weight''') )
rename_keys.append(('''sem_seg_head.predictor.input_proj.bias''', '''model.transformer_module.input_projection.bias''') )
rename_keys.append(('''sem_seg_head.predictor.class_embed.weight''', '''class_predictor.weight''') )
rename_keys.append(('''sem_seg_head.predictor.class_embed.bias''', '''class_predictor.bias''') )
for i in range(3 ):
rename_keys.append((f'sem_seg_head.predictor.mask_embed.layers.{i}.weight', f'mask_embedder.{i}.0.weight') )
rename_keys.append((f'sem_seg_head.predictor.mask_embed.layers.{i}.bias', f'mask_embedder.{i}.0.bias') )
# fmt: on
return rename_keys
def snake_case__ ( lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Dict ) -> int:
A_ : str = dct.pop(lowerCamelCase__ )
A_ : Dict = val
def snake_case__ ( lowerCamelCase__ : Any , lowerCamelCase__ : Optional[int] ) -> int:
A_ : Any = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
A_ : List[str] = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
A_ : Any = state_dict.pop(f'backbone.layers.{i}.blocks.{j}.attn.qkv.weight' )
A_ : Dict = state_dict.pop(f'backbone.layers.{i}.blocks.{j}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
A_ : Tuple = in_proj_weight[:dim, :]
A_ : Any = in_proj_bias[: dim]
A_ : str = in_proj_weight[
dim : dim * 2, :
]
A_ : Union[str, Any] = in_proj_bias[
dim : dim * 2
]
A_ : str = in_proj_weight[
-dim :, :
]
A_ : str = in_proj_bias[-dim :]
# fmt: on
def snake_case__ ( lowerCamelCase__ : str , lowerCamelCase__ : Tuple ) -> Union[str, Any]:
# fmt: off
A_ : List[Any] = config.decoder_config.hidden_size
for idx in range(config.decoder_config.decoder_layers ):
# read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
A_ : List[Any] = state_dict.pop(f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight' )
A_ : Tuple = state_dict.pop(f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
A_ : Optional[int] = in_proj_weight[: hidden_size, :]
A_ : str = in_proj_bias[:config.hidden_size]
A_ : int = in_proj_weight[hidden_size : hidden_size * 2, :]
A_ : str = in_proj_bias[hidden_size : hidden_size * 2]
A_ : Dict = in_proj_weight[-hidden_size :, :]
A_ : Tuple = in_proj_bias[-hidden_size :]
# read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
A_ : str = state_dict.pop(f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight' )
A_ : Optional[Any] = state_dict.pop(f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
A_ : List[Any] = in_proj_weight[: hidden_size, :]
A_ : Optional[Any] = in_proj_bias[:config.hidden_size]
A_ : Optional[Any] = in_proj_weight[hidden_size : hidden_size * 2, :]
A_ : Dict = in_proj_bias[hidden_size : hidden_size * 2]
A_ : Dict = in_proj_weight[-hidden_size :, :]
A_ : Any = in_proj_bias[-hidden_size :]
# fmt: on
def snake_case__ ( ) -> torch.Tensor:
A_ : Optional[int] = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
A_ : Any = Image.open(requests.get(lowerCamelCase__ , stream=lowerCamelCase__ ).raw )
return im
@torch.no_grad()
def snake_case__ ( lowerCamelCase__ : str , lowerCamelCase__ : str , lowerCamelCase__ : str , lowerCamelCase__ : bool = False ) -> Tuple:
A_ : Tuple = get_maskformer_config(lowerCamelCase__ )
# load original state_dict
with open(lowerCamelCase__ , '''rb''' ) as f:
A_ : List[Any] = pickle.load(lowerCamelCase__ )
A_ : List[str] = data['''model''']
# for name, param in state_dict.items():
# print(name, param.shape)
# rename keys
A_ : Union[str, Any] = create_rename_keys(lowerCamelCase__ )
for src, dest in rename_keys:
rename_key(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
read_in_swin_q_k_v(lowerCamelCase__ , config.backbone_config )
read_in_decoder_q_k_v(lowerCamelCase__ , lowerCamelCase__ )
# update to torch tensors
for key, value in state_dict.items():
A_ : List[str] = torch.from_numpy(lowerCamelCase__ )
# load 🤗 model
A_ : List[str] = MaskFormerForInstanceSegmentation(lowerCamelCase__ )
model.eval()
for name, param in model.named_parameters():
print(lowerCamelCase__ , param.shape )
A_ ,A_ : Dict = model.load_state_dict(lowerCamelCase__ , strict=lowerCamelCase__ )
assert missing_keys == [
"model.pixel_level_module.encoder.model.layernorm.weight",
"model.pixel_level_module.encoder.model.layernorm.bias",
]
assert len(lowerCamelCase__ ) == 0, f'Unexpected keys: {unexpected_keys}'
# verify results
A_ : Tuple = prepare_img()
if "vistas" in model_name:
A_ : Any = 6_5
elif "cityscapes" in model_name:
A_ : Dict = 6_5_5_3_5
else:
A_ : Dict = 2_5_5
A_ : Dict = True if '''ade''' in model_name else False
A_ : List[Any] = MaskFormerImageProcessor(ignore_index=lowerCamelCase__ , reduce_labels=lowerCamelCase__ )
A_ : Optional[Any] = image_processor(lowerCamelCase__ , return_tensors='''pt''' )
A_ : List[str] = model(**lowerCamelCase__ )
print('''Logits:''' , outputs.class_queries_logits[0, :3, :3] )
if model_name == "maskformer-swin-tiny-ade":
A_ : str = torch.tensor(
[[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]] )
assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowerCamelCase__ , atol=1e-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(f'Saving model and image processor to {pytorch_dump_folder_path}' )
Path(lowerCamelCase__ ).mkdir(exist_ok=lowerCamelCase__ )
model.save_pretrained(lowerCamelCase__ )
image_processor.save_pretrained(lowerCamelCase__ )
if push_to_hub:
print('''Pushing model and image processor to the hub...''' )
model.push_to_hub(f'nielsr/{model_name}' )
image_processor.push_to_hub(f'nielsr/{model_name}' )
if __name__ == "__main__":
snake_case__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""maskformer-swin-tiny-ade""",
type=str,
help="""Name of the MaskFormer model you'd like to convert""",
)
parser.add_argument(
"""--checkpoint_path""",
default="""/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl""",
type=str,
help="""Path to the original state dict (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
snake_case__ = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
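# Illustrative invocation (editor addition; the script filename and the
# checkpoint path are placeholders):
# python convert_maskformer_checkpoint.py \
#     --model_name maskformer-swin-tiny-ade \
#     --checkpoint_path /path/to/MaskFormer-Swin-tiny-ADE20k/model.pkl \
#     --pytorch_dump_folder_path ./maskformer-swin-tiny-ade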
| 4 |
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
snake_case__ = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
snake_case__ = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'transformer.encoder.layers.{i}.self_attn.out_proj.weight', F'encoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(F'transformer.encoder.layers.{i}.self_attn.out_proj.bias', F'encoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.weight', F'encoder.layers.{i}.fc1.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.bias', F'encoder.layers.{i}.fc1.bias'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.weight', F'encoder.layers.{i}.fc2.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.bias', F'encoder.layers.{i}.fc2.bias'))
rename_keys.append(
(F'transformer.encoder.layers.{i}.norm1.weight', F'encoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((F'transformer.encoder.layers.{i}.norm1.bias', F'encoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.weight', F'encoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.bias', F'encoder.layers.{i}.final_layer_norm.bias'))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'transformer.decoder.layers.{i}.self_attn.out_proj.weight', F'decoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.self_attn.out_proj.bias', F'decoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append(
(
F'transformer.decoder.layers.{i}.multihead_attn.out_proj.weight',
F'decoder.layers.{i}.encoder_attn.out_proj.weight',
)
)
rename_keys.append(
(
F'transformer.decoder.layers.{i}.multihead_attn.out_proj.bias',
F'decoder.layers.{i}.encoder_attn.out_proj.bias',
)
)
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.weight', F'decoder.layers.{i}.fc1.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.bias', F'decoder.layers.{i}.fc1.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.weight', F'decoder.layers.{i}.fc2.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.bias', F'decoder.layers.{i}.fc2.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm1.weight', F'decoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((F'transformer.decoder.layers.{i}.norm1.bias', F'decoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm2.weight', F'decoder.layers.{i}.encoder_attn_layer_norm.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm2.bias', F'decoder.layers.{i}.encoder_attn_layer_norm.bias')
)
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.weight', F'decoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.bias', F'decoder.layers.{i}.final_layer_norm.bias'))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.encoder.norm.weight""", """encoder.layernorm.weight"""),
("""transformer.encoder.norm.bias""", """encoder.layernorm.bias"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
]
)
def snake_case__ ( lowerCamelCase__ : List[Any] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : List[Any] ) -> Optional[Any]:
A_ : Tuple = state_dict.pop(lowerCamelCase__ )
A_ : Optional[Any] = val
def snake_case__ ( lowerCamelCase__ : Dict ) -> Any:
A_ : int = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
A_ : int = key.replace('''backbone.0.body''' , '''backbone.conv_encoder.model''' )
A_ : List[str] = value
else:
A_ : Optional[int] = value
return new_state_dict
def snake_case__ ( lowerCamelCase__ : Union[str, Any] ) -> Optional[Any]:
A_ : Any = ''''''
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
A_ : Tuple = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight' )
A_ : Dict = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
A_ : str = in_proj_weight[:2_5_6, :]
A_ : Optional[Any] = in_proj_bias[:2_5_6]
A_ : Dict = in_proj_weight[2_5_6:5_1_2, :]
A_ : Tuple = in_proj_bias[2_5_6:5_1_2]
A_ : Tuple = in_proj_weight[-2_5_6:, :]
A_ : Optional[int] = in_proj_bias[-2_5_6:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
A_ : Union[str, Any] = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight' )
A_ : Dict = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
A_ : List[str] = in_proj_weight[:2_5_6, :]
A_ : int = in_proj_bias[:2_5_6]
A_ : Any = in_proj_weight[2_5_6:5_1_2, :]
A_ : List[str] = in_proj_bias[2_5_6:5_1_2]
A_ : Union[str, Any] = in_proj_weight[-2_5_6:, :]
A_ : Optional[Any] = in_proj_bias[-2_5_6:]
# read in weights + bias of input projection layer of cross-attention
A_ : Tuple = state_dict.pop(
f'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight' )
A_ : Optional[Any] = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias' )
# next, add query, keys and values (in that order) of cross-attention to the state dict
A_ : Dict = in_proj_weight_cross_attn[:2_5_6, :]
A_ : Tuple = in_proj_bias_cross_attn[:2_5_6]
A_ : int = in_proj_weight_cross_attn[2_5_6:5_1_2, :]
A_ : List[str] = in_proj_bias_cross_attn[2_5_6:5_1_2]
A_ : Any = in_proj_weight_cross_attn[-2_5_6:, :]
A_ : Any = in_proj_bias_cross_attn[-2_5_6:]
def snake_case__ ( lowerCamelCase__ : List[str] , lowerCamelCase__ : Tuple ) -> Dict:
A_ ,A_ : int = image.size
A_ : Tuple = max(lowerCamelCase__ , lowerCamelCase__ )
A_ : Optional[Any] = 8_0_0 if '''detection''' in checkpoint_url else 1_0_0_0
A_ : Union[str, Any] = target_max_size / current_max_size
A_ : Any = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) )
return resized_image
def snake_case__ ( lowerCamelCase__ : Tuple ) -> str:
A_ : Any = F.to_tensor(lowerCamelCase__ )
A_ : Optional[Any] = F.normalize(lowerCamelCase__ , mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] )
return image
@torch.no_grad()
def snake_case__ ( lowerCamelCase__ : List[Any] , lowerCamelCase__ : int , lowerCamelCase__ : int ) -> str:
logger.info('''Converting model...''' )
# load original state dict
A_ : Tuple = torch.hub.load_state_dict_from_url(lowerCamelCase__ , map_location='''cpu''' )
# rename keys
for src, dest in rename_keys:
rename_key(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
A_ : str = rename_backbone_keys(lowerCamelCase__ )
# query, key and value matrices need special treatment
read_in_q_k_v(lowerCamelCase__ )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
A_ : List[Any] = '''model.'''
for key in state_dict.copy().keys():
if not key.startswith('''class_labels_classifier''' ) and not key.startswith('''bbox_predictor''' ):
A_ : List[Any] = state_dict.pop(lowerCamelCase__ )
A_ : str = val
# create HuggingFace model and load state dict
A_ : Union[str, Any] = TableTransformerConfig(
backbone='''resnet18''' , mask_loss_coefficient=1 , dice_loss_coefficient=1 , ce_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.4 , class_cost=1 , bbox_cost=5 , giou_cost=2 , )
if "detection" in checkpoint_url:
A_ : Dict = 1_5
A_ : Dict = 2
A_ : int = {0: '''table''', 1: '''table rotated'''}
A_ : List[str] = idalabel
A_ : Optional[int] = {v: k for k, v in idalabel.items()}
else:
A_ : Union[str, Any] = 1_2_5
A_ : Optional[Any] = 6
A_ : Optional[Any] = {
0: '''table''',
1: '''table column''',
2: '''table row''',
3: '''table column header''',
4: '''table projected row header''',
5: '''table spanning cell''',
}
A_ : int = idalabel
A_ : Tuple = {v: k for k, v in idalabel.items()}
A_ : Optional[Any] = DetrImageProcessor(
format='''coco_detection''' , max_size=8_0_0 if '''detection''' in checkpoint_url else 1_0_0_0 )
A_ : int = TableTransformerForObjectDetection(lowerCamelCase__ )
model.load_state_dict(lowerCamelCase__ )
model.eval()
# verify our conversion
A_ : Optional[int] = '''example_pdf.png''' if '''detection''' in checkpoint_url else '''example_table.png'''
A_ : Union[str, Any] = hf_hub_download(repo_id='''nielsr/example-pdf''' , repo_type='''dataset''' , filename=lowerCamelCase__ )
A_ : Tuple = Image.open(lowerCamelCase__ ).convert('''RGB''' )
A_ : int = normalize(resize(lowerCamelCase__ , lowerCamelCase__ ) ).unsqueeze(0 )
A_ : str = model(lowerCamelCase__ )
if "detection" in checkpoint_url:
A_ : str = (1, 1_5, 3)
A_ : int = torch.tensor(
[[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]] )
A_ : Tuple = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]] )
else:
A_ : Optional[int] = (1, 1_2_5, 7)
A_ : Dict = torch.tensor(
[[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]] )
A_ : Any = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, :3, :3] , lowerCamelCase__ , atol=1e-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , lowerCamelCase__ , atol=1e-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(f'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' )
Path(lowerCamelCase__ ).mkdir(exist_ok=lowerCamelCase__ )
model.save_pretrained(lowerCamelCase__ )
image_processor.save_pretrained(lowerCamelCase__ )
if push_to_hub:
# Push model to HF hub
logger.info('''Pushing model to the hub...''' )
A_ : List[Any] = (
'''microsoft/table-transformer-detection'''
if '''detection''' in checkpoint_url
else '''microsoft/table-transformer-structure-recognition'''
)
model.push_to_hub(lowerCamelCase__ )
image_processor.push_to_hub(lowerCamelCase__ )
if __name__ == "__main__":
snake_case__ = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""",
type=str,
choices=[
"""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""",
"""https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth""",
],
help="""URL of the Table Transformer checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
snake_case__ = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
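# Illustrative invocation (editor addition; the script filename is a placeholder):
# python convert_table_transformer_checkpoint.py \
#     --checkpoint_url https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth \
#     --pytorch_dump_folder_path ./table-transformer-detection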
| 4 | 1 |
'''simple docstring'''
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi(precision: int) -> str:
    """Compute pi to ``precision`` digits with the Chudnovsky algorithm."""
    if not isinstance(precision, int):
        raise TypeError('''Undefined for non-integers''')
    elif precision < 1:
        raise ValueError('''Undefined for non-natural numbers''')

    # Decimal working precision and number of series terms
    # (each Chudnovsky term contributes roughly 14 digits).
    getcontext().prec = precision
    num_iterations = ceil(precision / 1_4)
    constant_term = 4_2_6_8_8_0 * Decimal(1_0_0_0_5).sqrt()
    exponential_term = 1
    linear_term = 1_3_5_9_1_4_0_9
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 5_4_5_1_4_0_1_3_4
        exponential_term *= -2_6_2_5_3_7_4_1_2_6_4_0_7_6_8_0_0_0
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]


if __name__ == "__main__":
    n = 50
print(F'The first {n} digits of pi is: {pi(n)}')
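# Worked check (editor addition): with precision 10 the series needs only the
# constant term, and the final [:-1] drops the last digit to guard against
# rounding in the last place.
# >>> pi(10)
# '3.14159265'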
| 4 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
snake_case__ = logging.getLogger(__name__)
@dataclass(frozen=a__ )
class UpperCamelCase_ :
"""simple docstring"""
_lowerCAmelCase = 42
_lowerCAmelCase = 42
_lowerCAmelCase = None
_lowerCAmelCase = None
_lowerCAmelCase = None
@dataclass(frozen=a__ )
class UpperCamelCase_ :
"""simple docstring"""
_lowerCAmelCase = 42
_lowerCAmelCase = None
_lowerCAmelCase = None
_lowerCAmelCase = None
_lowerCAmelCase = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class UpperCamelCase_ (a__ ):
"""simple docstring"""
_lowerCAmelCase = 42
def __init__( self : Optional[int] , _lowerCamelCase : str , _lowerCamelCase : PreTrainedTokenizer , _lowerCamelCase : str , _lowerCamelCase : Optional[int] = None , _lowerCamelCase : List[Any]=False , _lowerCamelCase : bool = False , ):
"""simple docstring"""
A_ : Optional[int] = hans_processors[task]()
A_ : int = os.path.join(
_lowerCamelCase , '''cached_{}_{}_{}_{}'''.format(
'''dev''' if evaluate else '''train''' , tokenizer.__class__.__name__ , str(_lowerCamelCase ) , _lowerCamelCase , ) , )
A_ : Dict = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
A_ ,A_ : List[str] = label_list[2], label_list[1]
A_ : Optional[int] = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
A_ : str = cached_features_file + '''.lock'''
with FileLock(_lowerCamelCase ):
if os.path.exists(_lowerCamelCase ) and not overwrite_cache:
logger.info(f'Loading features from cached file {cached_features_file}' )
A_ : List[str] = torch.load(_lowerCamelCase )
else:
logger.info(f'Creating features from dataset file at {data_dir}' )
A_ : Optional[int] = (
processor.get_dev_examples(_lowerCamelCase ) if evaluate else processor.get_train_examples(_lowerCamelCase )
)
logger.info('''Training examples: %s''' , len(_lowerCamelCase ) )
A_ : Optional[int] = hans_convert_examples_to_features(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
logger.info('''Saving features into cached file %s''' , _lowerCamelCase )
torch.save(self.features , _lowerCamelCase )
def __len__( self : List[str] ):
"""simple docstring"""
return len(self.features )
def __getitem__( self : List[str] , _lowerCamelCase : Optional[int] ):
"""simple docstring"""
return self.features[i]
def _a ( self : str ):
"""simple docstring"""
return self.label_list
if is_tf_available():
import tensorflow as tf
class UpperCamelCase_ :
"""simple docstring"""
_lowerCAmelCase = 42
def __init__( self : Optional[int] , _lowerCamelCase : str , _lowerCamelCase : PreTrainedTokenizer , _lowerCamelCase : str , _lowerCamelCase : Optional[int] = 128 , _lowerCamelCase : Dict=False , _lowerCamelCase : bool = False , ):
"""simple docstring"""
A_ : Optional[int] = hans_processors[task]()
A_ : Optional[int] = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
A_ ,A_ : Union[str, Any] = label_list[2], label_list[1]
A_ : Tuple = label_list
A_ : Optional[int] = processor.get_dev_examples(_lowerCamelCase ) if evaluate else processor.get_train_examples(_lowerCamelCase )
A_ : Tuple = hans_convert_examples_to_features(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='''convert examples to features''' ):
if ex_index % 10000 == 0:
logger.info('''Writing example %d of %d''' % (ex_index, len(_lowerCamelCase )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
A_ : List[Any] = tf.data.Dataset.from_generator(
_lowerCamelCase , (
{
'''example_id''': tf.intaa,
'''input_ids''': tf.intaa,
'''attention_mask''': tf.intaa,
'''token_type_ids''': tf.intaa,
},
tf.intaa,
) , (
{
'''example_id''': tf.TensorShape([] ),
'''input_ids''': tf.TensorShape([None, None] ),
'''attention_mask''': tf.TensorShape([None, None] ),
'''token_type_ids''': tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) , )
def _a ( self : Any ):
"""simple docstring"""
return self.dataset
def __len__( self : Dict ):
"""simple docstring"""
return len(self.features )
def __getitem__( self : Optional[int] , _lowerCamelCase : List[str] ):
"""simple docstring"""
return self.features[i]
def _a ( self : Tuple ):
"""simple docstring"""
return self.label_list
class UpperCamelCase_ (a__ ):
"""simple docstring"""
def _a ( self : List[str] , _lowerCamelCase : Union[str, Any] ):
"""simple docstring"""
return self._create_examples(self._read_tsv(os.path.join(_lowerCamelCase , '''heuristics_train_set.txt''' ) ) , '''train''' )
def _a ( self : List[str] , _lowerCamelCase : Tuple ):
"""simple docstring"""
return self._create_examples(self._read_tsv(os.path.join(_lowerCamelCase , '''heuristics_evaluation_set.txt''' ) ) , '''dev''' )
def _a ( self : Any ):
"""simple docstring"""
return ["contradiction", "entailment", "neutral"]
def _a ( self : Optional[Any] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Any ):
"""simple docstring"""
A_ : Tuple = []
for i, line in enumerate(_lowerCamelCase ):
if i == 0:
continue
A_ : str = '''%s-%s''' % (set_type, line[0])
A_ : Optional[Any] = line[5]
A_ : Union[str, Any] = line[6]
A_ : List[str] = line[7][2:] if line[7].startswith('''ex''' ) else line[7]
A_ : str = line[0]
examples.append(InputExample(guid=_lowerCamelCase , text_a=_lowerCamelCase , text_b=_lowerCamelCase , label=_lowerCamelCase , pairID=_lowerCamelCase ) )
return examples
def snake_case__ ( lowerCamelCase__ : List[InputExample] , lowerCamelCase__ : List[str] , lowerCamelCase__ : int , lowerCamelCase__ : PreTrainedTokenizer , ) -> int:
A_ : Union[str, Any] = {label: i for i, label in enumerate(lowerCamelCase__ )}
A_ : Optional[Any] = []
for ex_index, example in tqdm.tqdm(enumerate(lowerCamelCase__ ) , desc='''convert examples to features''' ):
if ex_index % 1_0_0_0_0 == 0:
logger.info('''Writing example %d''' % (ex_index) )
A_ : Optional[int] = tokenizer(
example.text_a , example.text_b , add_special_tokens=lowerCamelCase__ , max_length=lowerCamelCase__ , padding='''max_length''' , truncation=lowerCamelCase__ , return_overflowing_tokens=lowerCamelCase__ , )
A_ : List[str] = label_map[example.label] if example.label in label_map else 0
A_ : Tuple = int(example.pairID )
features.append(InputFeatures(**lowerCamelCase__ , label=lowerCamelCase__ , pairID=lowerCamelCase__ ) )
for i, example in enumerate(examples[:5] ):
logger.info('''*** Example ***''' )
logger.info(f'guid: {example}' )
logger.info(f'features: {features[i]}' )
return features
snake_case__ = {
"""hans""": 3,
}
snake_case__ = {
"""hans""": HansProcessor,
}
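# Illustrative sketch (editor addition): in upstream transformers this torch
# dataset class is `HansDataset`; a typical instantiation, assuming a local
# directory containing heuristics_{train,evaluation}_set.txt, looks like:
# tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
# train_dataset = HansDataset(data_dir="./hans", tokenizer=tokenizer, task="hans", max_seq_length=128)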
| 4 | 1 |
'''simple docstring'''
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = """docs/source/en/_toctree.yml"""


def clean_model_doc_toc(model_doc):
    """Deduplicate entries in the model part of the table of content and sort them alphabetically."""
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc['''title'''] for doc in model_doc if doc['''local'''] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f'{duplicate_key} is present several times in the documentation table of content at '
                '''`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '''
                '''others.''' )
        # Only add this once
        new_doc.append({'''local''': duplicate_key, '''title''': titles[0]})

    # Add non-duplicate keys
    new_doc.extend([doc for doc in model_doc if counts[doc['''local''']] == 1])
    # Sort
    return sorted(new_doc, key=lambda s: s["title"].lower())


def check_model_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding='''utf-8''') as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]['''sections''']

    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]['''sections''']

    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if '''sections''' in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc['''sections''']
        new_modality_doc = clean_model_doc_toc(old_modality_doc)
        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                modality_doc['''sections'''] = new_modality_doc

    if diff:
        if overwrite:
            api_doc[model_idx]['''sections'''] = model_doc
            content[api_idx]['''sections'''] = api_doc
            with open(PATH_TO_TOC, '''w''', encoding='''utf-8''') as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                '''The model doc part of the table of content is not properly sorted, run `make style` to fix this.''' )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
    args = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
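# Illustrative invocation (editor addition; in the transformers repo this
# checker lives under utils/ and is run as part of `make style`):
# python utils/check_doc_toc.py --fix_and_overwrite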
| 4 |
'''simple docstring'''
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
snake_case__ = datasets.utils.logging.get_logger(__name__)
@dataclass
class UpperCamelCase_ (datasets.BuilderConfig ):
"""simple docstring"""
_lowerCAmelCase = None
_lowerCAmelCase = "utf-8"
_lowerCAmelCase = None
_lowerCAmelCase = None
_lowerCAmelCase = True # deprecated
_lowerCAmelCase = None # deprecated
_lowerCAmelCase = 1_0 << 2_0 # 10MB
_lowerCAmelCase = None
class UpperCamelCase_ (datasets.ArrowBasedBuilder ):
"""simple docstring"""
_lowerCAmelCase = JsonConfig
def _a ( self : int ):
"""simple docstring"""
if self.config.block_size is not None:
logger.warning('''The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead''' )
A_ : List[Any] = self.config.block_size
if self.config.use_threads is not True:
logger.warning(
'''The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.''' )
if self.config.newlines_in_values is not None:
raise ValueError('''The JSON loader parameter `newlines_in_values` is no longer supported''' )
return datasets.DatasetInfo(features=self.config.features )
def _a ( self : Any , _lowerCamelCase : List[str] ):
"""simple docstring"""
if not self.config.data_files:
raise ValueError(f'At least one data file must be specified, but got data_files={self.config.data_files}' )
A_ : int = dl_manager.download_and_extract(self.config.data_files )
if isinstance(_lowerCamelCase , (str, list, tuple) ):
A_ : Union[str, Any] = data_files
if isinstance(_lowerCamelCase , _lowerCamelCase ):
A_ : List[str] = [files]
A_ : List[Any] = [dl_manager.iter_files(_lowerCamelCase ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )]
A_ : Tuple = []
for split_name, files in data_files.items():
if isinstance(_lowerCamelCase , _lowerCamelCase ):
A_ : int = [files]
A_ : Union[str, Any] = [dl_manager.iter_files(_lowerCamelCase ) for file in files]
splits.append(datasets.SplitGenerator(name=_lowerCamelCase , gen_kwargs={'''files''': files} ) )
return splits
def _a ( self : int , _lowerCamelCase : pa.Table ):
"""simple docstring"""
if self.config.features is not None:
# adding missing columns
for column_name in set(self.config.features ) - set(pa_table.column_names ):
A_ : Optional[int] = self.config.features.arrow_schema.field(_lowerCamelCase ).type
A_ : Optional[int] = pa_table.append_column(_lowerCamelCase , pa.array([None] * len(_lowerCamelCase ) , type=_lowerCamelCase ) )
# more expensive cast to support nested structures with keys in a different order
# allows str <-> int/float or str to Audio for example
A_ : str = table_cast(_lowerCamelCase , self.config.features.arrow_schema )
return pa_table
def _a ( self : List[str] , _lowerCamelCase : int ):
"""simple docstring"""
for file_idx, file in enumerate(itertools.chain.from_iterable(_lowerCamelCase ) ):
# If the file is one json object and if we need to look at the list of items in one specific field
if self.config.field is not None:
with open(_lowerCamelCase , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
A_ : int = json.load(_lowerCamelCase )
# We keep only the field we are interested in
A_ : List[str] = dataset[self.config.field]
# We accept two format: a list of dicts or a dict of lists
if isinstance(_lowerCamelCase , (list, tuple) ):
A_ : int = set().union(*[row.keys() for row in dataset] )
A_ : List[str] = {col: [row.get(_lowerCamelCase ) for row in dataset] for col in keys}
else:
A_ : Tuple = dataset
A_ : Dict = pa.Table.from_pydict(_lowerCamelCase )
yield file_idx, self._cast_table(_lowerCamelCase )
# If the file has one json object per line
else:
with open(_lowerCamelCase , '''rb''' ) as f:
A_ : int = 0
# Use block_size equal to the chunk size divided by 32 to leverage multithreading
# Set a default minimum value of 16kB if the chunk size is really small
A_ : int = max(self.config.chunksize // 32 , 16 << 10 )
A_ : int = (
self.config.encoding_errors if self.config.encoding_errors is not None else '''strict'''
)
while True:
A_ : Any = f.read(self.config.chunksize )
if not batch:
break
# Finish current line
try:
batch += f.readline()
except (AttributeError, io.UnsupportedOperation):
batch += readline(_lowerCamelCase )
# PyArrow only accepts utf-8 encoded bytes
if self.config.encoding != "utf-8":
A_ : Optional[Any] = batch.decode(self.config.encoding , errors=_lowerCamelCase ).encode('''utf-8''' )
try:
while True:
try:
A_ : List[Any] = paj.read_json(
io.BytesIO(_lowerCamelCase ) , read_options=paj.ReadOptions(block_size=_lowerCamelCase ) )
break
except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                            if (
                                isinstance(_lowerCamelCase , pa.ArrowInvalid )
                                and "straddling" not in str(_lowerCamelCase )
                            ) or block_size > len(_lowerCamelCase ):
raise
else:
# Increase the block size in case it was too small.
# The block size will be reset for the next file.
logger.debug(
f'Batch of {len(_lowerCamelCase )} bytes couldn\'t be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.' )
block_size *= 2
except pa.ArrowInvalid as e:
try:
with open(
_lowerCamelCase , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
A_ : Optional[Any] = json.load(_lowerCamelCase )
except json.JSONDecodeError:
logger.error(f'Failed to read file \'{file}\' with error {type(_lowerCamelCase )}: {e}' )
raise e
# If possible, parse the file as a list of json objects and exit the loop
if isinstance(_lowerCamelCase , _lowerCamelCase ): # list is the only sequence type supported in JSON
try:
A_ : Optional[int] = set().union(*[row.keys() for row in dataset] )
A_ : Tuple = {col: [row.get(_lowerCamelCase ) for row in dataset] for col in keys}
A_ : int = pa.Table.from_pydict(_lowerCamelCase )
except (pa.ArrowInvalid, AttributeError) as e:
logger.error(f'Failed to read file \'{file}\' with error {type(_lowerCamelCase )}: {e}' )
raise ValueError(f'Not able to read records in the JSON file at {file}.' ) from None
yield file_idx, self._cast_table(_lowerCamelCase )
break
else:
logger.error(f'Failed to read file \'{file}\' with error {type(_lowerCamelCase )}: {e}' )
raise ValueError(
f'Not able to read records in the JSON file at {file}. '
f'You should probably indicate the field of the JSON file containing your records. '
f'This JSON file contain the following fields: {str(list(dataset.keys() ) )}. '
f'Select the correct one and provide it as `field=\'XXX\'` to the dataset loading method. ' ) from None
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(_lowerCamelCase )
batch_idx += 1
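# Illustrative usage of the builder above through the public `datasets` API
# (the file paths are hypothetical). `field` selects a nested key inside a single
# JSON object; `chunksize` controls the batched reading path in _generate_tables.
from datasets import load_dataset
ds_lines = load_dataset("json", data_files="data/records.jsonl")             # one object per line
ds_field = load_dataset("json", data_files="data/dump.json", field="data")   # {"data": [...]}
print(ds_lines["train"].features)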
| 4 | 1 |
'''simple docstring'''
import warnings
from .generation import TFGenerationMixin
class UpperCamelCase_ (a__ ):
"""simple docstring"""
warnings.warn(
'Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will '
'be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.', a__, )
| 4 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
snake_case__ = logging.get_logger(__name__)
snake_case__ = {
"""microsoft/swin-tiny-patch4-window7-224""": (
"""https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"""
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class UpperCamelCase_ (a__, a__ ):
"""simple docstring"""
_lowerCAmelCase = 'swin'
_lowerCAmelCase = {
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self : Any , _lowerCamelCase : Optional[Any]=224 , _lowerCamelCase : List[str]=4 , _lowerCamelCase : Optional[Any]=3 , _lowerCamelCase : Tuple=96 , _lowerCamelCase : List[Any]=[2, 2, 6, 2] , _lowerCamelCase : List[str]=[3, 6, 12, 24] , _lowerCamelCase : List[Any]=7 , _lowerCamelCase : Optional[int]=4.0 , _lowerCamelCase : List[str]=True , _lowerCamelCase : List[str]=0.0 , _lowerCamelCase : Any=0.0 , _lowerCamelCase : Dict=0.1 , _lowerCamelCase : List[str]="gelu" , _lowerCamelCase : Tuple=False , _lowerCamelCase : Dict=0.02 , _lowerCamelCase : Optional[Any]=1E-5 , _lowerCamelCase : Any=32 , _lowerCamelCase : Tuple=None , _lowerCamelCase : Any=None , **_lowerCamelCase : str , ):
"""simple docstring"""
super().__init__(**_lowerCamelCase )
A_ : Optional[int] = image_size
A_ : Optional[int] = patch_size
A_ : Optional[int] = num_channels
A_ : Any = embed_dim
A_ : List[Any] = depths
A_ : Any = len(_lowerCamelCase )
A_ : List[Any] = num_heads
A_ : Tuple = window_size
A_ : Tuple = mlp_ratio
A_ : Dict = qkv_bias
A_ : List[str] = hidden_dropout_prob
A_ : List[str] = attention_probs_dropout_prob
A_ : Any = drop_path_rate
A_ : List[Any] = hidden_act
A_ : Tuple = use_absolute_embeddings
A_ : int = layer_norm_eps
A_ : Optional[Any] = initializer_range
A_ : Union[str, Any] = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
A_ : str = int(embed_dim * 2 ** (len(_lowerCamelCase ) - 1) )
A_ : str = ['''stem'''] + [f'stage{idx}' for idx in range(1 , len(_lowerCamelCase ) + 1 )]
A_ ,A_ : Optional[Any] = get_aligned_output_features_output_indices(
out_features=_lowerCamelCase , out_indices=_lowerCamelCase , stage_names=self.stage_names )
class UpperCamelCase_ (a__ ):
"""simple docstring"""
_lowerCAmelCase = version.parse('1.11' )
@property
def _a ( self : str ):
"""simple docstring"""
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def _a ( self : Union[str, Any] ):
"""simple docstring"""
return 1E-4
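# Illustrative sketch of the config above via its public upstream name, SwinConfig;
# the values are the defaults from the signature, repeated only to keep the
# example explicit.
from transformers import SwinConfig, SwinModel
config = SwinConfig(image_size=224, patch_size=4, embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24])
model = SwinModel(config)  # randomly initialised weights, no download needed
print(config.hidden_size)  # 96 * 2 ** (len(depths) - 1) == 768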
| 4 | 1 |
'''simple docstring'''
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNetaDConditionModel, UNetaDModel
snake_case__ = False
snake_case__ = True
snake_case__ = False
if __name__ == "__main__":
snake_case__ = argparse.ArgumentParser()
parser.add_argument(
"""--repo_path""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the architecture.""",
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
snake_case__ = parser.parse_args()
snake_case__ = {
"""image_size""": """sample_size""",
"""num_res_blocks""": """layers_per_block""",
"""block_channels""": """block_out_channels""",
"""down_blocks""": """down_block_types""",
"""up_blocks""": """up_block_types""",
"""downscale_freq_shift""": """freq_shift""",
"""resnet_num_groups""": """norm_num_groups""",
"""resnet_act_fn""": """act_fn""",
"""resnet_eps""": """norm_eps""",
"""num_head_channels""": """attention_head_dim""",
}
snake_case__ = {
"""time_steps""": """time_proj""",
"""mid""": """mid_block""",
"""downsample_blocks""": """down_blocks""",
"""upsample_blocks""": """up_blocks""",
}
snake_case__ = """""" if has_file(args.repo_path, """config.json""") else """unet"""
with open(os.path.join(args.repo_path, subfolder, """config.json"""), """r""", encoding="""utf-8""") as reader:
snake_case__ = reader.read()
snake_case__ = json.loads(text)
if do_only_config:
for key in config_parameters_to_change.keys():
config.pop(key, None)
if has_file(args.repo_path, """config.json"""):
snake_case__ = UNetaDModel(**config)
else:
snake_case__ = UNetaDConditionModel if """ldm-text2im-large-256""" in args.repo_path else UNetaDModel
snake_case__ = class_name(**config)
if do_only_config:
model.save_config(os.path.join(args.repo_path, subfolder))
snake_case__ = dict(model.config)
if do_only_renaming:
for key, value in config_parameters_to_change.items():
if key in config:
snake_case__ = config[key]
del config[key]
snake_case__ = [k.replace("""UNetRes""", """""") for k in config["""down_block_types"""]]
snake_case__ = [k.replace("""UNetRes""", """""") for k in config["""up_block_types"""]]
if do_only_weights:
snake_case__ = torch.load(os.path.join(args.repo_path, subfolder, """diffusion_pytorch_model.bin"""))
snake_case__ = {}
for param_key, param_value in state_dict.items():
if param_key.endswith(""".op.bias""") or param_key.endswith(""".op.weight"""):
continue
snake_case__ = False
for key, new_key in key_parameters_to_change.items():
if not has_changed and param_key.split(""".""")[0] == key:
snake_case__ = param_value
snake_case__ = True
if not has_changed:
snake_case__ = param_value
model.load_state_dict(new_state_dict)
model.save_pretrained(os.path.join(args.repo_path, subfolder))
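# Illustrative sketch of the key-renaming pass above on a toy state dict; the
# prefixes mirror `key_parameters_to_change` but the tensors are dummies, and the
# slice-based rewrite restates the intent of the (obfuscated) loop.
import torch
key_map = {"time_steps": "time_proj", "mid": "mid_block"}
toy_state_dict = {"time_steps.weight": torch.zeros(4), "mid.conv.weight": torch.zeros(4)}
renamed = {}
for param_key, param_value in toy_state_dict.items():
    prefix = param_key.split(".")[0]
    # Replace only the leading component of the parameter path.
    renamed[key_map.get(prefix, prefix) + param_key[len(prefix):]] = param_value
print(sorted(renamed))  # ['mid_block.conv.weight', 'time_proj.weight']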
| 4 |
'''simple docstring'''
from __future__ import annotations
def snake_case__ ( lowerCamelCase__ : list[int] , lowerCamelCase__ : int ) -> list[int]:
A_ : int = 0
A_ : str = len(lowerCamelCase__ ) - 1
while i < j:
if nums[i] + nums[j] == target:
return [i, j]
elif nums[i] + nums[j] < target:
A_ : Tuple = i + 1
else:
A_ : List[str] = j - 1
return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'{two_pointer([2, 7, 11, 15], 9) = }')
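# De-obfuscated sketch of the same search: `A_` above stands in for the two
# index variables, so this names them explicitly. Requires a sorted input array.
def two_pointer_sum(nums: list[int], target: int) -> list[int]:
    i, j = 0, len(nums) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        if nums[i] + nums[j] < target:
            i += 1  # sum too small: advance the left pointer
        else:
            j -= 1  # sum too large: retreat the right pointer
    return []
assert two_pointer_sum([2, 7, 11, 15], 9) == [0, 1]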
| 4 | 1 |
'''simple docstring'''
snake_case__ = {str(digit): digit**5 for digit in range(10)}
def snake_case__ ( lowerCamelCase__ : int ) -> int:
return sum(DIGITS_FIFTH_POWER[digit] for digit in str(lowerCamelCase__ ) )
def snake_case__ ( ) -> int:
return sum(
number
for number in range(1_0_0_0 , 1_0_0_0_0_0_0 )
if number == digits_fifth_powers_sum(lowerCamelCase__ ) )
if __name__ == "__main__":
print(solution())
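# Worked check for one of the numbers the search above finds:
# 4150 == 4**5 + 1**5 + 5**5 + 0**5 == 1024 + 1 + 3125 + 0
assert sum(int(d) ** 5 for d in "4150") == 4150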
| 4 |
'''simple docstring'''
def snake_case__ ( lowerCamelCase__ : list[int] , lowerCamelCase__ : list[int] , lowerCamelCase__ : int ) -> bool:
return not any(
neighbour == 1 and colored_vertices[i] == color
for i, neighbour in enumerate(lowerCamelCase__ ) )
def snake_case__ ( lowerCamelCase__ : list[list[int]] , lowerCamelCase__ : int , lowerCamelCase__ : list[int] , lowerCamelCase__ : int ) -> bool:
# Base Case
if index == len(lowerCamelCase__ ):
return True
# Recursive Step
for i in range(lowerCamelCase__ ):
if valid_coloring(graph[index] , lowerCamelCase__ , lowerCamelCase__ ):
# Color current vertex
A_ : int = i
# Validate coloring
if util_color(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , index + 1 ):
return True
# Backtrack
A_ : str = -1
return False
def snake_case__ ( lowerCamelCase__ : list[list[int]] , lowerCamelCase__ : int ) -> list[int]:
A_ : List[str] = [-1] * len(lowerCamelCase__ )
if util_color(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , 0 ):
return colored_vertices
return []
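# Illustrative usage, de-obfuscated: the three `snake_case__` defs above play the
# roles of valid_coloring, util_color and color. On a triangle (complete graph K3)
# three colours suffice and two do not; with the entry point named `color`:
#   color(triangle, 3)  ->  [0, 1, 2]   (a valid assignment)
#   color(triangle, 2)  ->  []          (no 2-colouring exists)
triangle = [[0, 1, 1], [1, 0, 1], [1, 1, 0]]
coloring = [0, 1, 2]
# Check the invariant valid_coloring enforces: no edge joins same-coloured vertices.
assert all(
    coloring[u] != coloring[v]
    for u, row in enumerate(triangle)
    for v, edge in enumerate(row)
    if edge == 1
)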
| 4 | 1 |
'''simple docstring'''
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class UpperCamelCase_ (unittest.TestCase ):
"""simple docstring"""
def _a ( self : Tuple ):
"""simple docstring"""
A_ : List[Any] = Vector([1, 2, 3] )
self.assertEqual(x.component(0 ) , 1 )
self.assertEqual(x.component(2 ) , 3 )
A_ : Tuple = Vector()
def _a ( self : Any ):
"""simple docstring"""
A_ : int = Vector([0, 0, 0, 0, 0, 1] )
self.assertEqual(str(_lowerCamelCase ) , '''(0,0,0,0,0,1)''' )
def _a ( self : int ):
"""simple docstring"""
A_ : Tuple = Vector([1, 2, 3, 4] )
self.assertEqual(len(_lowerCamelCase ) , 4 )
def _a ( self : Optional[Any] ):
"""simple docstring"""
A_ : str = Vector([1, 2] )
A_ : int = Vector([1, 2, 3, 4, 5] )
A_ : List[str] = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] )
A_ : List[str] = Vector([1, -1, 1, -1, 2, -3, 4, -5] )
self.assertAlmostEqual(x.euclidean_length() , 2.2_36 , 3 )
self.assertAlmostEqual(y.euclidean_length() , 7.4_16 , 3 )
self.assertEqual(z.euclidean_length() , 0 )
self.assertAlmostEqual(w.euclidean_length() , 7.6_16 , 3 )
def _a ( self : Optional[int] ):
"""simple docstring"""
A_ : List[Any] = Vector([1, 2, 3] )
A_ : Union[str, Any] = Vector([1, 1, 1] )
self.assertEqual((x + y).component(0 ) , 2 )
self.assertEqual((x + y).component(1 ) , 3 )
self.assertEqual((x + y).component(2 ) , 4 )
def _a ( self : int ):
"""simple docstring"""
A_ : Union[str, Any] = Vector([1, 2, 3] )
A_ : List[Any] = Vector([1, 1, 1] )
self.assertEqual((x - y).component(0 ) , 0 )
self.assertEqual((x - y).component(1 ) , 1 )
self.assertEqual((x - y).component(2 ) , 2 )
def _a ( self : Dict ):
"""simple docstring"""
A_ : Tuple = Vector([1, 2, 3] )
A_ : int = Vector([2, -1, 4] ) # for test of dot product
A_ : str = Vector([1, -2, -1] )
self.assertEqual(str(x * 3.0 ) , '''(3.0,6.0,9.0)''' )
self.assertEqual((a * b) , 0 )
def _a ( self : Tuple ):
"""simple docstring"""
self.assertEqual(str(zero_vector(10 ) ).count('''0''' ) , 10 )
def _a ( self : List[str] ):
"""simple docstring"""
self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , '''(0,1,0)''' )
def _a ( self : List[Any] ):
"""simple docstring"""
A_ : Tuple = Vector([1, 2, 3] )
A_ : Tuple = Vector([1, 0, 1] )
self.assertEqual(str(axpy(2 , _lowerCamelCase , _lowerCamelCase ) ) , '''(3,4,7)''' )
def _a ( self : str ):
"""simple docstring"""
A_ : Optional[int] = Vector([1, 0, 0, 0, 0, 0] )
A_ : Tuple = x.copy()
self.assertEqual(str(_lowerCamelCase ) , str(_lowerCamelCase ) )
def _a ( self : Tuple ):
"""simple docstring"""
A_ : List[str] = Vector([1, 0, 0] )
x.change_component(0 , 0 )
x.change_component(1 , 1 )
self.assertEqual(str(_lowerCamelCase ) , '''(0,1,0)''' )
def _a ( self : Any ):
"""simple docstring"""
A_ : Dict = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual('''|1,2,3|\n|2,4,5|\n|6,7,8|\n''' , str(_lowerCamelCase ) )
def _a ( self : List[Any] ):
"""simple docstring"""
A_ : List[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
A_ : List[str] = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(minors[x][y] , a.minor(_lowerCamelCase , _lowerCamelCase ) )
def _a ( self : Optional[int] ):
"""simple docstring"""
A_ : Dict = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
A_ : Any = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(cofactors[x][y] , a.cofactor(_lowerCamelCase , _lowerCamelCase ) )
def _a ( self : List[str] ):
"""simple docstring"""
A_ : Tuple = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(-5 , a.determinant() )
def _a ( self : Optional[Any] ):
"""simple docstring"""
A_ : List[Any] = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 )
A_ : List[str] = Vector([1, 2, 3] )
self.assertEqual('''(14,32,50)''' , str(a * x ) )
self.assertEqual('''|2,4,6|\n|8,10,12|\n|14,16,18|\n''' , str(a * 2 ) )
def _a ( self : List[str] ):
"""simple docstring"""
A_ : Dict = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
a.change_component(0 , 2 , 5 )
self.assertEqual('''|1,2,5|\n|2,4,5|\n|6,7,8|\n''' , str(_lowerCamelCase ) )
def _a ( self : Dict ):
"""simple docstring"""
A_ : Dict = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
        self.assertAlmostEqual(7 , a.component(2 , 1 ) , delta=0.01 )
def _a ( self : List[str] ):
"""simple docstring"""
A_ : Dict = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
A_ : Optional[int] = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual('''|2,4,10|\n|4,8,10|\n|12,14,18|\n''' , str(a + b ) )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A_ : int = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
A_ : List[str] = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual('''|0,0,-4|\n|0,0,0|\n|0,0,-2|\n''' , str(a - b ) )
def _a ( self : int ):
"""simple docstring"""
self.assertEqual(
'''|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n''' , str(square_zero_matrix(5 ) ) , )
if __name__ == "__main__":
unittest.main()
| 4 |
'''simple docstring'''
from __future__ import annotations
from PIL import Image
# Define glider example
snake_case__ = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
snake_case__ = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def snake_case__ ( lowerCamelCase__ : list[list[int]] ) -> list[list[int]]:
A_ : str = []
for i in range(len(lowerCamelCase__ ) ):
A_ : Optional[Any] = []
for j in range(len(cells[i] ) ):
# Get the number of live neighbours
A_ : Optional[int] = 0
if i > 0 and j > 0:
neighbour_count += cells[i - 1][j - 1]
if i > 0:
neighbour_count += cells[i - 1][j]
if i > 0 and j < len(cells[i] ) - 1:
neighbour_count += cells[i - 1][j + 1]
if j > 0:
neighbour_count += cells[i][j - 1]
if j < len(cells[i] ) - 1:
neighbour_count += cells[i][j + 1]
if i < len(lowerCamelCase__ ) - 1 and j > 0:
neighbour_count += cells[i + 1][j - 1]
if i < len(lowerCamelCase__ ) - 1:
neighbour_count += cells[i + 1][j]
if i < len(lowerCamelCase__ ) - 1 and j < len(cells[i] ) - 1:
neighbour_count += cells[i + 1][j + 1]
# Rules of the game of life (excerpt from Wikipedia):
# 1. Any live cell with two or three live neighbours survives.
# 2. Any dead cell with three live neighbours becomes a live cell.
# 3. All other live cells die in the next generation.
# Similarly, all other dead cells stay dead.
A_ : List[str] = cells[i][j] == 1
            if (alive and 2 <= neighbour_count <= 3) or (
                not alive and neighbour_count == 3
            ):
next_generation_row.append(1 )
else:
next_generation_row.append(0 )
next_generation.append(lowerCamelCase__ )
return next_generation
def snake_case__ ( lowerCamelCase__ : list[list[int]] , lowerCamelCase__ : int ) -> list[Image.Image]:
A_ : List[Any] = []
for _ in range(lowerCamelCase__ ):
# Create output image
A_ : Optional[int] = Image.new('''RGB''' , (len(cells[0] ), len(lowerCamelCase__ )) )
A_ : int = img.load()
# Save cells to image
for x in range(len(lowerCamelCase__ ) ):
for y in range(len(cells[0] ) ):
A_ : Optional[Any] = 2_5_5 - cells[y][x] * 2_5_5
A_ : str = (colour, colour, colour)
# Save image
images.append(lowerCamelCase__ )
A_ : Optional[int] = new_generation(lowerCamelCase__ )
return images
if __name__ == "__main__":
snake_case__ = generate_images(GLIDER, 16)
images[0].save("""out.gif""", save_all=True, append_images=images[1:])
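# Illustrative check, de-obfuscated: the first `snake_case__` def above is the
# generation step. One tick flips a vertical blinker into a horizontal one:
#   [[0, 1, 0],        [[0, 0, 0],
#    [0, 1, 0],  -->    [1, 1, 1],
#    [0, 1, 0]]         [0, 0, 0]]
# The helper below is a hypothetical standalone restatement of the per-cell rule.
def next_state(alive: bool, live_neighbours: int) -> int:
    return 1 if (alive and 2 <= live_neighbours <= 3) or (not alive and live_neighbours == 3) else 0
assert next_state(True, 2) == 1   # survival
assert next_state(False, 3) == 1  # birth
assert next_state(True, 4) == 0   # overcrowding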
| 4 | 1 |
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
snake_case__ = random.Random()
if is_torch_available():
import torch
def snake_case__ ( lowerCamelCase__ : int , lowerCamelCase__ : Any=1.0 , lowerCamelCase__ : str=None , lowerCamelCase__ : Optional[Any]=None ) -> int:
if rng is None:
A_ : List[Any] = global_rng
A_ : Any = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class UpperCamelCase_ (unittest.TestCase ):
"""simple docstring"""
def __init__( self : Optional[int] , _lowerCamelCase : Tuple , _lowerCamelCase : Tuple=7 , _lowerCamelCase : List[Any]=400 , _lowerCamelCase : Dict=2000 , _lowerCamelCase : Dict=1 , _lowerCamelCase : Dict=0.0 , _lowerCamelCase : int=16000 , _lowerCamelCase : Optional[Any]=True , _lowerCamelCase : int=True , ):
"""simple docstring"""
A_ : List[Any] = parent
A_ : int = batch_size
A_ : List[Any] = min_seq_length
A_ : List[Any] = max_seq_length
A_ : List[str] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
A_ : Union[str, Any] = feature_size
A_ : Tuple = padding_value
A_ : Any = sampling_rate
A_ : List[Any] = return_attention_mask
A_ : int = do_normalize
def _a ( self : List[Any] ):
"""simple docstring"""
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def _a ( self : List[Any] , _lowerCamelCase : str=False , _lowerCamelCase : Dict=False ):
"""simple docstring"""
def _flatten(_lowerCamelCase : str ):
return list(itertools.chain(*_lowerCamelCase ) )
if equal_length:
A_ : str = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
A_ : Optional[int] = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
A_ : Optional[Any] = [np.asarray(_lowerCamelCase ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class UpperCamelCase_ (a__, unittest.TestCase ):
"""simple docstring"""
_lowerCAmelCase = ASTFeatureExtractor
def _a ( self : int ):
"""simple docstring"""
A_ : Any = ASTFeatureExtractionTester(self )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A_ : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
A_ : Optional[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
A_ : List[Any] = [np.asarray(_lowerCamelCase ) for speech_input in speech_inputs]
# Test not batched input
A_ : Optional[int] = feat_extract(speech_inputs[0] , return_tensors='''np''' ).input_values
A_ : int = feat_extract(np_speech_inputs[0] , return_tensors='''np''' ).input_values
self.assertTrue(np.allclose(_lowerCamelCase , _lowerCamelCase , atol=1E-3 ) )
# Test batched
A_ : str = feat_extract(_lowerCamelCase , padding=_lowerCamelCase , return_tensors='''np''' ).input_values
A_ : Tuple = feat_extract(_lowerCamelCase , padding=_lowerCamelCase , return_tensors='''np''' ).input_values
        for enc_seq_a, enc_seq_b in zip(_lowerCamelCase , _lowerCamelCase ):
            self.assertTrue(np.allclose(enc_seq_a , enc_seq_b , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
A_ : Dict = [floats_list((1, x) )[0] for x in (800, 800, 800)]
A_ : int = np.asarray(_lowerCamelCase )
A_ : int = feat_extract(_lowerCamelCase , return_tensors='''np''' ).input_values
A_ : List[Any] = feat_extract(_lowerCamelCase , return_tensors='''np''' ).input_values
        for enc_seq_a, enc_seq_b in zip(_lowerCamelCase , _lowerCamelCase ):
            self.assertTrue(np.allclose(enc_seq_a , enc_seq_b , atol=1E-3 ) )
@require_torch
def _a ( self : str ):
"""simple docstring"""
import torch
A_ : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
A_ : Dict = np.random.rand(100 ).astype(np.floataa )
A_ : int = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
A_ : Optional[int] = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''np''' )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
A_ : int = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''pt''' )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
def _a ( self : Union[str, Any] , _lowerCamelCase : Tuple ):
"""simple docstring"""
from datasets import load_dataset
A_ : Any = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
# automatic decoding with librispeech
A_ : Any = ds.sort('''id''' ).select(range(_lowerCamelCase ) )[:num_samples]['''audio''']
return [x["array"] for x in speech_samples]
@require_torch
def _a ( self : Union[str, Any] ):
"""simple docstring"""
        # fmt: off
        A_ : str = torch.tensor(
[-0.98_94, -1.27_76, -0.90_66, -1.27_76, -0.93_49, -1.26_09, -1.03_86, -1.27_76,
-1.15_61, -1.27_76, -1.20_52, -1.27_23, -1.21_90, -1.21_32, -1.27_76, -1.11_33,
-1.19_53, -1.13_43, -1.15_84, -1.22_03, -1.17_70, -1.24_74, -1.23_81, -1.19_36,
-0.92_70, -0.83_17, -0.80_49, -0.77_06, -0.75_65, -0.78_69] )
# fmt: on
A_ : Union[str, Any] = self._load_datasamples(1 )
A_ : Tuple = ASTFeatureExtractor()
A_ : Union[str, Any] = feature_extractor(_lowerCamelCase , return_tensors='''pt''' ).input_values
        self.assertEqual(input_values.shape , (1, 1024, 128) )
self.assertTrue(torch.allclose(input_values[0, 0, :30] , _lowerCamelCase , atol=1E-4 ) )
| 4 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class UpperCamelCase_ (unittest.TestCase ):
"""simple docstring"""
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A_ : Any = tempfile.mkdtemp()
A_ : List[Any] = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
A_ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
A_ : Tuple = {
'''do_resize''': True,
'''size''': 20,
'''do_center_crop''': True,
'''crop_size''': 18,
'''do_normalize''': True,
'''image_mean''': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
'''image_std''': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
}
A_ : List[Any] = os.path.join(self.tmpdirname , _lowerCamelCase )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(_lowerCamelCase , _lowerCamelCase )
def _a ( self : Dict , **_lowerCamelCase : Tuple ):
"""simple docstring"""
return BertTokenizer.from_pretrained(self.tmpdirname , **_lowerCamelCase )
def _a ( self : Optional[int] , **_lowerCamelCase : Optional[int] ):
"""simple docstring"""
return BertTokenizerFast.from_pretrained(self.tmpdirname , **_lowerCamelCase )
def _a ( self : Optional[Any] , **_lowerCamelCase : Tuple ):
"""simple docstring"""
return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **_lowerCamelCase )
def _a ( self : Tuple ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def _a ( self : int ):
"""simple docstring"""
A_ : Union[str, Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
A_ : Any = [Image.fromarray(np.moveaxis(_lowerCamelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _a ( self : int ):
"""simple docstring"""
A_ : Tuple = self.get_tokenizer()
A_ : Tuple = self.get_rust_tokenizer()
A_ : Dict = self.get_image_processor()
A_ : List[Any] = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
processor_slow.save_pretrained(self.tmpdirname )
A_ : str = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=_lowerCamelCase )
A_ : Any = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
processor_fast.save_pretrained(self.tmpdirname )
A_ : List[Any] = AlignProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , _lowerCamelCase )
self.assertIsInstance(processor_fast.tokenizer , _lowerCamelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , _lowerCamelCase )
self.assertIsInstance(processor_fast.image_processor , _lowerCamelCase )
def _a ( self : List[Any] ):
"""simple docstring"""
A_ : List[str] = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
A_ : Optional[int] = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
A_ : Tuple = self.get_image_processor(do_normalize=_lowerCamelCase , padding_value=1.0 )
A_ : List[str] = AlignProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_lowerCamelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _lowerCamelCase )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A_ : Dict = self.get_image_processor()
A_ : Any = self.get_tokenizer()
A_ : List[str] = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
A_ : Any = self.prepare_image_inputs()
A_ : List[Any] = image_processor(_lowerCamelCase , return_tensors='''np''' )
A_ : str = processor(images=_lowerCamelCase , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _a ( self : Dict ):
"""simple docstring"""
A_ : str = self.get_image_processor()
A_ : List[str] = self.get_tokenizer()
A_ : Optional[int] = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
A_ : int = '''lower newer'''
A_ : str = processor(text=_lowerCamelCase )
A_ : Dict = tokenizer(_lowerCamelCase , padding='''max_length''' , max_length=64 )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _a ( self : str ):
"""simple docstring"""
A_ : Optional[int] = self.get_image_processor()
A_ : Optional[Any] = self.get_tokenizer()
A_ : List[str] = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
A_ : List[Any] = '''lower newer'''
A_ : Optional[int] = self.prepare_image_inputs()
A_ : List[Any] = processor(text=_lowerCamelCase , images=_lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(_lowerCamelCase ):
processor()
def _a ( self : List[str] ):
"""simple docstring"""
A_ : Optional[Any] = self.get_image_processor()
A_ : Optional[int] = self.get_tokenizer()
A_ : List[Any] = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
A_ : str = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
A_ : str = processor.batch_decode(_lowerCamelCase )
A_ : Union[str, Any] = tokenizer.batch_decode(_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
def _a ( self : Tuple ):
"""simple docstring"""
A_ : str = self.get_image_processor()
A_ : Tuple = self.get_tokenizer()
A_ : Any = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
A_ : str = '''lower newer'''
A_ : List[str] = self.prepare_image_inputs()
A_ : Tuple = processor(text=_lowerCamelCase , images=_lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
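# Illustrative end-to-end usage of the processor exercised above; the checkpoint
# name is the public ALIGN release and may require a download at run time.
from PIL import Image
import numpy as np
from transformers import AlignProcessor
align_processor = AlignProcessor.from_pretrained("kakaobrain/align-base")
demo_image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
demo_inputs = align_processor(text="a photo of a cat", images=demo_image, return_tensors="pt")
print(sorted(demo_inputs.keys()))  # attention_mask, input_ids, pixel_values, token_type_ids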
| 4 | 1 |
'''simple docstring'''
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class UpperCamelCase_ (unittest.TestCase ):
"""simple docstring"""
def _a ( self : int ):
"""simple docstring"""
A_ : List[str] = torch.nn.Linear(10 , 10 )
A_ : List[Any] = torch.optim.SGD(model.parameters() , 0.1 )
A_ : List[Any] = Accelerator()
A_ : int = accelerator.prepare(_lowerCamelCase )
try:
pickle.loads(pickle.dumps(_lowerCamelCase ) )
except Exception as e:
self.fail(f'Accelerated optimizer pickling failed with {e}' )
AcceleratorState._reset_state()
| 4 |
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
snake_case__ = logging.get_logger(__name__)
snake_case__ = """▁"""
snake_case__ = {
"""vocab_file""": """vocab.json""",
"""spm_file""": """sentencepiece.bpe.model""",
}
snake_case__ = {
"""vocab_file""": {
"""facebook/s2t-small-librispeech-asr""": (
"""https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json"""
),
},
"""spm_file""": {
"""facebook/s2t-small-librispeech-asr""": (
"""https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model"""
)
},
}
snake_case__ = {
"""facebook/s2t-small-librispeech-asr""": 10_24,
}
snake_case__ = ["""pt""", """fr""", """ru""", """nl""", """ro""", """it""", """es""", """de"""]
snake_case__ = {"""mustc""": MUSTC_LANGS}
class UpperCamelCase_ (a__ ):
"""simple docstring"""
_lowerCAmelCase = VOCAB_FILES_NAMES
_lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCAmelCase = MAX_MODEL_INPUT_SIZES
_lowerCAmelCase = ['input_ids', 'attention_mask']
_lowerCAmelCase = []
def __init__( self : Optional[int] , _lowerCamelCase : List[str] , _lowerCamelCase : List[str] , _lowerCamelCase : str="<s>" , _lowerCamelCase : Union[str, Any]="</s>" , _lowerCamelCase : Dict="<pad>" , _lowerCamelCase : str="<unk>" , _lowerCamelCase : Union[str, Any]=False , _lowerCamelCase : int=False , _lowerCamelCase : Any=None , _lowerCamelCase : Any=None , _lowerCamelCase : Optional[Dict[str, Any]] = None , **_lowerCamelCase : Optional[int] , ):
"""simple docstring"""
A_ : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_lowerCamelCase , eos_token=_lowerCamelCase , unk_token=_lowerCamelCase , pad_token=_lowerCamelCase , do_upper_case=_lowerCamelCase , do_lower_case=_lowerCamelCase , tgt_lang=_lowerCamelCase , lang_codes=_lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCamelCase , )
A_ : Optional[int] = do_upper_case
A_ : Tuple = do_lower_case
A_ : Tuple = load_json(_lowerCamelCase )
A_ : Tuple = {v: k for k, v in self.encoder.items()}
A_ : List[Any] = spm_file
A_ : List[str] = load_spm(_lowerCamelCase , self.sp_model_kwargs )
if lang_codes is not None:
A_ : Any = lang_codes
A_ : Optional[Any] = LANGUAGES[lang_codes]
A_ : Optional[Any] = [f'<lang:{lang}>' for lang in self.langs]
A_ : Union[str, Any] = {lang: self.sp_model.PieceToId(f'<lang:{lang}>' ) for lang in self.langs}
A_ : Optional[int] = self.lang_tokens
A_ : int = tgt_lang if tgt_lang is not None else self.langs[0]
self.set_tgt_lang_special_tokens(self._tgt_lang )
else:
A_ : Dict = {}
@property
def _a ( self : Tuple ):
"""simple docstring"""
return len(self.encoder )
@property
def _a ( self : int ):
"""simple docstring"""
return self._tgt_lang
@tgt_lang.setter
def _a ( self : List[str] , _lowerCamelCase : Any ):
"""simple docstring"""
A_ : int = new_tgt_lang
self.set_tgt_lang_special_tokens(_lowerCamelCase )
def _a ( self : Tuple , _lowerCamelCase : str ):
"""simple docstring"""
A_ : List[str] = self.lang_code_to_id[tgt_lang]
A_ : Optional[Any] = [lang_code_id]
def _a ( self : Optional[Any] , _lowerCamelCase : str ):
"""simple docstring"""
return self.sp_model.encode(_lowerCamelCase , out_type=_lowerCamelCase )
def _a ( self : List[Any] , _lowerCamelCase : int ):
"""simple docstring"""
return self.encoder.get(_lowerCamelCase , self.encoder[self.unk_token] )
def _a ( self : int , _lowerCamelCase : int ):
"""simple docstring"""
return self.decoder.get(_lowerCamelCase , self.unk_token )
def _a ( self : int , _lowerCamelCase : List[str] ):
"""simple docstring"""
A_ : List[Any] = []
A_ : Any = ''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
A_ : Union[str, Any] = self.sp_model.decode(_lowerCamelCase )
out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
A_ : Optional[Any] = []
else:
current_sub_tokens.append(_lowerCamelCase )
A_ : Tuple = self.sp_model.decode(_lowerCamelCase )
out_string += decoded.upper() if self.do_upper_case else decoded
return out_string.strip()
def _a ( self : int , _lowerCamelCase : Dict , _lowerCamelCase : Any=None ):
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id]
def _a ( self : List[Any] , _lowerCamelCase : List[int] , _lowerCamelCase : Optional[List[int]] = None , _lowerCamelCase : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowerCamelCase , token_ids_a=_lowerCamelCase , already_has_special_tokens=_lowerCamelCase )
A_ : Tuple = [1] * len(self.prefix_tokens )
A_ : Tuple = [1]
if token_ids_a is None:
return prefix_ones + ([0] * len(_lowerCamelCase )) + suffix_ones
return prefix_ones + ([0] * len(_lowerCamelCase )) + ([0] * len(_lowerCamelCase )) + suffix_ones
def _a ( self : Dict ):
"""simple docstring"""
A_ : Union[str, Any] = self.encoder.copy()
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Union[str, Any] ):
"""simple docstring"""
A_ : Dict = self.__dict__.copy()
A_ : List[Any] = None
return state
def __setstate__( self : List[str] , _lowerCamelCase : Dict ):
"""simple docstring"""
A_ : Dict = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
A_ : Optional[int] = {}
A_ : int = load_spm(self.spm_file , self.sp_model_kwargs )
def _a ( self : Optional[Any] , _lowerCamelCase : str , _lowerCamelCase : Optional[str] = None ):
"""simple docstring"""
A_ : Dict = Path(_lowerCamelCase )
assert save_dir.is_dir(), f'{save_directory} should be a directory'
A_ : Optional[int] = save_dir / (
(filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''vocab_file''']
)
A_ : Optional[int] = save_dir / (
(filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''spm_file''']
)
save_json(self.encoder , _lowerCamelCase )
if os.path.abspath(self.spm_file ) != os.path.abspath(_lowerCamelCase ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , _lowerCamelCase )
elif not os.path.isfile(self.spm_file ):
with open(_lowerCamelCase , '''wb''' ) as fi:
A_ : List[str] = self.sp_model.serialized_model_proto()
fi.write(_lowerCamelCase )
return (str(_lowerCamelCase ), str(_lowerCamelCase ))
def snake_case__ ( lowerCamelCase__ : str , lowerCamelCase__ : Dict[str, Any] ) -> sentencepiece.SentencePieceProcessor:
A_ : Tuple = sentencepiece.SentencePieceProcessor(**lowerCamelCase__ )
spm.Load(str(lowerCamelCase__ ) )
return spm
def snake_case__ ( lowerCamelCase__ : str ) -> Union[Dict, List]:
with open(lowerCamelCase__ , '''r''' ) as f:
return json.load(lowerCamelCase__ )
def snake_case__ ( lowerCamelCase__ : Any , lowerCamelCase__ : str ) -> None:
with open(lowerCamelCase__ , '''w''' ) as f:
json.dump(lowerCamelCase__ , lowerCamelCase__ , indent=2 )
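# Illustrative usage of the tokenizer above via its public upstream name,
# Speech2TextTokenizer (needs the `sentencepiece` extra; the vocab and spm
# files are downloaded on first use).
from transformers import Speech2TextTokenizer
tok = Speech2TextTokenizer.from_pretrained("facebook/s2t-small-librispeech-asr")
ids = tok("hello world").input_ids
print(tok.decode(ids, skip_special_tokens=True))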
| 4 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
snake_case__ = {
"""configuration_lilt""": ["""LILT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LiltConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ = [
"""LILT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LiltForQuestionAnswering""",
"""LiltForSequenceClassification""",
"""LiltForTokenClassification""",
"""LiltModel""",
"""LiltPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
snake_case__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
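# Illustrative effect of the lazy import structure above: the public symbols are
# importable as usual, resolution simply happens on first attribute access.
from transformers import LiltConfig, LiltModel
lilt_config = LiltConfig()            # resolved lazily through _LazyModule
lilt_model = LiltModel(lilt_config)   # randomly initialised, no download needed
print(type(lilt_model).__name__)      # LiltModel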
| 4 |
'''simple docstring'''
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
snake_case__ = sys.version_info >= (3, 10)
def snake_case__ ( lowerCamelCase__ : Union[str, Any]=None , lowerCamelCase__ : str=None ) -> List[Any]:
return field(default_factory=lambda: default , metadata=lowerCamelCase__ )
@dataclass
class UpperCamelCase_ :
"""simple docstring"""
_lowerCAmelCase = 42
_lowerCAmelCase = 42
_lowerCAmelCase = 42
_lowerCAmelCase = 42
@dataclass
class UpperCamelCase_ :
"""simple docstring"""
_lowerCAmelCase = 4_2
_lowerCAmelCase = field(default='toto', metadata={'help': 'help message'} )
@dataclass
class UpperCamelCase_ :
"""simple docstring"""
_lowerCAmelCase = False
_lowerCAmelCase = True
_lowerCAmelCase = None
class UpperCamelCase_ (a__ ):
"""simple docstring"""
_lowerCAmelCase = 'titi'
_lowerCAmelCase = 'toto'
class UpperCamelCase_ (a__ ):
"""simple docstring"""
_lowerCAmelCase = 'titi'
_lowerCAmelCase = 'toto'
_lowerCAmelCase = 4_2
@dataclass
class UpperCamelCase_ :
"""simple docstring"""
_lowerCAmelCase = "toto"
def _a ( self : Optional[Any] ):
"""simple docstring"""
A_ : Optional[int] = BasicEnum(self.foo )
@dataclass
class UpperCamelCase_ :
"""simple docstring"""
_lowerCAmelCase = "toto"
def _a ( self : Tuple ):
"""simple docstring"""
A_ : Optional[Any] = MixedTypeEnum(self.foo )
@dataclass
class UpperCamelCase_ :
"""simple docstring"""
_lowerCAmelCase = None
_lowerCAmelCase = field(default=a__, metadata={'help': 'help message'} )
_lowerCAmelCase = None
_lowerCAmelCase = list_field(default=[] )
_lowerCAmelCase = list_field(default=[] )
@dataclass
class UpperCamelCase_ :
"""simple docstring"""
_lowerCAmelCase = list_field(default=[] )
_lowerCAmelCase = list_field(default=[1, 2, 3] )
_lowerCAmelCase = list_field(default=['Hallo', 'Bonjour', 'Hello'] )
_lowerCAmelCase = list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class UpperCamelCase_ :
"""simple docstring"""
_lowerCAmelCase = field()
_lowerCAmelCase = field()
_lowerCAmelCase = field()
def _a ( self : Tuple ):
"""simple docstring"""
A_ : Tuple = BasicEnum(self.required_enum )
@dataclass
class UpperCamelCase_ :
"""simple docstring"""
_lowerCAmelCase = 42
_lowerCAmelCase = field()
_lowerCAmelCase = None
_lowerCAmelCase = field(default='toto', metadata={'help': 'help message'} )
_lowerCAmelCase = list_field(default=['Hallo', 'Bonjour', 'Hello'] )
if is_python_no_less_than_3_10:
@dataclass
class UpperCamelCase_ :
"""simple docstring"""
_lowerCAmelCase = False
_lowerCAmelCase = True
_lowerCAmelCase = None
@dataclass
class UpperCamelCase_ :
"""simple docstring"""
_lowerCAmelCase = None
_lowerCAmelCase = field(default=a__, metadata={'help': 'help message'} )
_lowerCAmelCase = None
_lowerCAmelCase = list_field(default=[] )
_lowerCAmelCase = list_field(default=[] )
class UpperCamelCase_ (unittest.TestCase ):
"""simple docstring"""
def _a ( self : List[str] , _lowerCamelCase : argparse.ArgumentParser , _lowerCamelCase : argparse.ArgumentParser ):
"""simple docstring"""
self.assertEqual(len(a._actions ) , len(b._actions ) )
for x, y in zip(a._actions , b._actions ):
A_ : Union[str, Any] = {k: v for k, v in vars(_lowerCamelCase ).items() if k != '''container'''}
A_ : Optional[Any] = {k: v for k, v in vars(_lowerCamelCase ).items() if k != '''container'''}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get('''choices''' , _lowerCamelCase ) and yy.get('''choices''' , _lowerCamelCase ):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx['''type'''](_lowerCamelCase ) , yy['''type'''](_lowerCamelCase ) )
del xx["type"], yy["type"]
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
def _a ( self : Optional[int] ):
"""simple docstring"""
A_ : Union[str, Any] = HfArgumentParser(_lowerCamelCase )
A_ : Optional[Any] = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=_lowerCamelCase , required=_lowerCamelCase )
expected.add_argument('''--bar''' , type=_lowerCamelCase , required=_lowerCamelCase )
expected.add_argument('''--baz''' , type=_lowerCamelCase , required=_lowerCamelCase )
expected.add_argument('''--flag''' , type=_lowerCamelCase , default=_lowerCamelCase , const=_lowerCamelCase , nargs='''?''' )
self.argparsersEqual(_lowerCamelCase , _lowerCamelCase )
A_ : Union[str, Any] = ['''--foo''', '''1''', '''--baz''', '''quux''', '''--bar''', '''0.5''']
((A_) ,) : List[str] = parser.parse_args_into_dataclasses(_lowerCamelCase , look_for_args_file=_lowerCamelCase )
self.assertFalse(example.flag )
def _a ( self : Dict ):
"""simple docstring"""
A_ : int = HfArgumentParser(_lowerCamelCase )
A_ : int = argparse.ArgumentParser()
expected.add_argument('''--foo''' , default=42 , type=_lowerCamelCase )
expected.add_argument('''--baz''' , default='''toto''' , type=_lowerCamelCase , help='''help message''' )
self.argparsersEqual(_lowerCamelCase , _lowerCamelCase )
def _a ( self : Dict ):
"""simple docstring"""
A_ : Any = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=_lowerCamelCase , default=_lowerCamelCase , const=_lowerCamelCase , nargs='''?''' )
expected.add_argument('''--baz''' , type=_lowerCamelCase , default=_lowerCamelCase , const=_lowerCamelCase , nargs='''?''' )
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument('''--no_baz''' , action='''store_false''' , default=_lowerCamelCase , dest='''baz''' )
expected.add_argument('''--opt''' , type=_lowerCamelCase , default=_lowerCamelCase )
A_ : Dict = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(_lowerCamelCase )
for dataclass_type in dataclass_types:
A_ : Any = HfArgumentParser(_lowerCamelCase )
self.argparsersEqual(_lowerCamelCase , _lowerCamelCase )
A_ : List[Any] = parser.parse_args([] )
self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , baz=_lowerCamelCase , opt=_lowerCamelCase ) )
A_ : Optional[int] = parser.parse_args(['''--foo''', '''--no_baz'''] )
self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , baz=_lowerCamelCase , opt=_lowerCamelCase ) )
A_ : Union[str, Any] = parser.parse_args(['''--foo''', '''--baz'''] )
self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , baz=_lowerCamelCase , opt=_lowerCamelCase ) )
A_ : List[str] = parser.parse_args(['''--foo''', '''True''', '''--baz''', '''True''', '''--opt''', '''True'''] )
self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , baz=_lowerCamelCase , opt=_lowerCamelCase ) )
A_ : List[Any] = parser.parse_args(['''--foo''', '''False''', '''--baz''', '''False''', '''--opt''', '''False'''] )
self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , baz=_lowerCamelCase , opt=_lowerCamelCase ) )
def _a ( self : List[Any] ):
"""simple docstring"""
A_ : str = HfArgumentParser(_lowerCamelCase )
A_ : Optional[int] = argparse.ArgumentParser()
expected.add_argument(
'''--foo''' , default='''toto''' , choices=['''titi''', '''toto''', 42] , type=make_choice_type_function(['''titi''', '''toto''', 42] ) , )
self.argparsersEqual(_lowerCamelCase , _lowerCamelCase )
A_ : str = parser.parse_args([] )
self.assertEqual(args.foo , '''toto''' )
A_ : List[Any] = parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
A_ : int = parser.parse_args(['''--foo''', '''titi'''] )
self.assertEqual(args.foo , '''titi''' )
A_ : Dict = parser.parse_args_into_dataclasses(['''--foo''', '''titi'''] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
A_ : Tuple = parser.parse_args(['''--foo''', '''42'''] )
self.assertEqual(args.foo , 42 )
A_ : List[str] = parser.parse_args_into_dataclasses(['''--foo''', '''42'''] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
def _a ( self : Optional[int] ):
"""simple docstring"""
@dataclass
class UpperCamelCase_ :
"""simple docstring"""
_lowerCAmelCase = "toto"
A_ : List[str] = HfArgumentParser(_lowerCamelCase )
A_ : Tuple = argparse.ArgumentParser()
expected.add_argument(
'''--foo''' , default='''toto''' , choices=('''titi''', '''toto''', 42) , type=make_choice_type_function(['''titi''', '''toto''', 42] ) , )
self.argparsersEqual(_lowerCamelCase , _lowerCamelCase )
A_ : Tuple = parser.parse_args([] )
self.assertEqual(args.foo , '''toto''' )
A_ : List[str] = parser.parse_args(['''--foo''', '''titi'''] )
self.assertEqual(args.foo , '''titi''' )
A_ : int = parser.parse_args(['''--foo''', '''42'''] )
self.assertEqual(args.foo , 42 )
def _a ( self : Dict ):
"""simple docstring"""
A_ : int = HfArgumentParser(_lowerCamelCase )
A_ : List[Any] = argparse.ArgumentParser()
expected.add_argument('''--foo_int''' , nargs='''+''' , default=[] , type=_lowerCamelCase )
expected.add_argument('''--bar_int''' , nargs='''+''' , default=[1, 2, 3] , type=_lowerCamelCase )
expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=_lowerCamelCase )
expected.add_argument('''--foo_float''' , nargs='''+''' , default=[0.1, 0.2, 0.3] , type=_lowerCamelCase )
self.argparsersEqual(_lowerCamelCase , _lowerCamelCase )
A_ : Optional[int] = parser.parse_args([] )
self.assertEqual(
_lowerCamelCase , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['''Hallo''', '''Bonjour''', '''Hello'''] , foo_float=[0.1, 0.2, 0.3] ) , )
A_ : str = parser.parse_args('''--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'''.split() )
self.assertEqual(_lowerCamelCase , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['''a''', '''b''', '''c'''] , foo_float=[0.1, 0.7] ) )
def _a ( self : Dict ):
"""simple docstring"""
A_ : Optional[Any] = argparse.ArgumentParser()
expected.add_argument('''--foo''' , default=_lowerCamelCase , type=_lowerCamelCase )
expected.add_argument('''--bar''' , default=_lowerCamelCase , type=_lowerCamelCase , help='''help message''' )
expected.add_argument('''--baz''' , default=_lowerCamelCase , type=_lowerCamelCase )
expected.add_argument('''--ces''' , nargs='''+''' , default=[] , type=_lowerCamelCase )
expected.add_argument('''--des''' , nargs='''+''' , default=[] , type=_lowerCamelCase )
A_ : Tuple = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(_lowerCamelCase )
for dataclass_type in dataclass_types:
A_ : int = HfArgumentParser(_lowerCamelCase )
self.argparsersEqual(_lowerCamelCase , _lowerCamelCase )
A_ : List[Any] = parser.parse_args([] )
self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , bar=_lowerCamelCase , baz=_lowerCamelCase , ces=[] , des=[] ) )
A_ : Optional[Any] = parser.parse_args('''--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'''.split() )
self.assertEqual(_lowerCamelCase , Namespace(foo=12 , bar=3.14 , baz='''42''' , ces=['''a''', '''b''', '''c'''] , des=[1, 2, 3] ) )
def _a ( self : List[Any] ):
"""simple docstring"""
A_ : List[Any] = HfArgumentParser(_lowerCamelCase )
A_ : Dict = argparse.ArgumentParser()
expected.add_argument('''--required_list''' , nargs='''+''' , type=_lowerCamelCase , required=_lowerCamelCase )
expected.add_argument('''--required_str''' , type=_lowerCamelCase , required=_lowerCamelCase )
expected.add_argument(
'''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=_lowerCamelCase , )
self.argparsersEqual(_lowerCamelCase , _lowerCamelCase )
def _a ( self : Optional[Any] ):
"""simple docstring"""
A_ : Union[str, Any] = HfArgumentParser(_lowerCamelCase )
A_ : List[Any] = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=_lowerCamelCase , required=_lowerCamelCase )
expected.add_argument(
'''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=_lowerCamelCase , )
expected.add_argument('''--opt''' , type=_lowerCamelCase , default=_lowerCamelCase )
expected.add_argument('''--baz''' , default='''toto''' , type=_lowerCamelCase , help='''help message''' )
expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=_lowerCamelCase )
self.argparsersEqual(_lowerCamelCase , _lowerCamelCase )
def _a ( self : Tuple ):
"""simple docstring"""
A_ : List[Any] = HfArgumentParser(_lowerCamelCase )
A_ : Union[str, Any] = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
A_ : Optional[int] = parser.parse_dict(_lowerCamelCase )[0]
A_ : str = BasicExample(**_lowerCamelCase )
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
def _a ( self : List[str] ):
"""simple docstring"""
A_ : Any = HfArgumentParser(_lowerCamelCase )
A_ : List[str] = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
'''extra''': 42,
}
self.assertRaises(_lowerCamelCase , parser.parse_dict , _lowerCamelCase , allow_extra_keys=_lowerCamelCase )
def _a ( self : Optional[Any] ):
"""simple docstring"""
A_ : Union[str, Any] = HfArgumentParser(_lowerCamelCase )
A_ : List[str] = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
A_ : Tuple = os.path.join(_lowerCamelCase , '''temp_json''' )
os.mkdir(_lowerCamelCase )
with open(temp_local_path + '''.json''' , '''w+''' ) as f:
json.dump(_lowerCamelCase , _lowerCamelCase )
            A_ : List[str] = parser.parse_json_file(Path(temp_local_path + '''.json''' ) )[0]
A_ : Optional[Any] = BasicExample(**_lowerCamelCase )
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
def _a ( self : int ):
"""simple docstring"""
A_ : int = HfArgumentParser(_lowerCamelCase )
A_ : Tuple = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
A_ : int = os.path.join(_lowerCamelCase , '''temp_yaml''' )
os.mkdir(_lowerCamelCase )
with open(temp_local_path + '''.yaml''' , '''w+''' ) as f:
yaml.dump(_lowerCamelCase , _lowerCamelCase )
A_ : Optional[Any] = parser.parse_yaml_file(Path(temp_local_path + '''.yaml''' ) )[0]
A_ : int = BasicExample(**_lowerCamelCase )
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A_ : Dict = HfArgumentParser(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
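# Editor's sketch (not part of the original test file): the dataclass-driven CLI
# parsing these tests exercise, shown standalone. `_Demo` is a hypothetical
# stand-in for the `BasicExample` dataclass assumed above.
from dataclasses import dataclass

from transformers import HfArgumentParser

@dataclass
class _Demo:
    foo: int = 0
    bar: float = 0.0

parser = HfArgumentParser(_Demo)
(demo,) = parser.parse_args_into_dataclasses(args=["--foo", "12", "--bar", "3.14"])
assert demo.foo == 12 and abs(demo.bar - 3.14) < 1e-9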
| 4 | 1 |
'''simple docstring'''
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
snake_case__ = logging.get_logger(__name__)
# General docstring
snake_case__ = """RegNetConfig"""
# Base docstring
snake_case__ = """facebook/regnet-y-040"""
snake_case__ = [1, 10_88, 7, 7]
# Image classification docstring
snake_case__ = """facebook/regnet-y-040"""
snake_case__ = """tabby, tabby cat"""
snake_case__ = [
"""facebook/regnet-y-040""",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class UpperCamelCase_ (nn.Module ):
"""simple docstring"""
def __init__( self : Dict , _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : int = 3 , _lowerCamelCase : int = 1 , _lowerCamelCase : int = 1 , _lowerCamelCase : Optional[str] = "relu" , ):
"""simple docstring"""
super().__init__()
        A_ : Tuple = nn.Conv2d(
            _lowerCamelCase , _lowerCamelCase , kernel_size=_lowerCamelCase , stride=_lowerCamelCase , padding=kernel_size // 2 , groups=_lowerCamelCase , bias=_lowerCamelCase , )
        A_ : str = nn.BatchNorm2d(_lowerCamelCase )
        A_ : Optional[int] = ACT2FN[activation] if activation is not None else nn.Identity()
def _a ( self : Optional[Any] , _lowerCamelCase : Union[str, Any] ):
"""simple docstring"""
A_ : Union[str, Any] = self.convolution(_lowerCamelCase )
A_ : Optional[int] = self.normalization(_lowerCamelCase )
A_ : Optional[Any] = self.activation(_lowerCamelCase )
return hidden_state
class UpperCamelCase_ (nn.Module ):
"""simple docstring"""
def __init__( self : List[Any] , _lowerCamelCase : RegNetConfig ):
"""simple docstring"""
super().__init__()
A_ : List[Any] = RegNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act )
A_ : Any = config.num_channels
def _a ( self : Dict , _lowerCamelCase : List[str] ):
"""simple docstring"""
A_ : Optional[int] = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
'''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' )
A_ : Dict = self.embedder(_lowerCamelCase )
return hidden_state
class UpperCamelCase_ (nn.Module ):
"""simple docstring"""
def __init__( self : List[str] , _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : int = 2 ):
"""simple docstring"""
super().__init__()
        A_ : Union[str, Any] = nn.Conv2d(_lowerCamelCase , _lowerCamelCase , kernel_size=1 , stride=_lowerCamelCase , bias=_lowerCamelCase )
        A_ : Optional[int] = nn.BatchNorm2d(_lowerCamelCase )
def _a ( self : List[Any] , _lowerCamelCase : Tensor ):
"""simple docstring"""
A_ : Tuple = self.convolution(_lowerCamelCase )
A_ : List[str] = self.normalization(_lowerCamelCase )
return hidden_state
class UpperCamelCase_ (nn.Module ):
"""simple docstring"""
def __init__( self : Tuple , _lowerCamelCase : int , _lowerCamelCase : int ):
"""simple docstring"""
super().__init__()
        A_ : Union[str, Any] = nn.AdaptiveAvgPool2d((1, 1) )
        A_ : Union[str, Any] = nn.Sequential(
            nn.Conv2d(_lowerCamelCase , _lowerCamelCase , kernel_size=1 ) , nn.ReLU() , nn.Conv2d(_lowerCamelCase , _lowerCamelCase , kernel_size=1 ) , nn.Sigmoid() , )
def _a ( self : List[str] , _lowerCamelCase : List[str] ):
"""simple docstring"""
A_ : Any = self.pooler(_lowerCamelCase )
A_ : Union[str, Any] = self.attention(_lowerCamelCase )
A_ : List[str] = hidden_state * attention
return hidden_state
class UpperCamelCase_ (nn.Module ):
"""simple docstring"""
def __init__( self : Union[str, Any] , _lowerCamelCase : RegNetConfig , _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : int = 1 ):
"""simple docstring"""
super().__init__()
A_ : int = in_channels != out_channels or stride != 1
A_ : Dict = max(1 , out_channels // config.groups_width )
A_ : int = (
RegNetShortCut(_lowerCamelCase , _lowerCamelCase , stride=_lowerCamelCase ) if should_apply_shortcut else nn.Identity()
)
A_ : Tuple = nn.Sequential(
RegNetConvLayer(_lowerCamelCase , _lowerCamelCase , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(_lowerCamelCase , _lowerCamelCase , stride=_lowerCamelCase , groups=_lowerCamelCase , activation=config.hidden_act ) , RegNetConvLayer(_lowerCamelCase , _lowerCamelCase , kernel_size=1 , activation=_lowerCamelCase ) , )
        A_ : Any = ACT2FN[config.hidden_act]
def _a ( self : Union[str, Any] , _lowerCamelCase : Tuple ):
"""simple docstring"""
A_ : int = hidden_state
A_ : Any = self.layer(_lowerCamelCase )
A_ : str = self.shortcut(_lowerCamelCase )
hidden_state += residual
A_ : Optional[int] = self.activation(_lowerCamelCase )
return hidden_state
class UpperCamelCase_ (nn.Module ):
"""simple docstring"""
def __init__( self : Dict , _lowerCamelCase : RegNetConfig , _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : int = 1 ):
"""simple docstring"""
super().__init__()
A_ : Any = in_channels != out_channels or stride != 1
A_ : Tuple = max(1 , out_channels // config.groups_width )
A_ : Tuple = (
RegNetShortCut(_lowerCamelCase , _lowerCamelCase , stride=_lowerCamelCase ) if should_apply_shortcut else nn.Identity()
)
A_ : Dict = nn.Sequential(
RegNetConvLayer(_lowerCamelCase , _lowerCamelCase , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(_lowerCamelCase , _lowerCamelCase , stride=_lowerCamelCase , groups=_lowerCamelCase , activation=config.hidden_act ) , RegNetSELayer(_lowerCamelCase , reduced_channels=int(round(in_channels / 4 ) ) ) , RegNetConvLayer(_lowerCamelCase , _lowerCamelCase , kernel_size=1 , activation=_lowerCamelCase ) , )
        A_ : Dict = ACT2FN[config.hidden_act]
def _a ( self : List[str] , _lowerCamelCase : List[Any] ):
"""simple docstring"""
A_ : Any = hidden_state
A_ : Optional[int] = self.layer(_lowerCamelCase )
A_ : int = self.shortcut(_lowerCamelCase )
hidden_state += residual
A_ : Union[str, Any] = self.activation(_lowerCamelCase )
return hidden_state
class UpperCamelCase_ (nn.Module ):
"""simple docstring"""
def __init__( self : str , _lowerCamelCase : RegNetConfig , _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : int = 2 , _lowerCamelCase : int = 2 , ):
"""simple docstring"""
super().__init__()
A_ : Union[str, Any] = RegNetXLayer if config.layer_type == '''x''' else RegNetYLayer
A_ : Union[str, Any] = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , stride=_lowerCamelCase , ) , *[layer(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) for _ in range(depth - 1 )] , )
def _a ( self : Union[str, Any] , _lowerCamelCase : Dict ):
"""simple docstring"""
A_ : List[Any] = self.layers(_lowerCamelCase )
return hidden_state
class UpperCamelCase_ (nn.Module ):
"""simple docstring"""
def __init__( self : Tuple , _lowerCamelCase : RegNetConfig ):
"""simple docstring"""
super().__init__()
A_ : Optional[int] = nn.ModuleList([] )
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
RegNetStage(
_lowerCamelCase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
A_ : Optional[Any] = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(_lowerCamelCase , config.depths[1:] ):
self.stages.append(RegNetStage(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , depth=_lowerCamelCase ) )
def _a ( self : Optional[int] , _lowerCamelCase : Tensor , _lowerCamelCase : bool = False , _lowerCamelCase : bool = True ):
"""simple docstring"""
A_ : Tuple = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
A_ : Optional[int] = hidden_states + (hidden_state,)
A_ : Optional[Any] = stage_module(_lowerCamelCase )
if output_hidden_states:
A_ : Optional[int] = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=_lowerCamelCase , hidden_states=_lowerCamelCase )
class UpperCamelCase_ (a__ ):
"""simple docstring"""
_lowerCAmelCase = RegNetConfig
_lowerCAmelCase = 'regnet'
_lowerCAmelCase = 'pixel_values'
_lowerCAmelCase = True
def _a ( self : Dict , _lowerCamelCase : Tuple ):
"""simple docstring"""
        if isinstance(_lowerCamelCase , nn.Conv2d ):
            nn.init.kaiming_normal_(module.weight , mode='''fan_out''' , nonlinearity='''relu''' )
        elif isinstance(_lowerCamelCase , (nn.BatchNorm2d, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def _a ( self : Tuple , _lowerCamelCase : str , _lowerCamelCase : List[str]=False ):
"""simple docstring"""
if isinstance(_lowerCamelCase , _lowerCamelCase ):
A_ : str = value
snake_case__ = R"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
snake_case__ = R"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
'The bare RegNet model outputting raw features without any specific head on top.', a__, )
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class UpperCamelCase_ (a__ ):
"""simple docstring"""
def __init__( self : Optional[int] , _lowerCamelCase : str ):
"""simple docstring"""
super().__init__(_lowerCamelCase )
A_ : List[str] = config
A_ : Optional[Any] = RegNetEmbeddings(_lowerCamelCase )
A_ : Union[str, Any] = RegNetEncoder(_lowerCamelCase )
        A_ : Optional[Any] = nn.AdaptiveAvgPool2d((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_lowerCamelCase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=_lowerCamelCase , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def _a ( self : Tuple , _lowerCamelCase : Tensor , _lowerCamelCase : Optional[bool] = None , _lowerCamelCase : Optional[bool] = None ):
"""simple docstring"""
A_ : List[Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
A_ : Dict = return_dict if return_dict is not None else self.config.use_return_dict
A_ : Union[str, Any] = self.embedder(_lowerCamelCase )
A_ : Dict = self.encoder(
_lowerCamelCase , output_hidden_states=_lowerCamelCase , return_dict=_lowerCamelCase )
A_ : Optional[Any] = encoder_outputs[0]
A_ : Union[str, Any] = self.pooler(_lowerCamelCase )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=_lowerCamelCase , pooler_output=_lowerCamelCase , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
'\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ', a__, )
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class UpperCamelCase_ (a__ ):
"""simple docstring"""
def __init__( self : Tuple , _lowerCamelCase : Optional[int] ):
"""simple docstring"""
super().__init__(_lowerCamelCase )
A_ : Union[str, Any] = config.num_labels
A_ : List[Any] = RegNetModel(_lowerCamelCase )
# classification head
A_ : str = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_lowerCamelCase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_lowerCamelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def _a ( self : Any , _lowerCamelCase : Optional[torch.FloatTensor] = None , _lowerCamelCase : Optional[torch.LongTensor] = None , _lowerCamelCase : Optional[bool] = None , _lowerCamelCase : Optional[bool] = None , ):
"""simple docstring"""
A_ : int = return_dict if return_dict is not None else self.config.use_return_dict
A_ : Any = self.regnet(_lowerCamelCase , output_hidden_states=_lowerCamelCase , return_dict=_lowerCamelCase )
A_ : Union[str, Any] = outputs.pooler_output if return_dict else outputs[1]
A_ : Optional[Any] = self.classifier(_lowerCamelCase )
A_ : Optional[Any] = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
A_ : List[str] = '''regression'''
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
A_ : List[Any] = '''single_label_classification'''
else:
A_ : str = '''multi_label_classification'''
if self.config.problem_type == "regression":
A_ : Dict = MSELoss()
if self.num_labels == 1:
A_ : Optional[int] = loss_fct(logits.squeeze() , labels.squeeze() )
else:
A_ : List[str] = loss_fct(_lowerCamelCase , _lowerCamelCase )
elif self.config.problem_type == "single_label_classification":
A_ : List[Any] = CrossEntropyLoss()
A_ : Union[str, Any] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
A_ : Tuple = BCEWithLogitsLoss()
A_ : Optional[int] = loss_fct(_lowerCamelCase , _lowerCamelCase )
if not return_dict:
A_ : Optional[int] = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=_lowerCamelCase , logits=_lowerCamelCase , hidden_states=outputs.hidden_states )
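# Editor's sketch (illustrative, not from the original file): a forward pass
# through the public RegNet classification model that the classes above implement.
import torch
from transformers import RegNetConfig, RegNetForImageClassification

model = RegNetForImageClassification(RegNetConfig(num_labels=10))
with torch.no_grad():
    logits = model(torch.randn(1, 3, 224, 224)).logits
assert logits.shape == (1, 10)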
| 4 |
'''simple docstring'''
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
snake_case__ = get_tests_dir("""fixtures""")
class UpperCamelCase_ (unittest.TestCase ):
"""simple docstring"""
def _a ( self : List[str] ):
"""simple docstring"""
A_ : List[Any] = mock.Mock()
A_ : List[str] = 500
A_ : Tuple = {}
A_ : int = HTTPError
A_ : Optional[Any] = {}
# Download this model to make sure it's in the cache.
A_ : Tuple = ViTImageProcessor.from_pretrained('''hf-internal-testing/tiny-random-vit''' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('''requests.Session.request''' , return_value=_lowerCamelCase ) as mock_head:
A_ : List[Any] = ViTImageProcessor.from_pretrained('''hf-internal-testing/tiny-random-vit''' )
            # This check ensures that the fake head request was actually called
mock_head.assert_called()
def _a ( self : Tuple ):
"""simple docstring"""
A_ : Tuple = ViTImageProcessor.from_pretrained(
'''https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json''' )
def _a ( self : Dict ):
"""simple docstring"""
with self.assertRaises(_lowerCamelCase ):
# config is in subfolder, the following should not work without specifying the subfolder
A_ : Any = AutoImageProcessor.from_pretrained('''hf-internal-testing/stable-diffusion-all-variants''' )
A_ : Tuple = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/stable-diffusion-all-variants''' , subfolder='''feature_extractor''' )
self.assertIsNotNone(_lowerCamelCase )
@is_staging_test
class UpperCamelCase_ (unittest.TestCase ):
"""simple docstring"""
@classmethod
def _a ( cls : Tuple ):
"""simple docstring"""
A_ : int = TOKEN
HfFolder.save_token(_lowerCamelCase )
@classmethod
def _a ( cls : str ):
"""simple docstring"""
try:
delete_repo(token=cls._token , repo_id='''test-image-processor''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-image-processor-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-image-processor''' )
except HTTPError:
pass
def _a ( self : List[Any] ):
"""simple docstring"""
A_ : Dict = ViTImageProcessor.from_pretrained(_lowerCamelCase )
image_processor.push_to_hub('''test-image-processor''' , use_auth_token=self._token )
A_ : Optional[int] = ViTImageProcessor.from_pretrained(f'{USER}/test-image-processor' )
for k, v in image_processor.__dict__.items():
self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id='''test-image-processor''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
_lowerCamelCase , repo_id='''test-image-processor''' , push_to_hub=_lowerCamelCase , use_auth_token=self._token )
A_ : List[Any] = ViTImageProcessor.from_pretrained(f'{USER}/test-image-processor' )
for k, v in image_processor.__dict__.items():
self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) )
def _a ( self : Optional[Any] ):
"""simple docstring"""
A_ : int = ViTImageProcessor.from_pretrained(_lowerCamelCase )
image_processor.push_to_hub('''valid_org/test-image-processor''' , use_auth_token=self._token )
A_ : List[str] = ViTImageProcessor.from_pretrained('''valid_org/test-image-processor''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-image-processor''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
_lowerCamelCase , repo_id='''valid_org/test-image-processor-org''' , push_to_hub=_lowerCamelCase , use_auth_token=self._token )
A_ : Any = ViTImageProcessor.from_pretrained('''valid_org/test-image-processor-org''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) )
def _a ( self : Optional[Any] ):
"""simple docstring"""
CustomImageProcessor.register_for_auto_class()
A_ : Any = CustomImageProcessor.from_pretrained(_lowerCamelCase )
image_processor.push_to_hub('''test-dynamic-image-processor''' , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
image_processor.auto_map , {'''AutoImageProcessor''': '''custom_image_processing.CustomImageProcessor'''} , )
A_ : str = AutoImageProcessor.from_pretrained(
f'{USER}/test-dynamic-image-processor' , trust_remote_code=_lowerCamelCase )
# Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
self.assertEqual(new_image_processor.__class__.__name__ , '''CustomImageProcessor''' )
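# Editor's sketch of the save/reload round trip the hub tests above depend on;
# the local directory name is illustrative.
from transformers import ViTImageProcessor

processor = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
processor.save_pretrained("./tiny-vit-processor")
reloaded = ViTImageProcessor.from_pretrained("./tiny-vit-processor")
assert reloaded.to_dict() == processor.to_dict()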
| 4 | 1 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
snake_case__ = """
Human: <<task>>
Assistant: """
snake_case__ = """huggingface-tools/default-prompts"""
snake_case__ = {"""chat""": """chat_prompt_template.txt""", """run""": """run_prompt_template.txt"""}
def snake_case__ ( lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Tuple , lowerCamelCase__ : Dict="run" ) -> Union[str, Any]:
if prompt_or_repo_id is None:
A_ : Optional[int] = DEFAULT_PROMPTS_REPO
# prompt is considered a repo ID when it does not contain any kind of space
if re.search('''\\s''' , lowerCamelCase__ ) is not None:
return prompt_or_repo_id
A_ : Optional[Any] = cached_file(
lowerCamelCase__ , PROMPT_FILES[mode] , repo_type='''dataset''' , user_agent={'''agent''': agent_name} )
with open(lowerCamelCase__ , '''r''' , encoding='''utf-8''' ) as f:
return f.read()
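# Editor's sketch (added for illustration): the whitespace heuristic used above,
# shown in isolation — any string containing whitespace is treated as an inline
# prompt and returned as-is; anything else is assumed to be a Hub repo id.
import re

def _looks_like_inline_prompt(prompt_or_repo_id: str) -> bool:
    return re.search(r"\s", prompt_or_repo_id) is not None

assert _looks_like_inline_prompt("Human: <<task>>")
assert not _looks_like_inline_prompt("huggingface-tools/default-prompts")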
| 4 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class UpperCamelCase_ (a__ ):
"""simple docstring"""
    _lowerCAmelCase = (
        'This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image. '
        'It takes two arguments named `image`, which should be the original image, and `label`, which should be a text '
        'describing the elements that should be identified in the segmentation mask. The tool returns the mask.'
    )
_lowerCAmelCase = 'CIDAS/clipseg-rd64-refined'
_lowerCAmelCase = 'image_segmenter'
_lowerCAmelCase = CLIPSegForImageSegmentation
_lowerCAmelCase = ['image', 'text']
_lowerCAmelCase = ['image']
def __init__( self : Optional[int] , *_lowerCamelCase : Optional[int] , **_lowerCamelCase : Union[str, Any] ):
"""simple docstring"""
requires_backends(self , ['''vision'''] )
super().__init__(*_lowerCamelCase , **_lowerCamelCase )
def _a ( self : List[str] , _lowerCamelCase : "Image" , _lowerCamelCase : str ):
"""simple docstring"""
return self.pre_processor(text=[label] , images=[image] , padding=_lowerCamelCase , return_tensors='''pt''' )
def _a ( self : Union[str, Any] , _lowerCamelCase : Optional[int] ):
"""simple docstring"""
with torch.no_grad():
A_ : Optional[int] = self.model(**_lowerCamelCase ).logits
return logits
def _a ( self : List[str] , _lowerCamelCase : Optional[int] ):
"""simple docstring"""
        A_ : int = outputs.cpu().detach().numpy()
        # threshold the logits into a binary mask: non-positive -> background, positive -> foreground
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8 ) )
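# Editor's sketch (not part of the tool): the decode step above in isolation —
# thresholding raw segmentation logits into a binary PIL mask.
import numpy as np
from PIL import Image

def logits_to_mask(logits: np.ndarray) -> Image.Image:
    array = logits.copy()
    array[array <= 0] = 0  # background
    array[array > 0] = 1   # foreground
    return Image.fromarray((array * 255).astype(np.uint8))

mask = logits_to_mask(np.array([[-1.2, 0.5], [2.0, -0.1]], dtype=np.float32))
assert mask.size == (2, 2)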
| 4 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class UpperCamelCase_ (unittest.TestCase ):
"""simple docstring"""
def __init__( self : List[str] , _lowerCamelCase : Dict , _lowerCamelCase : Optional[Any]=13 , _lowerCamelCase : List[str]=7 , _lowerCamelCase : Any=True , _lowerCamelCase : Dict=True , _lowerCamelCase : Any=True , _lowerCamelCase : Any=True , _lowerCamelCase : Tuple=99 , _lowerCamelCase : List[str]=32 , _lowerCamelCase : int=5 , _lowerCamelCase : Dict=4 , _lowerCamelCase : Dict=37 , _lowerCamelCase : Dict="gelu" , _lowerCamelCase : Optional[Any]=0.1 , _lowerCamelCase : int=0.1 , _lowerCamelCase : Optional[int]=512 , _lowerCamelCase : int=16 , _lowerCamelCase : List[str]=2 , _lowerCamelCase : str=0.02 , _lowerCamelCase : Tuple=4 , ):
"""simple docstring"""
A_ : List[Any] = parent
A_ : List[str] = batch_size
A_ : Any = seq_length
A_ : str = is_training
A_ : int = use_attention_mask
A_ : Optional[int] = use_token_type_ids
A_ : Tuple = use_labels
A_ : Any = vocab_size
A_ : Any = hidden_size
A_ : Tuple = num_hidden_layers
A_ : Tuple = num_attention_heads
A_ : Optional[Any] = intermediate_size
A_ : str = hidden_act
A_ : Optional[Any] = hidden_dropout_prob
A_ : int = attention_probs_dropout_prob
A_ : Any = max_position_embeddings
A_ : Optional[Any] = type_vocab_size
A_ : Dict = type_sequence_label_size
A_ : Tuple = initializer_range
A_ : List[str] = num_choices
def _a ( self : str ):
"""simple docstring"""
A_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A_ : Tuple = None
if self.use_attention_mask:
A_ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
A_ : Dict = None
if self.use_token_type_ids:
A_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
A_ : Tuple = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowerCamelCase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def _a ( self : Any ):
"""simple docstring"""
A_ : int = self.prepare_config_and_inputs()
A_ ,A_ ,A_ ,A_ : Dict = config_and_inputs
A_ : List[Any] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_flax
class UpperCamelCase_ (a__, unittest.TestCase ):
"""simple docstring"""
_lowerCAmelCase = True
_lowerCAmelCase = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
def _a ( self : str ):
"""simple docstring"""
A_ : List[Any] = FlaxRoFormerModelTester(self )
@slow
def _a ( self : Optional[int] ):
"""simple docstring"""
for model_class_name in self.all_model_classes:
A_ : int = model_class_name.from_pretrained('''junnyu/roformer_chinese_small''' , from_pt=_lowerCamelCase )
A_ : List[Any] = model(np.ones((1, 1) ) )
self.assertIsNotNone(_lowerCamelCase )
@require_flax
class UpperCamelCase_ (unittest.TestCase ):
"""simple docstring"""
@slow
def _a ( self : Optional[int] ):
"""simple docstring"""
A_ : int = FlaxRoFormerForMaskedLM.from_pretrained('''junnyu/roformer_chinese_base''' )
A_ : Tuple = jnp.array([[0, 1, 2, 3, 4, 5]] )
A_ : Optional[Any] = model(_lowerCamelCase )[0]
A_ : Optional[Any] = 50000
A_ : Dict = (1, 6, vocab_size)
self.assertEqual(output.shape , _lowerCamelCase )
A_ : Any = jnp.array(
[[[-0.12_05, -1.02_65, 0.29_22], [-1.51_34, 0.19_74, 0.15_19], [-5.01_35, -3.90_03, -0.84_04]]] )
self.assertTrue(jnp.allclose(output[:, :3, :3] , _lowerCamelCase , atol=1E-4 ) )
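# Editor's sketch mirroring the integration test above (requires network access
# to download the checkpoint): masked-LM logits have shape (batch, seq, vocab).
import jax.numpy as jnp
from transformers import FlaxRoFormerForMaskedLM

model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
logits = model(jnp.array([[0, 1, 2, 3, 4, 5]]))[0]
assert logits.shape == (1, 6, 50000)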
| 4 |
'''simple docstring'''
from collections.abc import Sequence
def snake_case__ ( lowerCamelCase__ : Sequence[float] , lowerCamelCase__ : bool = False ) -> float:
if not arr:
return 0
A_ : Union[str, Any] = 0 if allow_empty_subarrays else float('''-inf''' )
A_ : str = 0.0
for num in arr:
A_ : Any = max(0 if allow_empty_subarrays else num , curr_sum + num )
A_ : Tuple = max(lowerCamelCase__ , lowerCamelCase__ )
return max_sum
if __name__ == "__main__":
from doctest import testmod
testmod()
snake_case__ = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(F'{max_subarray_sum(nums) = }')
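    # Editor's note: with `allow_empty_subarrays=True` the empty subarray (sum 0)
    # is a valid answer, which changes the result when every element is negative.
    print(F'{max_subarray_sum([-3, -1, -2], allow_empty_subarrays=True) = }')  # 0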
| 4 | 1 |
'''simple docstring'''
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def snake_case__ ( lowerCamelCase__ : str , lowerCamelCase__ : Tuple , lowerCamelCase__ : Optional[int] ) -> int:
# Initialise PyTorch model
A_ : int = AlbertConfig.from_json_file(lowerCamelCase__ )
print(f'Building PyTorch model from configuration: {config}' )
A_ : List[Any] = AlbertForPreTraining(lowerCamelCase__ )
# Load weights from tf checkpoint
load_tf_weights_in_albert(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# Save pytorch-model
print(f'Save PyTorch model to {pytorch_dump_path}' )
torch.save(model.state_dict() , lowerCamelCase__ )
if __name__ == "__main__":
snake_case__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--albert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained ALBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
snake_case__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
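# Editor's note: a typical invocation of this conversion script; the paths below
# are placeholders, not real checkpoints.
#
#   python convert_albert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./albert_base/model.ckpt-best \
#       --albert_config_file ./albert_base/albert_config.json \
#       --pytorch_dump_path ./albert_base/pytorch_model.bin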
| 4 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case__ = logging.get_logger(__name__)
snake_case__ = {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"""
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class UpperCamelCase_ (a__ ):
"""simple docstring"""
_lowerCAmelCase = 'speech_to_text_2'
_lowerCAmelCase = ['past_key_values']
_lowerCAmelCase = {'num_attention_heads': 'decoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self : Optional[Any] , _lowerCamelCase : Optional[Any]=10000 , _lowerCamelCase : List[Any]=6 , _lowerCamelCase : int=2048 , _lowerCamelCase : Dict=4 , _lowerCamelCase : str=0.0 , _lowerCamelCase : int=True , _lowerCamelCase : int="relu" , _lowerCamelCase : Any=256 , _lowerCamelCase : List[Any]=0.1 , _lowerCamelCase : Tuple=0.0 , _lowerCamelCase : Union[str, Any]=0.0 , _lowerCamelCase : Optional[Any]=0.02 , _lowerCamelCase : int=2 , _lowerCamelCase : List[str]=True , _lowerCamelCase : str=1 , _lowerCamelCase : List[Any]=0 , _lowerCamelCase : Optional[int]=2 , _lowerCamelCase : Tuple=1024 , **_lowerCamelCase : int , ):
"""simple docstring"""
A_ : Optional[int] = vocab_size
A_ : Tuple = d_model
A_ : List[str] = decoder_ffn_dim
A_ : str = decoder_layers
A_ : Any = decoder_attention_heads
A_ : int = dropout
A_ : str = attention_dropout
A_ : Optional[int] = activation_dropout
A_ : str = activation_function
A_ : List[Any] = init_std
A_ : Union[str, Any] = decoder_layerdrop
A_ : Any = use_cache
A_ : Optional[Any] = decoder_layers
A_ : Optional[int] = scale_embedding # scale factor will be sqrt(d_model) if True
A_ : Optional[Any] = max_target_positions
super().__init__(
pad_token_id=_lowerCamelCase , bos_token_id=_lowerCamelCase , eos_token_id=_lowerCamelCase , decoder_start_token_id=_lowerCamelCase , **_lowerCamelCase , )
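# Editor's sketch (uses the public class name from transformers rather than the
# renamed one above): one override, plus the `hidden_size -> d_model` attribute map
# declared in the class.
from transformers import Speech2Text2Config

config = Speech2Text2Config(d_model=128)
assert config.hidden_size == 128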
| 4 | 1 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class UpperCamelCase_ (unittest.TestCase ):
"""simple docstring"""
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A_ : Any = tempfile.mkdtemp()
A_ : List[Any] = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
A_ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
A_ : Tuple = {
'''do_resize''': True,
'''size''': 20,
'''do_center_crop''': True,
'''crop_size''': 18,
'''do_normalize''': True,
'''image_mean''': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
'''image_std''': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
}
A_ : List[Any] = os.path.join(self.tmpdirname , _lowerCamelCase )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(_lowerCamelCase , _lowerCamelCase )
def _a ( self : Dict , **_lowerCamelCase : Tuple ):
"""simple docstring"""
return BertTokenizer.from_pretrained(self.tmpdirname , **_lowerCamelCase )
def _a ( self : Optional[int] , **_lowerCamelCase : Optional[int] ):
"""simple docstring"""
return BertTokenizerFast.from_pretrained(self.tmpdirname , **_lowerCamelCase )
def _a ( self : Optional[Any] , **_lowerCamelCase : Tuple ):
"""simple docstring"""
return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **_lowerCamelCase )
def _a ( self : Tuple ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def _a ( self : int ):
"""simple docstring"""
        A_ : Union[str, Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
A_ : Any = [Image.fromarray(np.moveaxis(_lowerCamelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _a ( self : int ):
"""simple docstring"""
A_ : Tuple = self.get_tokenizer()
A_ : Tuple = self.get_rust_tokenizer()
A_ : Dict = self.get_image_processor()
A_ : List[Any] = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
processor_slow.save_pretrained(self.tmpdirname )
A_ : str = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=_lowerCamelCase )
A_ : Any = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
processor_fast.save_pretrained(self.tmpdirname )
A_ : List[Any] = AlignProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , _lowerCamelCase )
self.assertIsInstance(processor_fast.tokenizer , _lowerCamelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , _lowerCamelCase )
self.assertIsInstance(processor_fast.image_processor , _lowerCamelCase )
def _a ( self : List[Any] ):
"""simple docstring"""
A_ : List[str] = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
A_ : Optional[int] = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
A_ : Tuple = self.get_image_processor(do_normalize=_lowerCamelCase , padding_value=1.0 )
A_ : List[str] = AlignProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_lowerCamelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _lowerCamelCase )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A_ : Dict = self.get_image_processor()
A_ : Any = self.get_tokenizer()
A_ : List[str] = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
A_ : Any = self.prepare_image_inputs()
A_ : List[Any] = image_processor(_lowerCamelCase , return_tensors='''np''' )
A_ : str = processor(images=_lowerCamelCase , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _a ( self : Dict ):
"""simple docstring"""
A_ : str = self.get_image_processor()
A_ : List[str] = self.get_tokenizer()
A_ : Optional[int] = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
A_ : int = '''lower newer'''
A_ : str = processor(text=_lowerCamelCase )
A_ : Dict = tokenizer(_lowerCamelCase , padding='''max_length''' , max_length=64 )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _a ( self : str ):
"""simple docstring"""
A_ : Optional[int] = self.get_image_processor()
A_ : Optional[Any] = self.get_tokenizer()
A_ : List[str] = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
A_ : List[Any] = '''lower newer'''
A_ : Optional[int] = self.prepare_image_inputs()
A_ : List[Any] = processor(text=_lowerCamelCase , images=_lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(_lowerCamelCase ):
processor()
def _a ( self : List[str] ):
"""simple docstring"""
A_ : Optional[Any] = self.get_image_processor()
A_ : Optional[int] = self.get_tokenizer()
A_ : List[Any] = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
A_ : str = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
A_ : str = processor.batch_decode(_lowerCamelCase )
A_ : Union[str, Any] = tokenizer.batch_decode(_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
def _a ( self : Tuple ):
"""simple docstring"""
A_ : str = self.get_image_processor()
A_ : Tuple = self.get_tokenizer()
A_ : Any = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
A_ : str = '''lower newer'''
A_ : List[str] = self.prepare_image_inputs()
A_ : Tuple = processor(text=_lowerCamelCase , images=_lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
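# Editor's sketch (requires network access; the checkpoint name is an assumption):
# the end-to-end call the tests above verify — one processor object wraps both the
# tokenizer and the image processor.
import numpy as np
from PIL import Image
from transformers import AlignProcessor

processor = AlignProcessor.from_pretrained("kakaobrain/align-base")
image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
inputs = processor(text="a photo of a cat", images=image, return_tensors="pt")
assert {"input_ids", "pixel_values"} <= set(inputs.keys())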
| 4 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
snake_case__ = logging.get_logger(__name__)
snake_case__ = {
"""microsoft/table-transformer-detection""": (
"""https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"""
),
}
class UpperCamelCase_ (a__ ):
"""simple docstring"""
_lowerCAmelCase = 'table-transformer'
_lowerCAmelCase = ['past_key_values']
_lowerCAmelCase = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self : Any , _lowerCamelCase : Union[str, Any]=True , _lowerCamelCase : Dict=None , _lowerCamelCase : int=3 , _lowerCamelCase : Any=100 , _lowerCamelCase : List[Any]=6 , _lowerCamelCase : Tuple=2048 , _lowerCamelCase : Any=8 , _lowerCamelCase : Dict=6 , _lowerCamelCase : Tuple=2048 , _lowerCamelCase : int=8 , _lowerCamelCase : Optional[int]=0.0 , _lowerCamelCase : List[Any]=0.0 , _lowerCamelCase : List[Any]=True , _lowerCamelCase : Optional[int]="relu" , _lowerCamelCase : Union[str, Any]=256 , _lowerCamelCase : Any=0.1 , _lowerCamelCase : Tuple=0.0 , _lowerCamelCase : Optional[int]=0.0 , _lowerCamelCase : str=0.02 , _lowerCamelCase : Tuple=1.0 , _lowerCamelCase : Dict=False , _lowerCamelCase : str="sine" , _lowerCamelCase : str="resnet50" , _lowerCamelCase : Any=True , _lowerCamelCase : List[str]=False , _lowerCamelCase : Any=1 , _lowerCamelCase : int=5 , _lowerCamelCase : Tuple=2 , _lowerCamelCase : Optional[int]=1 , _lowerCamelCase : Any=1 , _lowerCamelCase : Dict=5 , _lowerCamelCase : str=2 , _lowerCamelCase : Union[str, Any]=0.1 , **_lowerCamelCase : int , ):
"""simple docstring"""
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
A_ : int = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
elif isinstance(_lowerCamelCase , _lowerCamelCase ):
A_ : str = backbone_config.get('''model_type''' )
A_ : Optional[int] = CONFIG_MAPPING[backbone_model_type]
A_ : List[str] = config_class.from_dict(_lowerCamelCase )
# set timm attributes to None
A_ ,A_ ,A_ : Union[str, Any] = None, None, None
A_ : Optional[Any] = use_timm_backbone
A_ : Optional[int] = backbone_config
A_ : Optional[Any] = num_channels
A_ : Dict = num_queries
A_ : str = d_model
A_ : List[str] = encoder_ffn_dim
A_ : int = encoder_layers
A_ : Optional[Any] = encoder_attention_heads
A_ : List[str] = decoder_ffn_dim
A_ : Any = decoder_layers
A_ : List[str] = decoder_attention_heads
A_ : Tuple = dropout
A_ : Optional[Any] = attention_dropout
A_ : Any = activation_dropout
A_ : List[Any] = activation_function
A_ : Dict = init_std
A_ : Any = init_xavier_std
A_ : List[Any] = encoder_layerdrop
A_ : int = decoder_layerdrop
A_ : Any = encoder_layers
A_ : List[str] = auxiliary_loss
A_ : List[Any] = position_embedding_type
A_ : Optional[Any] = backbone
A_ : Tuple = use_pretrained_backbone
A_ : List[Any] = dilation
# Hungarian matcher
A_ : List[str] = class_cost
A_ : str = bbox_cost
A_ : Union[str, Any] = giou_cost
# Loss coefficients
A_ : Any = mask_loss_coefficient
A_ : Optional[int] = dice_loss_coefficient
A_ : Dict = bbox_loss_coefficient
A_ : int = giou_loss_coefficient
A_ : int = eos_coefficient
super().__init__(is_encoder_decoder=_lowerCamelCase , **_lowerCamelCase )
@property
def _a ( self : List[Any] ):
"""simple docstring"""
return self.encoder_attention_heads
@property
def _a ( self : Any ):
"""simple docstring"""
return self.d_model
class UpperCamelCase_ (a__ ):
"""simple docstring"""
_lowerCAmelCase = version.parse('1.11' )
@property
def _a ( self : Tuple ):
"""simple docstring"""
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''pixel_mask''', {0: '''batch'''}),
] )
@property
def _a ( self : Optional[int] ):
"""simple docstring"""
return 1E-5
@property
def _a ( self : str ):
"""simple docstring"""
return 12
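# Editor's note: dummy inputs consistent with the dynamic axes declared above
# (all four image axes and the mask batch axis are dynamic at export time);
# the concrete sizes are illustrative.
import torch

pixel_values = torch.randn(2, 3, 800, 800)              # batch, num_channels, height, width
pixel_mask = torch.ones(2, 800, 800, dtype=torch.long)  # batch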
| 4 | 1 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class UpperCamelCase_ (a__ ):
"""simple docstring"""
_lowerCAmelCase = 42
class UpperCamelCase_ (a__, a__ ):
"""simple docstring"""
@register_to_config
def __init__( self : Any , _lowerCamelCase : int = 3 , _lowerCamelCase : int = 3 , _lowerCamelCase : Tuple[str] = ("DownEncoderBlock2D",) , _lowerCamelCase : Tuple[str] = ("UpDecoderBlock2D",) , _lowerCamelCase : Tuple[int] = (64,) , _lowerCamelCase : int = 1 , _lowerCamelCase : str = "silu" , _lowerCamelCase : int = 3 , _lowerCamelCase : int = 32 , _lowerCamelCase : int = 256 , _lowerCamelCase : int = 32 , _lowerCamelCase : Optional[int] = None , _lowerCamelCase : float = 0.1_82_15 , _lowerCamelCase : str = "group" , ):
"""simple docstring"""
super().__init__()
# pass init params to Encoder
A_ : Optional[Any] = Encoder(
in_channels=_lowerCamelCase , out_channels=_lowerCamelCase , down_block_types=_lowerCamelCase , block_out_channels=_lowerCamelCase , layers_per_block=_lowerCamelCase , act_fn=_lowerCamelCase , norm_num_groups=_lowerCamelCase , double_z=_lowerCamelCase , )
A_ : int = vq_embed_dim if vq_embed_dim is not None else latent_channels
        A_ : Tuple = nn.Conv2d(_lowerCamelCase , _lowerCamelCase , 1 )
        A_ : Dict = VectorQuantizer(_lowerCamelCase , _lowerCamelCase , beta=0.25 , remap=_lowerCamelCase , sane_index_shape=_lowerCamelCase )
        A_ : str = nn.Conv2d(_lowerCamelCase , _lowerCamelCase , 1 )
# pass init params to Decoder
A_ : List[Any] = Decoder(
in_channels=_lowerCamelCase , out_channels=_lowerCamelCase , up_block_types=_lowerCamelCase , block_out_channels=_lowerCamelCase , layers_per_block=_lowerCamelCase , act_fn=_lowerCamelCase , norm_num_groups=_lowerCamelCase , norm_type=_lowerCamelCase , )
@apply_forward_hook
def _a ( self : List[Any] , _lowerCamelCase : torch.FloatTensor , _lowerCamelCase : bool = True ):
"""simple docstring"""
A_ : Any = self.encoder(_lowerCamelCase )
A_ : str = self.quant_conv(_lowerCamelCase )
if not return_dict:
return (h,)
return VQEncoderOutput(latents=_lowerCamelCase )
@apply_forward_hook
def _a ( self : int , _lowerCamelCase : torch.FloatTensor , _lowerCamelCase : bool = False , _lowerCamelCase : bool = True ):
"""simple docstring"""
if not force_not_quantize:
A_ ,A_ ,A_ : Union[str, Any] = self.quantize(_lowerCamelCase )
else:
A_ : Tuple = h
A_ : int = self.post_quant_conv(_lowerCamelCase )
A_ : List[Any] = self.decoder(_lowerCamelCase , quant if self.config.norm_type == '''spatial''' else None )
if not return_dict:
return (dec,)
return DecoderOutput(sample=_lowerCamelCase )
def _a ( self : Optional[Any] , _lowerCamelCase : torch.FloatTensor , _lowerCamelCase : bool = True ):
"""simple docstring"""
A_ : List[str] = sample
A_ : Dict = self.encode(_lowerCamelCase ).latents
A_ : Optional[Any] = self.decode(_lowerCamelCase ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=_lowerCamelCase )
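# Editor's sketch (uses the public `diffusers` class rather than the renamed one
# above; shapes assume the default single-stage config): a quantize/decode round trip.
import torch
from diffusers import VQModel

model = VQModel()
images = torch.randn(1, 3, 32, 32)
latents = model.encode(images).latents
recon = model.decode(latents).sample
assert recon.shape == images.shape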
| 4 |
'''simple docstring'''
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class UpperCamelCase_ :
"""simple docstring"""
def __init__( self : Optional[Any] , _lowerCamelCase : int , _lowerCamelCase : List[str]=3 , _lowerCamelCase : Any=32 , _lowerCamelCase : Union[str, Any]=3 , _lowerCamelCase : int=10 , _lowerCamelCase : Union[str, Any]=[8, 16, 32, 64] , _lowerCamelCase : Dict=[1, 1, 2, 1] , _lowerCamelCase : Union[str, Any]=True , _lowerCamelCase : Optional[int]=True , _lowerCamelCase : Any="relu" , _lowerCamelCase : Optional[Any]=3 , _lowerCamelCase : Optional[Any]=None , _lowerCamelCase : Dict=["stage2", "stage3", "stage4"] , _lowerCamelCase : Union[str, Any]=[2, 3, 4] , _lowerCamelCase : Tuple=1 , ):
"""simple docstring"""
A_ : List[str] = parent
A_ : List[str] = batch_size
A_ : Union[str, Any] = image_size
A_ : Tuple = num_channels
A_ : Any = embeddings_size
A_ : int = hidden_sizes
A_ : Optional[Any] = depths
A_ : List[Any] = is_training
A_ : Optional[int] = use_labels
A_ : int = hidden_act
A_ : Tuple = num_labels
A_ : Union[str, Any] = scope
A_ : List[Any] = len(_lowerCamelCase )
A_ : Union[str, Any] = out_features
A_ : List[Any] = out_indices
A_ : Dict = num_groups
def _a ( self : Optional[int] ):
"""simple docstring"""
A_ : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A_ : Union[str, Any] = None
if self.use_labels:
A_ : Any = ids_tensor([self.batch_size] , self.num_labels )
A_ : Any = self.get_config()
return config, pixel_values, labels
def _a ( self : Union[str, Any] ):
"""simple docstring"""
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def _a ( self : List[Any] , _lowerCamelCase : List[str] , _lowerCamelCase : List[str] , _lowerCamelCase : Optional[Any] ):
"""simple docstring"""
A_ : Any = BitModel(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A_ : int = model(_lowerCamelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def _a ( self : Optional[int] , _lowerCamelCase : List[Any] , _lowerCamelCase : str , _lowerCamelCase : Optional[int] ):
"""simple docstring"""
A_ : Dict = self.num_labels
A_ : Optional[Any] = BitForImageClassification(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A_ : List[Any] = model(_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a ( self : Any , _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : List[Any] ):
"""simple docstring"""
A_ : List[Any] = BitBackbone(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A_ : int = model(_lowerCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
A_ : Optional[Any] = None
A_ : int = BitBackbone(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A_ : Optional[int] = model(_lowerCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def _a ( self : List[Any] ):
"""simple docstring"""
A_ : Union[str, Any] = self.prepare_config_and_inputs()
A_ ,A_ ,A_ : Union[str, Any] = config_and_inputs
A_ : str = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class UpperCamelCase_ (a__, a__, unittest.TestCase ):
"""simple docstring"""
_lowerCAmelCase = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
_lowerCAmelCase = (
{'feature-extraction': BitModel, 'image-classification': BitForImageClassification}
if is_torch_available()
else {}
)
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
def _a ( self : Optional[Any] ):
"""simple docstring"""
A_ : List[str] = BitModelTester(self )
A_ : Optional[Any] = ConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase )
def _a ( self : Optional[Any] ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _a ( self : List[Any] ):
"""simple docstring"""
return
@unittest.skip(reason='''Bit does not output attentions''' )
def _a ( self : str ):
"""simple docstring"""
pass
@unittest.skip(reason='''Bit does not use inputs_embeds''' )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
pass
@unittest.skip(reason='''Bit does not support input and output embeddings''' )
def _a ( self : Any ):
"""simple docstring"""
pass
def _a ( self : List[Any] ):
"""simple docstring"""
A_ ,A_ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : Dict = model_class(_lowerCamelCase )
A_ : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A_ : int = [*signature.parameters.keys()]
A_ : Union[str, Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _lowerCamelCase )
def _a ( self : Optional[Any] ):
"""simple docstring"""
A_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCamelCase )
def _a ( self : Optional[Any] ):
"""simple docstring"""
A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_lowerCamelCase )
def _a ( self : Tuple ):
"""simple docstring"""
A_ ,A_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : str = model_class(config=_lowerCamelCase )
for name, module in model.named_modules():
                if isinstance(_lowerCamelCase , (nn.BatchNorm2d, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
def _a ( self : int ):
"""simple docstring"""
def check_hidden_states_output(_lowerCamelCase : Union[str, Any] , _lowerCamelCase : Dict , _lowerCamelCase : int ):
A_ : Union[str, Any] = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
with torch.no_grad():
A_ : Union[str, Any] = model(**self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) )
A_ : int = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
A_ : List[Any] = self.model_tester.num_stages
self.assertEqual(len(_lowerCamelCase ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
A_ ,A_ : str = self.model_tester.prepare_config_and_inputs_for_common()
A_ : Tuple = ['''preactivation''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
A_ : Tuple = layer_type
A_ : Optional[Any] = True
check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A_ : List[str] = True
check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
@unittest.skip(reason='''Bit does not use feedforward chunking''' )
def _a ( self : Tuple ):
"""simple docstring"""
pass
def _a ( self : str ):
"""simple docstring"""
A_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCamelCase )
@slow
def _a ( self : Union[str, Any] ):
"""simple docstring"""
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : List[Any] = BitModel.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
def snake_case__ ( ) -> Optional[int]:
A_ : Optional[int] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class UpperCamelCase_ (unittest.TestCase ):
"""simple docstring"""
@cached_property
def _a ( self : List[Any] ):
"""simple docstring"""
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def _a ( self : Dict ):
"""simple docstring"""
A_ : Optional[int] = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(_lowerCamelCase )
A_ : Union[str, Any] = self.default_image_processor
A_ : Optional[int] = prepare_img()
A_ : int = image_processor(images=_lowerCamelCase , return_tensors='''pt''' ).to(_lowerCamelCase )
# forward pass
with torch.no_grad():
A_ : Union[str, Any] = model(**_lowerCamelCase )
# verify the logits
A_ : Dict = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowerCamelCase )
A_ : Tuple = torch.tensor([[-0.65_26, -0.52_63, -1.43_98]] ).to(_lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCamelCase , atol=1E-4 ) )
@require_torch
class UpperCamelCase_ (a__, unittest.TestCase ):
"""simple docstring"""
_lowerCAmelCase = (BitBackbone,) if is_torch_available() else ()
_lowerCAmelCase = BitConfig
_lowerCAmelCase = False
def _a ( self : List[str] ):
"""simple docstring"""
A_ : Union[str, Any] = BitModelTester(self )
| 4 | 1 |
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def snake_case__ ( lowerCamelCase__ : Callable , lowerCamelCase__ : float , lowerCamelCase__ : float , lowerCamelCase__ : float , lowerCamelCase__ : float ) -> np.array:
A_ : str = int(np.ceil((x_end - xa) / step_size ) )
A_ : List[str] = np.zeros((n + 1,) )
A_ : List[str] = ya
A_ : Union[str, Any] = xa
for k in range(lowerCamelCase__ ):
A_ : Union[str, Any] = y[k] + step_size * ode_func(lowerCamelCase__ , y[k] )
A_ : Optional[Any] = y[k] + (
(step_size / 2) * (ode_func(lowerCamelCase__ , y[k] ) + ode_func(x + step_size , lowerCamelCase__ ))
)
x += step_size
return y
if __name__ == "__main__":
import doctest
doctest.testmod()
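    # Worked single step of Heun's method for y' = y, y(0) = 1, h = 0.5:
    #   predictor: y* = 1 + 0.5 * 1              = 1.5
    #   corrector: y1 = 1 + 0.5 / 2 * (1 + 1.5)  = 1.625   (exact: e**0.5 ~ 1.6487)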
| 4 |
'''simple docstring'''
import pprint
import requests
snake_case__ = """https://zenquotes.io/api"""
def snake_case__ ( ) -> list:
return requests.get(API_ENDPOINT_URL + '''/today''' ).json()
def snake_case__ ( ) -> list:
return requests.get(API_ENDPOINT_URL + '''/random''' ).json()
if __name__ == "__main__":
snake_case__ = random_quotes()
pprint.pprint(response)
| 4 | 1 |
'''simple docstring'''
def snake_case__ ( lowerCamelCase__ : str , lowerCamelCase__ : str ) -> float:
def get_matched_characters(lowerCamelCase__ : str , lowerCamelCase__ : str ) -> str:
A_ : Any = []
A_ : int = min(len(_stra ) , len(_stra ) ) // 2
for i, l in enumerate(_stra ):
A_ : Union[str, Any] = int(max(0 , i - limit ) )
A_ : Any = int(min(i + limit + 1 , len(_stra ) ) )
if l in _stra[left:right]:
matched.append(lowerCamelCase__ )
A_ : List[str] = f'{_stra[0:_stra.index(lowerCamelCase__ )]} {_stra[_stra.index(lowerCamelCase__ ) + 1:]}'
return "".join(lowerCamelCase__ )
# matching characters
A_ : Any = get_matched_characters(lowerCamelCase__ , lowerCamelCase__ )
A_ : Tuple = get_matched_characters(lowerCamelCase__ , lowerCamelCase__ )
A_ : str = len(lowerCamelCase__ )
# transposition
A_ : Any = (
len([(ca, ca) for ca, ca in zip(lowerCamelCase__ , lowerCamelCase__ ) if ca != ca] ) // 2
)
if not match_count:
A_ : Optional[Any] = 0.0
else:
A_ : List[Any] = (
1
/ 3
* (
match_count / len(lowerCamelCase__ )
+ match_count / len(lowerCamelCase__ )
+ (match_count - transpositions) / match_count
)
)
# common prefix up to 4 characters
A_ : Optional[int] = 0
for ca, ca in zip(stra[:4] , stra[:4] ):
if ca == ca:
prefix_len += 1
else:
break
return jaro + 0.1 * prefix_len * (1 - jaro)
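# Sanity check: identical strings yield jaro == 1.0, so the Winkler prefix bonus
# 0.1 * prefix_len * (1 - jaro) vanishes and the score stays exactly 1.0.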
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler("""hello""", """world"""))
| 4 |
'''simple docstring'''
from __future__ import annotations
class UpperCamelCase_ :
"""simple docstring"""
def __init__( self : Optional[int] , _lowerCamelCase : int ):
"""simple docstring"""
A_ : Union[str, Any] = order
# a_{0} ... a_{k}
A_ : Union[str, Any] = [1.0] + [0.0] * order
# b_{0} ... b_{k}
A_ : int = [1.0] + [0.0] * order
# x[n-1] ... x[n-k]
A_ : str = [0.0] * self.order
# y[n-1] ... y[n-k]
A_ : Optional[Any] = [0.0] * self.order
def _a ( self : Dict , _lowerCamelCase : list[float] , _lowerCamelCase : list[float] ):
"""simple docstring"""
if len(_lowerCamelCase ) < self.order:
A_ : Any = [1.0, *a_coeffs]
if len(_lowerCamelCase ) != self.order + 1:
A_ : List[Any] = (
f'Expected a_coeffs to have {self.order + 1} elements '
f'for {self.order}-order filter, got {len(_lowerCamelCase )}'
)
raise ValueError(_lowerCamelCase )
if len(_lowerCamelCase ) != self.order + 1:
A_ : Union[str, Any] = (
f'Expected b_coeffs to have {self.order + 1} elements '
f'for {self.order}-order filter, got {len(_lowerCamelCase )}'
)
raise ValueError(_lowerCamelCase )
A_ : Tuple = a_coeffs
A_ : str = b_coeffs
def _a ( self : Tuple , _lowerCamelCase : float ):
"""simple docstring"""
A_ : Any = 0.0
# Start at index 1 and do index 0 at the end.
for i in range(1 , self.order + 1 ):
result += (
self.b_coeffs[i] * self.input_history[i - 1]
- self.a_coeffs[i] * self.output_history[i - 1]
)
A_ : str = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
A_ : Optional[Any] = self.input_history[:-1]
A_ : List[str] = self.output_history[:-1]
A_ : Tuple = sample
A_ : Tuple = result
return result
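# What process() computes, written out in direct form I for a 2nd-order section:
#   y[n] = (b0*x[n] + b1*x[n-1] + b2*x[n-2] - a1*y[n-1] - a2*y[n-2]) / a0
# e.g. with b_coeffs = [0.5, 0.0, 0.0] and a_coeffs = [1.0, 0.0, 0.0] every sample is halved.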
| 4 | 1 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def snake_case__ ( lowerCamelCase__ : Optional[Any] ) -> Optional[int]:
A_ : Optional[Any] = [tensor.shape for tensor in tensor_list]
return all(shape == shapes[0] for shape in shapes[1:] )
class UpperCamelCase_ (a__, a__, a__, unittest.TestCase ):
"""simple docstring"""
_lowerCAmelCase = StableDiffusionLatentUpscalePipeline
_lowerCAmelCase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
'height',
'width',
'cross_attention_kwargs',
'negative_prompt_embeds',
'prompt_embeds',
}
_lowerCAmelCase = PipelineTesterMixin.required_optional_params - {'num_images_per_prompt'}
_lowerCAmelCase = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
_lowerCAmelCase = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
_lowerCAmelCase = frozenset([] )
_lowerCAmelCase = True
@property
def _a ( self : str ):
"""simple docstring"""
A_ : Any = 1
A_ : Dict = 4
A_ : Union[str, Any] = (16, 16)
A_ : str = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(_lowerCamelCase )
return image
def _a ( self : Union[str, Any] ):
"""simple docstring"""
torch.manual_seed(0 )
A_ : int = UNetaDConditionModel(
act_fn='''gelu''' , attention_head_dim=8 , norm_num_groups=_lowerCamelCase , block_out_channels=[32, 32, 64, 64] , time_cond_proj_dim=160 , conv_in_kernel=1 , conv_out_kernel=1 , cross_attention_dim=32 , down_block_types=(
'''KDownBlock2D''',
'''KCrossAttnDownBlock2D''',
'''KCrossAttnDownBlock2D''',
'''KCrossAttnDownBlock2D''',
) , in_channels=8 , mid_block_type=_lowerCamelCase , only_cross_attention=_lowerCamelCase , out_channels=5 , resnet_time_scale_shift='''scale_shift''' , time_embedding_type='''fourier''' , timestep_post_act='''gelu''' , up_block_types=('''KCrossAttnUpBlock2D''', '''KCrossAttnUpBlock2D''', '''KCrossAttnUpBlock2D''', '''KUpBlock2D''') , )
A_ : Optional[Any] = AutoencoderKL(
block_out_channels=[32, 32, 64, 64] , in_channels=3 , out_channels=3 , down_block_types=[
'''DownEncoderBlock2D''',
'''DownEncoderBlock2D''',
'''DownEncoderBlock2D''',
'''DownEncoderBlock2D''',
] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
A_ : List[str] = EulerDiscreteScheduler(prediction_type='''sample''' )
A_ : Optional[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='''quick_gelu''' , projection_dim=512 , )
A_ : List[Any] = CLIPTextModel(_lowerCamelCase )
A_ : List[str] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
A_ : List[str] = {
'''unet''': model.eval(),
'''vae''': vae.eval(),
'''scheduler''': scheduler,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
}
return components
def _a ( self : int , _lowerCamelCase : Dict , _lowerCamelCase : Dict=0 ):
"""simple docstring"""
if str(_lowerCamelCase ).startswith('''mps''' ):
A_ : Dict = torch.manual_seed(_lowerCamelCase )
else:
A_ : Optional[int] = torch.Generator(device=_lowerCamelCase ).manual_seed(_lowerCamelCase )
A_ : List[Any] = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': self.dummy_image.cpu(),
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def _a ( self : Any ):
"""simple docstring"""
A_ : Union[str, Any] = '''cpu'''
A_ : str = self.get_dummy_components()
A_ : int = self.pipeline_class(**_lowerCamelCase )
pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
A_ : int = self.get_dummy_inputs(_lowerCamelCase )
A_ : List[Any] = pipe(**_lowerCamelCase ).images
A_ : Dict = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 256, 256, 3) )
A_ : Tuple = np.array(
[0.47_22_24_12, 0.41_92_16_33, 0.44_71_74_34, 0.46_87_41_92, 0.42_58_82_58, 0.46_15_07_26, 0.4_67_75_34, 0.45_58_38_32, 0.48_57_90_55] )
A_ : Optional[int] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_lowerCamelCase , 1E-3 )
def _a ( self : Tuple ):
"""simple docstring"""
super().test_attention_slicing_forward_pass(expected_max_diff=7E-3 )
def _a ( self : List[str] ):
"""simple docstring"""
super().test_cpu_offload_forward_pass(expected_max_diff=3E-3 )
def _a ( self : List[Any] ):
"""simple docstring"""
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
def _a ( self : int ):
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=7E-3 )
def _a ( self : Any ):
"""simple docstring"""
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3E-3 )
def _a ( self : Any ):
"""simple docstring"""
super().test_save_load_local(expected_max_difference=3E-3 )
def _a ( self : Any ):
"""simple docstring"""
super().test_save_load_optional_components(expected_max_difference=3E-3 )
def _a ( self : List[Any] ):
"""simple docstring"""
A_ : Tuple = [
'''DDIMScheduler''',
'''DDPMScheduler''',
'''PNDMScheduler''',
'''HeunDiscreteScheduler''',
'''EulerAncestralDiscreteScheduler''',
'''KDPM2DiscreteScheduler''',
'''KDPM2AncestralDiscreteScheduler''',
'''DPMSolverSDEScheduler''',
]
A_ : Dict = self.get_dummy_components()
A_ : Optional[Any] = self.pipeline_class(**_lowerCamelCase )
# make sure that PNDM does not need warm-up
pipe.scheduler.register_to_config(skip_prk_steps=_lowerCamelCase )
pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
A_ : Optional[Any] = self.get_dummy_inputs(_lowerCamelCase )
A_ : int = 2
A_ : int = []
for scheduler_enum in KarrasDiffusionSchedulers:
if scheduler_enum.name in skip_schedulers:
                # schedulers that do not use sigmas are not supported by this pipeline, so skip them
continue
A_ : List[str] = getattr(_lowerCamelCase , scheduler_enum.name )
A_ : List[str] = scheduler_cls.from_config(pipe.scheduler.config )
A_ : Optional[Any] = pipe(**_lowerCamelCase )[0]
outputs.append(_lowerCamelCase )
assert check_same_shape(_lowerCamelCase )
@require_torch_gpu
@slow
class UpperCamelCase_ (unittest.TestCase ):
"""simple docstring"""
def _a ( self : Optional[int] ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a ( self : int ):
"""simple docstring"""
A_ : Tuple = torch.manual_seed(33 )
A_ : List[Any] = StableDiffusionPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' , torch_dtype=torch.floataa )
pipe.to('''cuda''' )
A_ : Optional[Any] = StableDiffusionLatentUpscalePipeline.from_pretrained(
'''stabilityai/sd-x2-latent-upscaler''' , torch_dtype=torch.floataa )
upscaler.to('''cuda''' )
A_ : List[str] = '''a photo of an astronaut high resolution, unreal engine, ultra realistic'''
A_ : Union[str, Any] = pipe(_lowerCamelCase , generator=_lowerCamelCase , output_type='''latent''' ).images
A_ : Dict = upscaler(
prompt=_lowerCamelCase , image=_lowerCamelCase , num_inference_steps=20 , guidance_scale=0 , generator=_lowerCamelCase , output_type='''np''' , ).images[0]
A_ : str = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy''' )
assert np.abs((expected_image - image).mean() ) < 5E-2
def _a ( self : List[Any] ):
"""simple docstring"""
A_ : int = torch.manual_seed(33 )
A_ : Tuple = StableDiffusionLatentUpscalePipeline.from_pretrained(
'''stabilityai/sd-x2-latent-upscaler''' , torch_dtype=torch.floataa )
upscaler.to('''cuda''' )
A_ : str = '''the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas'''
A_ : Dict = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png''' )
A_ : Dict = upscaler(
prompt=_lowerCamelCase , image=_lowerCamelCase , num_inference_steps=20 , guidance_scale=0 , generator=_lowerCamelCase , output_type='''np''' , ).images[0]
A_ : Optional[Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy''' )
assert np.abs((expected_image - image).max() ) < 5E-2
| 4 |
'''simple docstring'''
class UpperCamelCase_ :
"""simple docstring"""
def __init__( self : Optional[Any] , _lowerCamelCase : Union[str, Any] ):
"""simple docstring"""
A_ : Union[str, Any] = val
A_ : Tuple = None
A_ : Any = None
def _a ( self : Tuple , _lowerCamelCase : List[Any] ):
"""simple docstring"""
if self.val:
if val < self.val:
if self.left is None:
A_ : int = Node(_lowerCamelCase )
else:
self.left.insert(_lowerCamelCase )
elif val > self.val:
if self.right is None:
A_ : List[str] = Node(_lowerCamelCase )
else:
self.right.insert(_lowerCamelCase )
else:
A_ : Any = val
def snake_case__ ( lowerCamelCase__ : Any , lowerCamelCase__ : Optional[int] ) -> str:
# Recursive traversal
if root:
inorder(root.left , lowerCamelCase__ )
res.append(root.val )
inorder(root.right , lowerCamelCase__ )
def snake_case__ ( lowerCamelCase__ : Optional[int] ) -> Tuple:
# Build BST
if len(lowerCamelCase__ ) == 0:
return arr
A_ : Dict = Node(arr[0] )
for i in range(1 , len(lowerCamelCase__ ) ):
root.insert(arr[i] )
# Traverse BST in order.
A_ : Tuple = []
inorder(lowerCamelCase__ , lowerCamelCase__ )
return res
if __name__ == "__main__":
print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
| 4 | 1 |
'''simple docstring'''
import heapq
def snake_case__ ( lowerCamelCase__ : dict ) -> set[int]:
A_ : list[list] = []
    # for each node and its adjacency list, push them onto the queue together with the node's rank
    # using the heapq module, the queue behaves like a priority queue
    # heapq implements a min-priority queue, so -1 * len(v) is used to get max-priority behavior
for key, value in graph.items():
# O(log(n))
heapq.heappush(lowerCamelCase__ , [-1 * len(lowerCamelCase__ ), (key, value)] )
# chosen_vertices = set of chosen vertices
A_ : str = set()
# while queue isn't empty and there are still edges
# (queue[0][0] is the rank of the node with max rank)
while queue and queue[0][0] != 0:
# extract vertex with max rank from queue and add it to chosen_vertices
A_ : Tuple = heapq.heappop(lowerCamelCase__ )[1][0]
chosen_vertices.add(lowerCamelCase__ )
# Remove all arcs adjacent to argmax
for elem in queue:
            # if v has no adjacent nodes, skip it
if elem[0] == 0:
continue
# if argmax is reachable from elem
            # remove argmax from elem's adjacency list and update its rank
if argmax in elem[1][1]:
A_ : List[str] = elem[1][1].index(lowerCamelCase__ )
del elem[1][1][index]
elem[0] += 1
# re-order the queue
heapq.heapify(lowerCamelCase__ )
return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
snake_case__ = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(F'Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}')
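    # The -1 * len(v) trick turns heapq's min-heap into a max-heap, so the
    # highest-degree node is popped first; for the sample graph above the
    # greedy cover printed is {0, 1, 2, 4}.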
| 4 |
'''simple docstring'''
def snake_case__ ( lowerCamelCase__ : list ) -> list:
if len(lowerCamelCase__ ) <= 1:
return [tuple(lowerCamelCase__ )]
A_ : List[str] = []
def generate(lowerCamelCase__ : int , lowerCamelCase__ : list ):
if k == 1:
res.append(tuple(arr[:] ) )
return
generate(k - 1 , lowerCamelCase__ )
for i in range(k - 1 ):
if k % 2 == 0: # k is even
A_ ,A_ : Optional[int] = arr[k - 1], arr[i]
else: # k is odd
A_ ,A_ : Union[str, Any] = arr[k - 1], arr[0]
generate(k - 1 , lowerCamelCase__ )
generate(len(lowerCamelCase__ ) , lowerCamelCase__ )
return res
if __name__ == "__main__":
snake_case__ = input("""Enter numbers separated by a comma:\n""").strip()
snake_case__ = [int(item) for item in user_input.split(""",""")]
print(heaps(arr))
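    # Worked example: for [1, 2, 3] the function yields all 3! = 6 permutations
    # in Heap's order: (1,2,3), (2,1,3), (3,1,2), (1,3,2), (2,3,1), (3,2,1).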
| 4 | 1 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
snake_case__ = logging.getLogger(__name__)
@dataclass
class UpperCamelCase_ :
"""simple docstring"""
_lowerCAmelCase = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
_lowerCAmelCase = field(
default=a__, metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
_lowerCAmelCase = field(
default=a__, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
_lowerCAmelCase = field(
default=a__, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'}, )
_lowerCAmelCase = field(
default=a__, metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'}, )
_lowerCAmelCase = field(
default='main', metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'}, )
_lowerCAmelCase = field(
default=a__, metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
}, )
@dataclass
class UpperCamelCase_ :
"""simple docstring"""
_lowerCAmelCase = field(default=a__, metadata={'help': 'The input training data file (a text file).'} )
_lowerCAmelCase = field(
default=a__, metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'}, )
_lowerCAmelCase = field(
default=a__, metadata={'help': 'Overwrite the cached training and evaluation sets'} )
_lowerCAmelCase = field(
default=a__, metadata={'help': 'The number of processes to use for the preprocessing.'}, )
_lowerCAmelCase = field(
default=a__, metadata={
'help': (
'The maximum total input sequence length after tokenization. If passed, sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
}, )
_lowerCAmelCase = field(
default=a__, metadata={
'help': (
'Whether to pad all samples to the maximum sentence length. '
'If False, will pad the samples dynamically when batching to the maximum length in the batch. More '
'efficient on GPU but very bad for TPU.'
)
}, )
_lowerCAmelCase = field(
default=a__, metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
}, )
_lowerCAmelCase = field(
default=a__, metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
}, )
def _a ( self : str ):
"""simple docstring"""
if self.train_file is not None:
A_ : Any = self.train_file.split('''.''' )[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
A_ : str = self.validation_file.split('''.''' )[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class UpperCamelCase_ :
"""simple docstring"""
_lowerCAmelCase = 42
_lowerCAmelCase = True
_lowerCAmelCase = None
_lowerCAmelCase = None
def __call__( self : int , _lowerCamelCase : List[str] ):
"""simple docstring"""
A_ : int = '''label''' if '''label''' in features[0].keys() else '''labels'''
A_ : Any = [feature.pop(_lowerCamelCase ) for feature in features]
A_ : Dict = len(_lowerCamelCase )
A_ : Dict = len(features[0]['''input_ids'''] )
A_ : List[Any] = [
[{k: v[i] for k, v in feature.items()} for i in range(_lowerCamelCase )] for feature in features
]
A_ : Optional[int] = list(chain(*_lowerCamelCase ) )
A_ : Optional[int] = self.tokenizer.pad(
_lowerCamelCase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' , )
# Un-flatten
A_ : str = {k: v.view(_lowerCamelCase , _lowerCamelCase , -1 ) for k, v in batch.items()}
# Add back labels
A_ : Optional[Any] = torch.tensor(_lowerCamelCase , dtype=torch.intaa )
return batch
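# Shape sketch for the collator above: with a batch of 2 questions and 4 endings each,
# the 2 * 4 = 8 flattened sequences are padded together, each resulting tensor of shape
# (8, seq_len) is reshaped back to (2, 4, seq_len), and the labels are re-attached.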
def snake_case__ ( ) -> List[str]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
A_ : Tuple = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
A_ ,A_ ,A_ : List[str] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
A_ ,A_ ,A_ : str = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('''run_swag''' , lowerCamelCase__ , lowerCamelCase__ )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
A_ : Optional[Any] = training_args.get_process_log_level()
logger.setLevel(lowerCamelCase__ )
datasets.utils.logging.set_verbosity(lowerCamelCase__ )
transformers.utils.logging.set_verbosity(lowerCamelCase__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
+ f'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
logger.info(f'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
A_ : List[str] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
A_ : Any = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. '
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
A_ : Optional[Any] = {}
if data_args.train_file is not None:
A_ : Any = data_args.train_file
if data_args.validation_file is not None:
A_ : Tuple = data_args.validation_file
A_ : Optional[int] = data_args.train_file.split('''.''' )[-1]
A_ : str = load_dataset(
lowerCamelCase__ , data_files=lowerCamelCase__ , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
A_ : List[Any] = load_dataset(
'''swag''' , '''regular''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
A_ : Any = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
A_ : Union[str, Any] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
A_ : Optional[Any] = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=lowerCamelCase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
A_ : str = [f'ending{i}' for i in range(4 )]
A_ : Union[str, Any] = '''sent1'''
A_ : List[Any] = '''sent2'''
if data_args.max_seq_length is None:
A_ : int = tokenizer.model_max_length
if max_seq_length > 1_0_2_4:
logger.warning(
'''The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value'''
''' of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can'''
''' override this default with `--block_size xxx`.''' )
A_ : Tuple = 1_0_2_4
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f'The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'
f'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.' )
A_ : List[Any] = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(lowerCamelCase__ : str ):
A_ : Tuple = [[context] * 4 for context in examples[context_name]]
A_ : Union[str, Any] = examples[question_header_name]
A_ : Optional[Any] = [
[f'{header} {examples[end][i]}' for end in ending_names] for i, header in enumerate(lowerCamelCase__ )
]
# Flatten out
A_ : int = list(chain(*lowerCamelCase__ ) )
A_ : int = list(chain(*lowerCamelCase__ ) )
# Tokenize
A_ : Tuple = tokenizer(
lowerCamelCase__ , lowerCamelCase__ , truncation=lowerCamelCase__ , max_length=lowerCamelCase__ , padding='''max_length''' if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(lowerCamelCase__ ) , 4 )] for k, v in tokenized_examples.items()}
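    # e.g. a batch of 2 examples becomes 8 tokenized sequences; v[i : i + 4] regroups
    # them into 2 lists of 4 candidate endings, matching what the collator expects.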
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('''--do_train requires a train dataset''' )
A_ : Tuple = raw_datasets['''train''']
if data_args.max_train_samples is not None:
A_ : Union[str, Any] = min(len(lowerCamelCase__ ) , data_args.max_train_samples )
A_ : Any = train_dataset.select(range(lowerCamelCase__ ) )
with training_args.main_process_first(desc='''train dataset map pre-processing''' ):
A_ : Optional[Any] = train_dataset.map(
lowerCamelCase__ , batched=lowerCamelCase__ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError('''--do_eval requires a validation dataset''' )
A_ : int = raw_datasets['''validation''']
if data_args.max_eval_samples is not None:
A_ : Optional[Any] = min(len(lowerCamelCase__ ) , data_args.max_eval_samples )
A_ : Any = eval_dataset.select(range(lowerCamelCase__ ) )
with training_args.main_process_first(desc='''validation dataset map pre-processing''' ):
A_ : List[str] = eval_dataset.map(
lowerCamelCase__ , batched=lowerCamelCase__ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
A_ : Optional[Any] = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=lowerCamelCase__ , pad_to_multiple_of=8 if training_args.fpaa else None )
)
# Metric
def compute_metrics(lowerCamelCase__ : List[str] ):
A_ ,A_ : Any = eval_predictions
A_ : str = np.argmax(lowerCamelCase__ , axis=1 )
return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
# Initialize our Trainer
A_ : Any = Trainer(
model=lowerCamelCase__ , args=lowerCamelCase__ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=lowerCamelCase__ , data_collator=lowerCamelCase__ , compute_metrics=lowerCamelCase__ , )
# Training
if training_args.do_train:
A_ : Tuple = None
if training_args.resume_from_checkpoint is not None:
A_ : Any = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
A_ : Any = last_checkpoint
A_ : List[str] = trainer.train(resume_from_checkpoint=lowerCamelCase__ )
trainer.save_model() # Saves the tokenizer too for easy upload
A_ : str = train_result.metrics
A_ : List[str] = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(lowerCamelCase__ )
)
A_ : Dict = min(lowerCamelCase__ , len(lowerCamelCase__ ) )
trainer.log_metrics('''train''' , lowerCamelCase__ )
trainer.save_metrics('''train''' , lowerCamelCase__ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
A_ : Union[str, Any] = trainer.evaluate()
A_ : Union[str, Any] = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowerCamelCase__ )
A_ : Union[str, Any] = min(lowerCamelCase__ , len(lowerCamelCase__ ) )
trainer.log_metrics('''eval''' , lowerCamelCase__ )
trainer.save_metrics('''eval''' , lowerCamelCase__ )
A_ : Optional[int] = {
'''finetuned_from''': model_args.model_name_or_path,
'''tasks''': '''multiple-choice''',
'''dataset_tags''': '''swag''',
'''dataset_args''': '''regular''',
'''dataset''': '''SWAG''',
'''language''': '''en''',
}
if training_args.push_to_hub:
trainer.push_to_hub(**lowerCamelCase__ )
else:
trainer.create_model_card(**lowerCamelCase__ )
def snake_case__ ( lowerCamelCase__ : Dict ) -> Tuple:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 4 |
'''simple docstring'''
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class UpperCamelCase_ (unittest.TestCase ):
"""simple docstring"""
def _a ( self : Dict ):
"""simple docstring"""
A_ : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
A_ : Tuple = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowerCamelCase )
A_ : Dict = -1
A_ : List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCamelCase )
A_ : Any = model.generate(_lowerCamelCase , max_new_tokens=10 , do_sample=_lowerCamelCase )
A_ : List[str] = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
A_ : List[str] = TextStreamer(_lowerCamelCase )
model.generate(_lowerCamelCase , max_new_tokens=10 , do_sample=_lowerCamelCase , streamer=_lowerCamelCase )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
A_ : Dict = cs.out[:-1]
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
def _a ( self : Tuple ):
"""simple docstring"""
A_ : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
A_ : List[str] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowerCamelCase )
A_ : Dict = -1
A_ : List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCamelCase )
A_ : Optional[int] = model.generate(_lowerCamelCase , max_new_tokens=10 , do_sample=_lowerCamelCase )
A_ : str = tokenizer.decode(greedy_ids[0] )
A_ : int = TextIteratorStreamer(_lowerCamelCase )
A_ : List[Any] = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
A_ : List[Any] = Thread(target=model.generate , kwargs=_lowerCamelCase )
thread.start()
A_ : List[Any] = ''''''
for new_text in streamer:
streamer_text += new_text
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
def _a ( self : int ):
"""simple docstring"""
A_ : List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
A_ : List[Any] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowerCamelCase )
A_ : List[str] = -1
A_ : Any = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCamelCase )
A_ : Tuple = model.generate(_lowerCamelCase , max_new_tokens=10 , do_sample=_lowerCamelCase )
A_ : Tuple = greedy_ids[:, input_ids.shape[1] :]
A_ : Tuple = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
A_ : Any = TextStreamer(_lowerCamelCase , skip_prompt=_lowerCamelCase )
model.generate(_lowerCamelCase , max_new_tokens=10 , do_sample=_lowerCamelCase , streamer=_lowerCamelCase )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
A_ : Any = cs.out[:-1]
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
def _a ( self : List[Any] ):
"""simple docstring"""
A_ : List[Any] = AutoTokenizer.from_pretrained('''distilgpt2''' )
A_ : Tuple = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(_lowerCamelCase )
A_ : List[Any] = -1
A_ : Union[str, Any] = torch.ones((1, 5) , device=_lowerCamelCase ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
A_ : List[Any] = TextStreamer(_lowerCamelCase , skip_special_tokens=_lowerCamelCase )
model.generate(_lowerCamelCase , max_new_tokens=1 , do_sample=_lowerCamelCase , streamer=_lowerCamelCase )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
A_ : List[str] = cs.out[:-1] # Remove the final "\n"
A_ : List[Any] = tokenizer(_lowerCamelCase , return_tensors='''pt''' )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A_ : str = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
A_ : str = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowerCamelCase )
A_ : Union[str, Any] = -1
A_ : Union[str, Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCamelCase )
A_ : List[str] = TextIteratorStreamer(_lowerCamelCase , timeout=0.0_01 )
A_ : str = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
A_ : List[str] = Thread(target=model.generate , kwargs=_lowerCamelCase )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(_lowerCamelCase ):
A_ : str = ''''''
for new_text in streamer:
streamer_text += new_text
| 4 | 1 |
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCamelCase_ :
"""simple docstring"""
def __init__( self : List[Any] , _lowerCamelCase : List[Any] , _lowerCamelCase : List[str]=13 , _lowerCamelCase : List[Any]=7 , _lowerCamelCase : Optional[int]=True , _lowerCamelCase : str=True , _lowerCamelCase : Optional[int]=True , _lowerCamelCase : str=True , _lowerCamelCase : List[str]=99 , _lowerCamelCase : int=32 , _lowerCamelCase : Optional[int]=5 , _lowerCamelCase : Dict=4 , _lowerCamelCase : str=37 , _lowerCamelCase : Union[str, Any]="gelu" , _lowerCamelCase : Optional[int]=0.1 , _lowerCamelCase : Optional[int]=0.1 , _lowerCamelCase : Tuple=512 , _lowerCamelCase : str=16 , _lowerCamelCase : List[str]=2 , _lowerCamelCase : Optional[Any]=0.02 , _lowerCamelCase : str=3 , _lowerCamelCase : str=4 , _lowerCamelCase : int=None , ):
"""simple docstring"""
A_ : Union[str, Any] = parent
A_ : Union[str, Any] = batch_size
A_ : Union[str, Any] = seq_length
A_ : Any = is_training
A_ : Optional[int] = use_input_mask
A_ : str = use_token_type_ids
A_ : Any = use_labels
A_ : List[Any] = vocab_size
A_ : Any = hidden_size
A_ : Optional[Any] = num_hidden_layers
A_ : Union[str, Any] = num_attention_heads
A_ : Dict = intermediate_size
A_ : Any = hidden_act
A_ : Tuple = hidden_dropout_prob
A_ : Tuple = attention_probs_dropout_prob
A_ : Optional[Any] = max_position_embeddings
A_ : Dict = type_vocab_size
A_ : List[Any] = type_sequence_label_size
A_ : int = initializer_range
A_ : Optional[int] = num_labels
A_ : Tuple = num_choices
A_ : Optional[Any] = scope
def _a ( self : Optional[int] ):
"""simple docstring"""
A_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A_ : List[str] = None
if self.use_input_mask:
A_ : Dict = random_attention_mask([self.batch_size, self.seq_length] )
A_ : List[str] = None
if self.use_token_type_ids:
A_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
A_ : Tuple = None
A_ : List[str] = None
A_ : int = None
if self.use_labels:
A_ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A_ : Any = ids_tensor([self.batch_size] , self.num_choices )
A_ : Dict = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _a ( self : str ):
"""simple docstring"""
return NystromformerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowerCamelCase , initializer_range=self.initializer_range , )
def _a ( self : Optional[Any] , _lowerCamelCase : int , _lowerCamelCase : List[str] , _lowerCamelCase : List[Any] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : List[str] , _lowerCamelCase : Dict ):
"""simple docstring"""
A_ : Union[str, Any] = NystromformerModel(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A_ : List[Any] = model(_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase )
A_ : Tuple = model(_lowerCamelCase , token_type_ids=_lowerCamelCase )
A_ : Dict = model(_lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self : List[Any] , _lowerCamelCase : Dict , _lowerCamelCase : str , _lowerCamelCase : Optional[Any] , _lowerCamelCase : int , _lowerCamelCase : Optional[int] , _lowerCamelCase : List[str] , _lowerCamelCase : Dict ):
"""simple docstring"""
A_ : Optional[int] = NystromformerForMaskedLM(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A_ : Tuple = model(_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self : List[str] , _lowerCamelCase : int , _lowerCamelCase : Any , _lowerCamelCase : int , _lowerCamelCase : Dict , _lowerCamelCase : Optional[int] , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : str ):
"""simple docstring"""
A_ : Optional[Any] = NystromformerForQuestionAnswering(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A_ : List[str] = model(
_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , start_positions=_lowerCamelCase , end_positions=_lowerCamelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _a ( self : List[str] , _lowerCamelCase : str , _lowerCamelCase : Optional[Any] , _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Tuple , _lowerCamelCase : Tuple ):
"""simple docstring"""
A_ : List[Any] = self.num_labels
A_ : Tuple = NystromformerForSequenceClassification(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A_ : int = model(_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a ( self : Dict , _lowerCamelCase : Tuple , _lowerCamelCase : int , _lowerCamelCase : Optional[Any] , _lowerCamelCase : str , _lowerCamelCase : Optional[int] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Optional[int] ):
"""simple docstring"""
A_ : Union[str, Any] = self.num_labels
A_ : int = NystromformerForTokenClassification(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A_ : Optional[int] = model(_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _a ( self : Optional[Any] , _lowerCamelCase : Dict , _lowerCamelCase : Any , _lowerCamelCase : int , _lowerCamelCase : Optional[int] , _lowerCamelCase : int , _lowerCamelCase : List[str] , _lowerCamelCase : Dict ):
"""simple docstring"""
A_ : Dict = self.num_choices
A_ : Optional[int] = NystromformerForMultipleChoice(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A_ : List[Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A_ : List[str] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A_ : Union[str, Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A_ : List[Any] = model(
_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , labels=_lowerCamelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _a ( self : str ):
"""simple docstring"""
A_ : Union[str, Any] = self.prepare_config_and_inputs()
        A_ ,A_ ,A_ ,A_ ,A_ ,A_ ,A_ : Tuple = config_and_inputs
A_ : Optional[int] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase_ (a__, a__, unittest.TestCase ):
"""simple docstring"""
_lowerCAmelCase = (
(
NystromformerModel,
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
)
if is_torch_available()
else ()
)
_lowerCAmelCase = (
{
'feature-extraction': NystromformerModel,
'fill-mask': NystromformerForMaskedLM,
'question-answering': NystromformerForQuestionAnswering,
'text-classification': NystromformerForSequenceClassification,
'token-classification': NystromformerForTokenClassification,
'zero-shot': NystromformerForSequenceClassification,
}
if is_torch_available()
else {}
)
_lowerCAmelCase = False
_lowerCAmelCase = False
def _a ( self : str ):
"""simple docstring"""
A_ : List[Any] = NystromformerModelTester(self )
A_ : int = ConfigTester(self , config_class=_lowerCamelCase , hidden_size=37 )
def _a ( self : Any ):
"""simple docstring"""
self.config_tester.run_common_tests()
def _a ( self : Optional[int] ):
"""simple docstring"""
A_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCamelCase )
def _a ( self : Optional[Any] ):
"""simple docstring"""
A_ : int = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
A_ : Any = type
self.model_tester.create_and_check_model(*_lowerCamelCase )
def _a ( self : List[str] ):
"""simple docstring"""
A_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_lowerCamelCase )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_lowerCamelCase )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_lowerCamelCase )
def _a ( self : List[str] ):
"""simple docstring"""
A_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_lowerCamelCase )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_lowerCamelCase )
@slow
def _a ( self : Dict ):
"""simple docstring"""
for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : Optional[Any] = NystromformerModel.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
@require_torch
class UpperCamelCase_ (unittest.TestCase ):
"""simple docstring"""
@slow
def _a ( self : List[Any] ):
"""simple docstring"""
A_ : List[Any] = NystromformerModel.from_pretrained('''uw-madison/nystromformer-512''' )
A_ : Optional[int] = torch.tensor([[0, 1, 2, 3, 4, 5]] )
with torch.no_grad():
A_ : str = model(_lowerCamelCase )[0]
A_ : int = torch.Size((1, 6, 768) )
self.assertEqual(output.shape , _lowerCamelCase )
A_ : Union[str, Any] = torch.tensor(
[[[-0.45_32, -0.09_36, 0.51_37], [-0.26_76, 0.06_28, 0.61_86], [-0.36_29, -0.17_26, 0.47_16]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _lowerCamelCase , atol=1E-4 ) )
@slow
def _a ( self : Optional[int] ):
"""simple docstring"""
A_ : List[Any] = '''the [MASK] of Belgium is Brussels'''
A_ : Union[str, Any] = AutoTokenizer.from_pretrained('''uw-madison/nystromformer-512''' )
A_ : Dict = NystromformerForMaskedLM.from_pretrained('''uw-madison/nystromformer-512''' )
A_ : Optional[int] = tokenizer(_lowerCamelCase , return_tensors='''pt''' )
with torch.no_grad():
A_ : Optional[int] = model(encoding.input_ids ).logits
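        # position 2 is the [MASK] token (after [CLS] and "the") in the encoded prompt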
A_ : Optional[int] = token_logits[:, 2, :].argmax(-1 )[0]
self.assertEqual(tokenizer.decode(_lowerCamelCase ) , '''capital''' )
| 4 |
'''simple docstring'''
import heapq
def snake_case__ ( lowerCamelCase__ : dict ) -> set[int]:
A_ : list[list] = []
    # for each node and its adjacency list, push them onto the queue together with the node's rank
    # using the heapq module, the queue behaves like a priority queue
    # heapq implements a min-priority queue, so -1 * len(v) is used to get max-priority behavior
for key, value in graph.items():
# O(log(n))
heapq.heappush(lowerCamelCase__ , [-1 * len(lowerCamelCase__ ), (key, value)] )
# chosen_vertices = set of chosen vertices
A_ : str = set()
# while queue isn't empty and there are still edges
# (queue[0][0] is the rank of the node with max rank)
while queue and queue[0][0] != 0:
# extract vertex with max rank from queue and add it to chosen_vertices
A_ : Tuple = heapq.heappop(lowerCamelCase__ )[1][0]
chosen_vertices.add(lowerCamelCase__ )
# Remove all arcs adjacent to argmax
for elem in queue:
            # if v has no adjacent nodes, skip it
if elem[0] == 0:
continue
# if argmax is reachable from elem
            # remove argmax from elem's adjacency list and update its rank
if argmax in elem[1][1]:
A_ : List[str] = elem[1][1].index(lowerCamelCase__ )
del elem[1][1][index]
elem[0] += 1
# re-order the queue
heapq.heapify(lowerCamelCase__ )
return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
snake_case__ = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(F'Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}')
| 4 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
snake_case__ = logging.get_logger(__name__)
snake_case__ = {
"""microsoft/resnet-50""": """https://huggingface.co/microsoft/resnet-50/blob/main/config.json""",
}
class UpperCamelCase_ (a__, a__ ):
"""simple docstring"""
_lowerCAmelCase = 'resnet'
_lowerCAmelCase = ['basic', 'bottleneck']
def __init__( self : Union[str, Any] , _lowerCamelCase : List[str]=3 , _lowerCamelCase : str=64 , _lowerCamelCase : str=[256, 512, 1024, 2048] , _lowerCamelCase : Optional[Any]=[3, 4, 6, 3] , _lowerCamelCase : Any="bottleneck" , _lowerCamelCase : List[Any]="relu" , _lowerCamelCase : List[Any]=False , _lowerCamelCase : str=None , _lowerCamelCase : List[str]=None , **_lowerCamelCase : Tuple , ):
"""simple docstring"""
super().__init__(**_lowerCamelCase )
if layer_type not in self.layer_types:
raise ValueError(f'layer_type={layer_type} is not one of {",".join(self.layer_types )}' )
A_ : int = num_channels
A_ : Dict = embedding_size
A_ : Union[str, Any] = hidden_sizes
A_ : Dict = depths
A_ : str = layer_type
A_ : int = hidden_act
A_ : str = downsample_in_first_stage
A_ : Dict = ['''stem'''] + [f'stage{idx}' for idx in range(1 , len(_lowerCamelCase ) + 1 )]
A_ ,A_ : Any = get_aligned_output_features_output_indices(
out_features=_lowerCamelCase , out_indices=_lowerCamelCase , stage_names=self.stage_names )
class UpperCamelCase_ (a__ ):
"""simple docstring"""
_lowerCAmelCase = version.parse('1.11' )
@property
def _a ( self : int ):
"""simple docstring"""
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def _a ( self : Optional[int] ):
"""simple docstring"""
return 1E-3
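# Backbone sketch (illustrative values): passing out_features=["stem", "stage2"] would
# be aligned to out_indices=(0, 2) against stage_names; with both left as None, the
# alignment helper defaults to the last stage only.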
| 4 |
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
snake_case__ = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
snake_case__ = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'transformer.encoder.layers.{i}.self_attn.out_proj.weight', F'encoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(F'transformer.encoder.layers.{i}.self_attn.out_proj.bias', F'encoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.weight', F'encoder.layers.{i}.fc1.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.bias', F'encoder.layers.{i}.fc1.bias'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.weight', F'encoder.layers.{i}.fc2.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.bias', F'encoder.layers.{i}.fc2.bias'))
rename_keys.append(
(F'transformer.encoder.layers.{i}.norm1.weight', F'encoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((F'transformer.encoder.layers.{i}.norm1.bias', F'encoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.weight', F'encoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.bias', F'encoder.layers.{i}.final_layer_norm.bias'))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'transformer.decoder.layers.{i}.self_attn.out_proj.weight', F'decoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.self_attn.out_proj.bias', F'decoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append(
(
F'transformer.decoder.layers.{i}.multihead_attn.out_proj.weight',
F'decoder.layers.{i}.encoder_attn.out_proj.weight',
)
)
rename_keys.append(
(
F'transformer.decoder.layers.{i}.multihead_attn.out_proj.bias',
F'decoder.layers.{i}.encoder_attn.out_proj.bias',
)
)
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.weight', F'decoder.layers.{i}.fc1.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.bias', F'decoder.layers.{i}.fc1.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.weight', F'decoder.layers.{i}.fc2.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.bias', F'decoder.layers.{i}.fc2.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm1.weight', F'decoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((F'transformer.decoder.layers.{i}.norm1.bias', F'decoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm2.weight', F'decoder.layers.{i}.encoder_attn_layer_norm.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm2.bias', F'decoder.layers.{i}.encoder_attn_layer_norm.bias')
)
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.weight', F'decoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.bias', F'decoder.layers.{i}.final_layer_norm.bias'))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.encoder.norm.weight""", """encoder.layernorm.weight"""),
("""transformer.encoder.norm.bias""", """encoder.layernorm.bias"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
]
)
def snake_case__ ( lowerCamelCase__ : List[Any] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : List[Any] ) -> Optional[Any]:
A_ : Tuple = state_dict.pop(lowerCamelCase__ )
A_ : Optional[Any] = val
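# Editor's note: a minimal usage sketch (not part of the original script) of the
# rename helper above; dict contents and key names are illustrative only. The
# helper pops the value stored under the old key and re-inserts it under the new
# key, mutating the state dict in place:
#
#   state = {"input_proj.weight": torch.zeros(3)}
#   rename_key(state, "input_proj.weight", "input_projection.weight")
#   assert "input_projection.weight" in state and "input_proj.weight" not in state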
def snake_case__ ( lowerCamelCase__ : Dict ) -> Any:
A_ : int = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
A_ : int = key.replace('''backbone.0.body''' , '''backbone.conv_encoder.model''' )
A_ : List[str] = value
else:
A_ : Optional[int] = value
return new_state_dict
def snake_case__ ( lowerCamelCase__ : Union[str, Any] ) -> Optional[Any]:
A_ : Any = ''''''
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
A_ : Tuple = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight' )
A_ : Dict = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
A_ : str = in_proj_weight[:2_5_6, :]
A_ : Optional[Any] = in_proj_bias[:2_5_6]
A_ : Dict = in_proj_weight[2_5_6:5_1_2, :]
A_ : Tuple = in_proj_bias[2_5_6:5_1_2]
A_ : Tuple = in_proj_weight[-2_5_6:, :]
A_ : Optional[int] = in_proj_bias[-2_5_6:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
A_ : Union[str, Any] = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight' )
A_ : Dict = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
A_ : List[str] = in_proj_weight[:2_5_6, :]
A_ : int = in_proj_bias[:2_5_6]
A_ : Any = in_proj_weight[2_5_6:5_1_2, :]
A_ : List[str] = in_proj_bias[2_5_6:5_1_2]
A_ : Union[str, Any] = in_proj_weight[-2_5_6:, :]
A_ : Optional[Any] = in_proj_bias[-2_5_6:]
# read in weights + bias of input projection layer of cross-attention
A_ : Tuple = state_dict.pop(
f'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight' )
A_ : Optional[Any] = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias' )
# next, add query, keys and values (in that order) of cross-attention to the state dict
A_ : Dict = in_proj_weight_cross_attn[:2_5_6, :]
A_ : Tuple = in_proj_bias_cross_attn[:2_5_6]
A_ : int = in_proj_weight_cross_attn[2_5_6:5_1_2, :]
A_ : List[str] = in_proj_bias_cross_attn[2_5_6:5_1_2]
A_ : Any = in_proj_weight_cross_attn[-2_5_6:, :]
A_ : Any = in_proj_bias_cross_attn[-2_5_6:]
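# Editor's note: a sketch of why the slicing above recovers q/k/v. PyTorch's
# nn.MultiheadAttention stores the three input projections stacked in a single
# `in_proj_weight` of shape (3 * hidden_size, hidden_size); with hidden_size=256
# the query, key and value projections occupy rows [0:256], [256:512] and
# [512:768] (equivalently [-256:]). Illustrative only, assuming hidden_size=256:
#
#   q_w, k_w, v_w = in_proj_weight[:256, :], in_proj_weight[256:512, :], in_proj_weight[-256:, :]
#   q_b, k_b, v_b = in_proj_bias[:256], in_proj_bias[256:512], in_proj_bias[-256:]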
def snake_case__ ( lowerCamelCase__ : List[str] , lowerCamelCase__ : Tuple ) -> Dict:
    A_ , A_ = image.size

A_ : Tuple = max(lowerCamelCase__ , lowerCamelCase__ )
A_ : Optional[Any] = 8_0_0 if '''detection''' in checkpoint_url else 1_0_0_0
A_ : Union[str, Any] = target_max_size / current_max_size
A_ : Any = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) )
return resized_image
def snake_case__ ( lowerCamelCase__ : Tuple ) -> str:
A_ : Any = F.to_tensor(lowerCamelCase__ )
A_ : Optional[Any] = F.normalize(lowerCamelCase__ , mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] )
return image
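# Editor's note: the mean/std used above are the standard ImageNet statistics.
# A hedged sketch (file name illustrative) of the full preprocessing pipeline
# built from the resize and normalize helpers above:
#
#   image = Image.open("page.png").convert("RGB")
#   pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)  # (1, 3, H, W)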
@torch.no_grad()
def snake_case__ ( lowerCamelCase__ : List[Any] , lowerCamelCase__ : int , lowerCamelCase__ : int ) -> str:
logger.info('''Converting model...''' )
# load original state dict
A_ : Tuple = torch.hub.load_state_dict_from_url(lowerCamelCase__ , map_location='''cpu''' )
# rename keys
for src, dest in rename_keys:
rename_key(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
A_ : str = rename_backbone_keys(lowerCamelCase__ )
# query, key and value matrices need special treatment
read_in_q_k_v(lowerCamelCase__ )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
A_ : List[Any] = '''model.'''
for key in state_dict.copy().keys():
if not key.startswith('''class_labels_classifier''' ) and not key.startswith('''bbox_predictor''' ):
A_ : List[Any] = state_dict.pop(lowerCamelCase__ )
A_ : str = val
# create HuggingFace model and load state dict
A_ : Union[str, Any] = TableTransformerConfig(
backbone='''resnet18''' , mask_loss_coefficient=1 , dice_loss_coefficient=1 , ce_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.4 , class_cost=1 , bbox_cost=5 , giou_cost=2 , )
if "detection" in checkpoint_url:
A_ : Dict = 1_5
A_ : Dict = 2
A_ : int = {0: '''table''', 1: '''table rotated'''}
A_ : List[str] = idalabel
A_ : Optional[int] = {v: k for k, v in idalabel.items()}
else:
A_ : Union[str, Any] = 1_2_5
A_ : Optional[Any] = 6
A_ : Optional[Any] = {
0: '''table''',
1: '''table column''',
2: '''table row''',
3: '''table column header''',
4: '''table projected row header''',
5: '''table spanning cell''',
}
A_ : int = idalabel
A_ : Tuple = {v: k for k, v in idalabel.items()}
A_ : Optional[Any] = DetrImageProcessor(
format='''coco_detection''' , max_size=8_0_0 if '''detection''' in checkpoint_url else 1_0_0_0 )
A_ : int = TableTransformerForObjectDetection(lowerCamelCase__ )
model.load_state_dict(lowerCamelCase__ )
model.eval()
# verify our conversion
A_ : Optional[int] = '''example_pdf.png''' if '''detection''' in checkpoint_url else '''example_table.png'''
A_ : Union[str, Any] = hf_hub_download(repo_id='''nielsr/example-pdf''' , repo_type='''dataset''' , filename=lowerCamelCase__ )
A_ : Tuple = Image.open(lowerCamelCase__ ).convert('''RGB''' )
A_ : int = normalize(resize(lowerCamelCase__ , lowerCamelCase__ ) ).unsqueeze(0 )
A_ : str = model(lowerCamelCase__ )
if "detection" in checkpoint_url:
A_ : str = (1, 1_5, 3)
A_ : int = torch.tensor(
[[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]] )
A_ : Tuple = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]] )
else:
A_ : Optional[int] = (1, 1_2_5, 7)
A_ : Dict = torch.tensor(
[[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]] )
A_ : Any = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, :3, :3] , lowerCamelCase__ , atol=1e-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , lowerCamelCase__ , atol=1e-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(f'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' )
Path(lowerCamelCase__ ).mkdir(exist_ok=lowerCamelCase__ )
model.save_pretrained(lowerCamelCase__ )
image_processor.save_pretrained(lowerCamelCase__ )
if push_to_hub:
# Push model to HF hub
logger.info('''Pushing model to the hub...''' )
A_ : List[Any] = (
'''microsoft/table-transformer-detection'''
if '''detection''' in checkpoint_url
else '''microsoft/table-transformer-structure-recognition'''
)
model.push_to_hub(lowerCamelCase__ )
image_processor.push_to_hub(lowerCamelCase__ )
if __name__ == "__main__":
snake_case__ = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""",
type=str,
choices=[
"""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""",
"""https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth""",
],
help="""URL of the Table Transformer checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
snake_case__ = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 4 | 1 |
'''simple docstring'''
import inspect
import unittest
import numpy as np
from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor
if is_flax_available():
import jax
from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class UpperCamelCase_ (unittest.TestCase ):
"""simple docstring"""
def __init__( self : Tuple , _lowerCamelCase : List[Any] , _lowerCamelCase : List[Any]=100 , _lowerCamelCase : Tuple=13 , _lowerCamelCase : List[str]=30 , _lowerCamelCase : Dict=2 , _lowerCamelCase : str=3 , _lowerCamelCase : List[Any]=True , _lowerCamelCase : Any=True , _lowerCamelCase : List[str]=32 , _lowerCamelCase : Optional[int]=5 , _lowerCamelCase : Any=4 , _lowerCamelCase : Dict=37 , _lowerCamelCase : Any="gelu" , _lowerCamelCase : Optional[Any]=0.1 , _lowerCamelCase : Any=0.1 , _lowerCamelCase : Any=10 , _lowerCamelCase : Optional[int]=0.02 , _lowerCamelCase : Optional[Any]=3 , ):
"""simple docstring"""
A_ : Any = parent
A_ : Dict = vocab_size
A_ : Tuple = batch_size
A_ : Dict = image_size
A_ : Tuple = patch_size
A_ : Any = num_channels
A_ : Dict = is_training
A_ : Dict = use_labels
A_ : List[Any] = hidden_size
A_ : Optional[Any] = num_hidden_layers
A_ : List[str] = num_attention_heads
A_ : Dict = intermediate_size
A_ : Optional[int] = hidden_act
A_ : Any = hidden_dropout_prob
A_ : str = attention_probs_dropout_prob
A_ : Optional[Any] = type_sequence_label_size
A_ : Any = initializer_range
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
A_ : Tuple = (image_size // patch_size) ** 2
A_ : str = num_patches + 1
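        # Editor's note (worked example, not original code): with the defaults
        # above, image_size=30 and patch_size=2 give (30 // 2) ** 2 = 225 patches,
        # so seq_length = 225 + 1 = 226 once the [CLS] token is prepended.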
def _a ( self : Optional[Any] ):
"""simple docstring"""
A_ : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A_ : Tuple = None
if self.use_labels:
A_ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A_ : List[str] = BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowerCamelCase , initializer_range=self.initializer_range , )
return config, pixel_values, labels
def _a ( self : Union[str, Any] , _lowerCamelCase : List[str] , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Tuple ):
"""simple docstring"""
A_ : Any = FlaxBeitModel(config=_lowerCamelCase )
A_ : Union[str, Any] = model(_lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self : Optional[int] , _lowerCamelCase : Optional[int] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : List[Any] ):
"""simple docstring"""
A_ : Union[str, Any] = FlaxBeitForMaskedImageModeling(config=_lowerCamelCase )
A_ : List[str] = model(_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def _a ( self : str , _lowerCamelCase : Tuple , _lowerCamelCase : Any , _lowerCamelCase : Union[str, Any] ):
"""simple docstring"""
A_ : Optional[int] = self.type_sequence_label_size
A_ : Optional[int] = FlaxBeitForImageClassification(config=_lowerCamelCase )
A_ : int = model(_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
A_ : str = 1
A_ : Tuple = FlaxBeitForImageClassification(_lowerCamelCase )
A_ : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A_ : Tuple = model(_lowerCamelCase )
def _a ( self : Any ):
"""simple docstring"""
A_ : Dict = self.prepare_config_and_inputs()
        A_ , A_ , A_ = config_and_inputs
A_ : Dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_flax
class UpperCamelCase_ (a__, unittest.TestCase ):
"""simple docstring"""
_lowerCAmelCase = (
(FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else ()
)
def _a ( self : Optional[Any] ):
"""simple docstring"""
A_ : List[str] = FlaxBeitModelTester(self )
A_ : Optional[Any] = ConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase , hidden_size=37 )
def _a ( self : str ):
"""simple docstring"""
self.config_tester.run_common_tests()
def _a ( self : Any ):
"""simple docstring"""
        A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : Tuple = model_class(_lowerCamelCase )
A_ : Tuple = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A_ : List[str] = [*signature.parameters.keys()]
A_ : str = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _lowerCamelCase )
def _a ( self : Optional[int] ):
"""simple docstring"""
        A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
A_ : int = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase )
A_ : Optional[Any] = model_class(_lowerCamelCase )
@jax.jit
def model_jitted(_lowerCamelCase : Optional[Any] , **_lowerCamelCase : int ):
return model(pixel_values=_lowerCamelCase , **_lowerCamelCase )
with self.subTest('''JIT Enabled''' ):
A_ : List[Any] = model_jitted(**_lowerCamelCase ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
A_ : int = model_jitted(**_lowerCamelCase ).to_tuple()
self.assertEqual(len(_lowerCamelCase ) , len(_lowerCamelCase ) )
for jitted_output, output in zip(_lowerCamelCase , _lowerCamelCase ):
self.assertEqual(jitted_output.shape , output.shape )
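    # Editor's note: a minimal sketch (illustrative, not original code) of the
    # jit-equivalence pattern the test above relies on -- jax.jit traces and
    # compiles a function, and its outputs should match eager execution:
    #
    #   import jax, jax.numpy as jnp
    #   f = jax.jit(lambda x: x * 2 + 1)
    #   x = jnp.ones((2, 2))
    #   assert jnp.allclose(f(x), x * 2 + 1)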
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCamelCase )
def _a ( self : int ):
"""simple docstring"""
A_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_lowerCamelCase )
def _a ( self : List[Any] ):
"""simple docstring"""
A_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCamelCase )
@slow
def _a ( self : int ):
"""simple docstring"""
for model_class_name in self.all_model_classes:
A_ : Tuple = model_class_name.from_pretrained('''microsoft/beit-base-patch16-224''' )
A_ : Union[str, Any] = model(np.ones((1, 3, 224, 224) ) )
self.assertIsNotNone(_lowerCamelCase )
def snake_case__ ( ) -> str:
A_ : Tuple = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_vision
@require_flax
class UpperCamelCase_ (unittest.TestCase ):
"""simple docstring"""
@cached_property
def _a ( self : Any ):
"""simple docstring"""
return BeitImageProcessor.from_pretrained('''microsoft/beit-base-patch16-224''' ) if is_vision_available() else None
@slow
def _a ( self : str ):
"""simple docstring"""
A_ : str = FlaxBeitForMaskedImageModeling.from_pretrained('''microsoft/beit-base-patch16-224-pt22k''' )
A_ : str = self.default_image_processor
A_ : str = prepare_img()
A_ : Any = image_processor(images=_lowerCamelCase , return_tensors='''np''' ).pixel_values
# prepare bool_masked_pos
A_ : Any = np.ones((1, 196) , dtype=_lowerCamelCase )
# forward pass
A_ : int = model(pixel_values=_lowerCamelCase , bool_masked_pos=_lowerCamelCase )
A_ : Dict = outputs.logits
# verify the logits
A_ : List[str] = (1, 196, 8192)
self.assertEqual(logits.shape , _lowerCamelCase )
A_ : List[Any] = np.array(
[[-3.24_37, 0.50_72, -13.91_74], [-3.24_56, 0.49_48, -13.94_01], [-3.20_33, 0.51_21, -13.85_50]] )
self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3] , _lowerCamelCase , atol=1E-2 ) )
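    # Editor's note: in the test above, bool_masked_pos marks which of the 196
    # patches are masked (here all of them); for every masked patch the model
    # predicts a visual-token id from the 8192-entry codebook, which is why the
    # logits come out with shape (1, 196, 8192).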
@slow
def _a ( self : Tuple ):
"""simple docstring"""
A_ : List[str] = FlaxBeitForImageClassification.from_pretrained('''microsoft/beit-base-patch16-224''' )
A_ : List[str] = self.default_image_processor
A_ : List[str] = prepare_img()
A_ : List[Any] = image_processor(images=_lowerCamelCase , return_tensors='''np''' )
# forward pass
A_ : Union[str, Any] = model(**_lowerCamelCase )
A_ : Any = outputs.logits
# verify the logits
A_ : int = (1, 1000)
self.assertEqual(logits.shape , _lowerCamelCase )
A_ : List[str] = np.array([-1.23_85, -1.09_87, -1.01_08] )
self.assertTrue(np.allclose(logits[0, :3] , _lowerCamelCase , atol=1E-4 ) )
A_ : Tuple = 281
self.assertEqual(logits.argmax(-1 ).item() , _lowerCamelCase )
@slow
def _a ( self : Tuple ):
"""simple docstring"""
A_ : Any = FlaxBeitForImageClassification.from_pretrained('''microsoft/beit-large-patch16-224-pt22k-ft22k''' )
A_ : int = self.default_image_processor
A_ : Optional[int] = prepare_img()
A_ : Union[str, Any] = image_processor(images=_lowerCamelCase , return_tensors='''np''' )
# forward pass
A_ : Tuple = model(**_lowerCamelCase )
A_ : Optional[int] = outputs.logits
# verify the logits
A_ : List[Any] = (1, 21841)
self.assertEqual(logits.shape , _lowerCamelCase )
A_ : int = np.array([1.68_81, -0.27_87, 0.59_01] )
self.assertTrue(np.allclose(logits[0, :3] , _lowerCamelCase , atol=1E-4 ) )
A_ : int = 2396
self.assertEqual(logits.argmax(-1 ).item() , _lowerCamelCase )
| 4 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
snake_case__ = logging.getLogger(__name__)
@dataclass(frozen=a__ )
class UpperCamelCase_ :
"""simple docstring"""
_lowerCAmelCase = 42
_lowerCAmelCase = 42
_lowerCAmelCase = None
_lowerCAmelCase = None
_lowerCAmelCase = None
@dataclass(frozen=a__ )
class UpperCamelCase_ :
"""simple docstring"""
_lowerCAmelCase = 42
_lowerCAmelCase = None
_lowerCAmelCase = None
_lowerCAmelCase = None
_lowerCAmelCase = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class UpperCamelCase_ (a__ ):
"""simple docstring"""
_lowerCAmelCase = 42
def __init__( self : Optional[int] , _lowerCamelCase : str , _lowerCamelCase : PreTrainedTokenizer , _lowerCamelCase : str , _lowerCamelCase : Optional[int] = None , _lowerCamelCase : List[Any]=False , _lowerCamelCase : bool = False , ):
"""simple docstring"""
A_ : Optional[int] = hans_processors[task]()
A_ : int = os.path.join(
_lowerCamelCase , '''cached_{}_{}_{}_{}'''.format(
'''dev''' if evaluate else '''train''' , tokenizer.__class__.__name__ , str(_lowerCamelCase ) , _lowerCamelCase , ) , )
A_ : Dict = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
            A_ , A_ = label_list[2], label_list[1]
A_ : Optional[int] = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
A_ : str = cached_features_file + '''.lock'''
with FileLock(_lowerCamelCase ):
if os.path.exists(_lowerCamelCase ) and not overwrite_cache:
logger.info(f'Loading features from cached file {cached_features_file}' )
A_ : List[str] = torch.load(_lowerCamelCase )
else:
logger.info(f'Creating features from dataset file at {data_dir}' )
A_ : Optional[int] = (
processor.get_dev_examples(_lowerCamelCase ) if evaluate else processor.get_train_examples(_lowerCamelCase )
)
logger.info('''Training examples: %s''' , len(_lowerCamelCase ) )
A_ : Optional[int] = hans_convert_examples_to_features(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
logger.info('''Saving features into cached file %s''' , _lowerCamelCase )
torch.save(self.features , _lowerCamelCase )
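    # Editor's note: a hedged sketch (names illustrative) of the caching pattern
    # used above -- a .lock file serializes feature creation across distributed
    # workers, so only the first process builds the cache and the rest load it:
    #
    #   with FileLock(cache_path + ".lock"):
    #       if os.path.exists(cache_path):
    #           features = torch.load(cache_path)
    #       else:
    #           features = build_features()  # hypothetical helper
    #           torch.save(features, cache_path)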
def __len__( self : List[str] ):
"""simple docstring"""
return len(self.features )
def __getitem__( self : List[str] , _lowerCamelCase : Optional[int] ):
"""simple docstring"""
return self.features[i]
def _a ( self : str ):
"""simple docstring"""
return self.label_list
if is_tf_available():
import tensorflow as tf
class UpperCamelCase_ :
"""simple docstring"""
_lowerCAmelCase = 42
def __init__( self : Optional[int] , _lowerCamelCase : str , _lowerCamelCase : PreTrainedTokenizer , _lowerCamelCase : str , _lowerCamelCase : Optional[int] = 128 , _lowerCamelCase : Dict=False , _lowerCamelCase : bool = False , ):
"""simple docstring"""
A_ : Optional[int] = hans_processors[task]()
A_ : Optional[int] = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
            A_ , A_ = label_list[2], label_list[1]
A_ : Tuple = label_list
A_ : Optional[int] = processor.get_dev_examples(_lowerCamelCase ) if evaluate else processor.get_train_examples(_lowerCamelCase )
A_ : Tuple = hans_convert_examples_to_features(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='''convert examples to features''' ):
if ex_index % 10000 == 0:
logger.info('''Writing example %d of %d''' % (ex_index, len(_lowerCamelCase )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
A_ : List[Any] = tf.data.Dataset.from_generator(
_lowerCamelCase , (
{
'''example_id''': tf.intaa,
'''input_ids''': tf.intaa,
'''attention_mask''': tf.intaa,
'''token_type_ids''': tf.intaa,
},
tf.intaa,
) , (
{
'''example_id''': tf.TensorShape([] ),
'''input_ids''': tf.TensorShape([None, None] ),
'''attention_mask''': tf.TensorShape([None, None] ),
'''token_type_ids''': tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) , )
def _a ( self : Any ):
"""simple docstring"""
return self.dataset
def __len__( self : Dict ):
"""simple docstring"""
return len(self.features )
def __getitem__( self : Optional[int] , _lowerCamelCase : List[str] ):
"""simple docstring"""
return self.features[i]
def _a ( self : Tuple ):
"""simple docstring"""
return self.label_list
class UpperCamelCase_ (a__ ):
"""simple docstring"""
def _a ( self : List[str] , _lowerCamelCase : Union[str, Any] ):
"""simple docstring"""
return self._create_examples(self._read_tsv(os.path.join(_lowerCamelCase , '''heuristics_train_set.txt''' ) ) , '''train''' )
def _a ( self : List[str] , _lowerCamelCase : Tuple ):
"""simple docstring"""
return self._create_examples(self._read_tsv(os.path.join(_lowerCamelCase , '''heuristics_evaluation_set.txt''' ) ) , '''dev''' )
def _a ( self : Any ):
"""simple docstring"""
return ["contradiction", "entailment", "neutral"]
def _a ( self : Optional[Any] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Any ):
"""simple docstring"""
A_ : Tuple = []
for i, line in enumerate(_lowerCamelCase ):
if i == 0:
continue
A_ : str = '''%s-%s''' % (set_type, line[0])
A_ : Optional[Any] = line[5]
A_ : Union[str, Any] = line[6]
A_ : List[str] = line[7][2:] if line[7].startswith('''ex''' ) else line[7]
A_ : str = line[0]
examples.append(InputExample(guid=_lowerCamelCase , text_a=_lowerCamelCase , text_b=_lowerCamelCase , label=_lowerCamelCase , pairID=_lowerCamelCase ) )
return examples
def snake_case__ ( lowerCamelCase__ : List[InputExample] , lowerCamelCase__ : List[str] , lowerCamelCase__ : int , lowerCamelCase__ : PreTrainedTokenizer , ) -> int:
A_ : Union[str, Any] = {label: i for i, label in enumerate(lowerCamelCase__ )}
A_ : Optional[Any] = []
for ex_index, example in tqdm.tqdm(enumerate(lowerCamelCase__ ) , desc='''convert examples to features''' ):
if ex_index % 1_0_0_0_0 == 0:
logger.info('''Writing example %d''' % (ex_index) )
A_ : Optional[int] = tokenizer(
example.text_a , example.text_b , add_special_tokens=lowerCamelCase__ , max_length=lowerCamelCase__ , padding='''max_length''' , truncation=lowerCamelCase__ , return_overflowing_tokens=lowerCamelCase__ , )
A_ : List[str] = label_map[example.label] if example.label in label_map else 0
A_ : Tuple = int(example.pairID )
features.append(InputFeatures(**lowerCamelCase__ , label=lowerCamelCase__ , pairID=lowerCamelCase__ ) )
for i, example in enumerate(examples[:5] ):
logger.info('''*** Example ***''' )
logger.info(f'guid: {example}' )
logger.info(f'features: {features[i]}' )
return features
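# Editor's note: a hedged usage sketch of the conversion helper above (called
# hans_convert_examples_to_features at its call sites); tokenizer name and
# max length are illustrative only:
#
#   from transformers import AutoTokenizer
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#   features = hans_convert_examples_to_features(examples, label_list, 128, tokenizer)
#   # each InputFeatures carries input_ids / attention_mask / token_type_ids,
#   # an integer label, and the HANS pairID used for heuristic-wise evaluation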
snake_case__ = {
"""hans""": 3,
}
snake_case__ = {
"""hans""": HansProcessor,
}
| 4 | 1 |
'''simple docstring'''
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class UpperCamelCase_ (a__, unittest.TestCase ):
"""simple docstring"""
_lowerCAmelCase = BertTokenizer
_lowerCAmelCase = BertTokenizerFast
_lowerCAmelCase = True
_lowerCAmelCase = True
_lowerCAmelCase = filter_non_english
def _a ( self : List[str] ):
"""simple docstring"""
super().setUp()
A_ : Union[str, Any] = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
A_ : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def _a ( self : Optional[Any] , _lowerCamelCase : Optional[int] ):
"""simple docstring"""
A_ : Union[str, Any] = '''UNwant\u00E9d,running'''
A_ : Any = '''unwanted, running'''
return input_text, output_text
def _a ( self : List[str] ):
"""simple docstring"""
A_ : List[Any] = self.tokenizer_class(self.vocab_file )
A_ : Union[str, Any] = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(_lowerCamelCase , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCamelCase ) , [9, 6, 7, 12, 10, 11] )
def _a ( self : Optional[int] ):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
A_ : str = self.get_tokenizer()
A_ : Any = self.get_rust_tokenizer()
A_ : Union[str, Any] = '''UNwant\u00E9d,running'''
A_ : Any = tokenizer.tokenize(_lowerCamelCase )
A_ : Any = rust_tokenizer.tokenize(_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
A_ : Union[str, Any] = tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
A_ : Optional[int] = rust_tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
A_ : Dict = self.get_rust_tokenizer()
A_ : List[str] = tokenizer.encode(_lowerCamelCase )
A_ : int = rust_tokenizer.encode(_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
# With lower casing
A_ : int = self.get_tokenizer(do_lower_case=_lowerCamelCase )
A_ : Optional[int] = self.get_rust_tokenizer(do_lower_case=_lowerCamelCase )
A_ : List[Any] = '''UNwant\u00E9d,running'''
A_ : Optional[int] = tokenizer.tokenize(_lowerCamelCase )
A_ : Union[str, Any] = rust_tokenizer.tokenize(_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
A_ : Any = tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
A_ : str = rust_tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
A_ : Union[str, Any] = self.get_rust_tokenizer()
A_ : List[str] = tokenizer.encode(_lowerCamelCase )
A_ : Dict = rust_tokenizer.encode(_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
def _a ( self : List[Any] ):
"""simple docstring"""
A_ : Union[str, Any] = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )
def _a ( self : str ):
"""simple docstring"""
A_ : Optional[int] = BasicTokenizer(do_lower_case=_lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def _a ( self : Any ):
"""simple docstring"""
A_ : str = BasicTokenizer(do_lower_case=_lowerCamelCase , strip_accents=_lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] )
def _a ( self : Any ):
"""simple docstring"""
A_ : Optional[int] = BasicTokenizer(do_lower_case=_lowerCamelCase , strip_accents=_lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def _a ( self : List[Any] ):
"""simple docstring"""
A_ : List[str] = BasicTokenizer(do_lower_case=_lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A_ : Dict = BasicTokenizer(do_lower_case=_lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def _a ( self : Optional[int] ):
"""simple docstring"""
A_ : Union[str, Any] = BasicTokenizer(do_lower_case=_lowerCamelCase , strip_accents=_lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def _a ( self : str ):
"""simple docstring"""
A_ : str = BasicTokenizer(do_lower_case=_lowerCamelCase , strip_accents=_lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def _a ( self : Optional[Any] ):
"""simple docstring"""
A_ : List[str] = BasicTokenizer(do_lower_case=_lowerCamelCase , never_split=['''[UNK]'''] )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )
def _a ( self : Optional[Any] ):
"""simple docstring"""
A_ : Tuple = BasicTokenizer()
A_ : str = '''a\n\'ll !!to?\'d of, can\'t.'''
A_ : Optional[Any] = ['''a''', '''\'''', '''ll''', '''!''', '''!''', '''to''', '''?''', '''\'''', '''d''', '''of''', ''',''', '''can''', '''\'''', '''t''', '''.''']
self.assertListEqual(tokenizer.tokenize(_lowerCamelCase ) , _lowerCamelCase )
def _a ( self : List[str] ):
"""simple docstring"""
A_ : Optional[int] = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
A_ : str = {}
for i, token in enumerate(_lowerCamelCase ):
A_ : Optional[Any] = i
A_ : int = WordpieceTokenizer(vocab=_lowerCamelCase , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] )
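    # Editor's note: WordPiece uses greedy longest-match-first segmentation, which
    # is what the assertions above exercise: "unwanted" splits into
    # un + ##want + ##ed because "un" is the longest vocabulary prefix, while the
    # out-of-vocabulary "unwantedX" collapses to [UNK] since no full segmentation exists.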
def _a ( self : Optional[int] ):
"""simple docstring"""
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
def _a ( self : Dict ):
"""simple docstring"""
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
def _a ( self : Optional[Any] ):
"""simple docstring"""
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A_ : Optional[int] = self.get_tokenizer()
A_ : List[str] = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(_lowerCamelCase ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
self.assertListEqual(
[rust_tokenizer.tokenize(_lowerCamelCase ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
@slow
def _a ( self : Any ):
"""simple docstring"""
A_ : Union[str, Any] = self.tokenizer_class.from_pretrained('''bert-base-uncased''' )
A_ : List[str] = tokenizer.encode('''sequence builders''' , add_special_tokens=_lowerCamelCase )
A_ : str = tokenizer.encode('''multi-sequence build''' , add_special_tokens=_lowerCamelCase )
A_ : Dict = tokenizer.build_inputs_with_special_tokens(_lowerCamelCase )
A_ : List[Any] = tokenizer.build_inputs_with_special_tokens(_lowerCamelCase , _lowerCamelCase )
assert encoded_sentence == [101] + text + [102]
assert encoded_pair == [101] + text + [102] + text_a + [102]
def _a ( self : int ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
A_ : int = self.rust_tokenizer_class.from_pretrained(_lowerCamelCase , **_lowerCamelCase )
A_ : List[str] = f'A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'
A_ : Optional[int] = tokenizer_r.encode_plus(
_lowerCamelCase , return_attention_mask=_lowerCamelCase , return_token_type_ids=_lowerCamelCase , return_offsets_mapping=_lowerCamelCase , add_special_tokens=_lowerCamelCase , )
A_ : Tuple = tokenizer_r.do_lower_case if hasattr(_lowerCamelCase , '''do_lower_case''' ) else False
A_ : Any = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), '''A'''),
((1, 2), ''','''),
((3, 5), '''na'''),
((5, 6), '''##ï'''),
((6, 8), '''##ve'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''Allen'''),
((21, 23), '''##NL'''),
((23, 24), '''##P'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), '''a'''),
((1, 2), ''','''),
((3, 8), '''naive'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''allen'''),
((21, 23), '''##nl'''),
((23, 24), '''##p'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] )
def _a ( self : List[Any] ):
"""simple docstring"""
A_ : Tuple = ['''的''', '''人''', '''有''']
A_ : Union[str, Any] = ''''''.join(_lowerCamelCase )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
A_ : Optional[Any] = True
A_ : List[Any] = self.tokenizer_class.from_pretrained(_lowerCamelCase , **_lowerCamelCase )
A_ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(_lowerCamelCase , **_lowerCamelCase )
A_ : Union[str, Any] = tokenizer_p.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
A_ : List[Any] = tokenizer_r.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
A_ : Union[str, Any] = tokenizer_r.convert_ids_to_tokens(_lowerCamelCase )
A_ : List[Any] = tokenizer_p.convert_ids_to_tokens(_lowerCamelCase )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
A_ : Any = False
A_ : Optional[int] = self.rust_tokenizer_class.from_pretrained(_lowerCamelCase , **_lowerCamelCase )
A_ : List[str] = self.tokenizer_class.from_pretrained(_lowerCamelCase , **_lowerCamelCase )
A_ : Tuple = tokenizer_r.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
A_ : Any = tokenizer_p.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
A_ : Optional[Any] = tokenizer_r.convert_ids_to_tokens(_lowerCamelCase )
A_ : List[str] = tokenizer_p.convert_ids_to_tokens(_lowerCamelCase )
# it is expected that only the first Chinese character is not preceded by "##".
A_ : List[Any] = [
f'##{token}' if idx != 0 else token for idx, token in enumerate(_lowerCamelCase )
]
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
| 4 |
'''simple docstring'''
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
snake_case__ = datasets.utils.logging.get_logger(__name__)
@dataclass
class UpperCamelCase_ (datasets.BuilderConfig ):
"""simple docstring"""
_lowerCAmelCase = None
_lowerCAmelCase = "utf-8"
_lowerCAmelCase = None
_lowerCAmelCase = None
_lowerCAmelCase = True # deprecated
_lowerCAmelCase = None # deprecated
_lowerCAmelCase = 1_0 << 2_0 # 10MB
_lowerCAmelCase = None
class UpperCamelCase_ (datasets.ArrowBasedBuilder ):
"""simple docstring"""
_lowerCAmelCase = JsonConfig
def _a ( self : int ):
"""simple docstring"""
if self.config.block_size is not None:
logger.warning('''The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead''' )
A_ : List[Any] = self.config.block_size
if self.config.use_threads is not True:
logger.warning(
'''The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.''' )
if self.config.newlines_in_values is not None:
raise ValueError('''The JSON loader parameter `newlines_in_values` is no longer supported''' )
return datasets.DatasetInfo(features=self.config.features )
def _a ( self : Any , _lowerCamelCase : List[str] ):
"""simple docstring"""
if not self.config.data_files:
raise ValueError(f'At least one data file must be specified, but got data_files={self.config.data_files}' )
A_ : int = dl_manager.download_and_extract(self.config.data_files )
if isinstance(_lowerCamelCase , (str, list, tuple) ):
A_ : Union[str, Any] = data_files
if isinstance(_lowerCamelCase , _lowerCamelCase ):
A_ : List[str] = [files]
A_ : List[Any] = [dl_manager.iter_files(_lowerCamelCase ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )]
A_ : Tuple = []
for split_name, files in data_files.items():
if isinstance(_lowerCamelCase , _lowerCamelCase ):
A_ : int = [files]
A_ : Union[str, Any] = [dl_manager.iter_files(_lowerCamelCase ) for file in files]
splits.append(datasets.SplitGenerator(name=_lowerCamelCase , gen_kwargs={'''files''': files} ) )
return splits
def _a ( self : int , _lowerCamelCase : pa.Table ):
"""simple docstring"""
if self.config.features is not None:
# adding missing columns
for column_name in set(self.config.features ) - set(pa_table.column_names ):
A_ : Optional[int] = self.config.features.arrow_schema.field(_lowerCamelCase ).type
A_ : Optional[int] = pa_table.append_column(_lowerCamelCase , pa.array([None] * len(_lowerCamelCase ) , type=_lowerCamelCase ) )
# more expensive cast to support nested structures with keys in a different order
# allows str <-> int/float or str to Audio for example
A_ : str = table_cast(_lowerCamelCase , self.config.features.arrow_schema )
return pa_table
def _a ( self : List[str] , _lowerCamelCase : int ):
"""simple docstring"""
for file_idx, file in enumerate(itertools.chain.from_iterable(_lowerCamelCase ) ):
# If the file is one json object and if we need to look at the list of items in one specific field
if self.config.field is not None:
with open(_lowerCamelCase , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
A_ : int = json.load(_lowerCamelCase )
# We keep only the field we are interested in
A_ : List[str] = dataset[self.config.field]
# We accept two format: a list of dicts or a dict of lists
if isinstance(_lowerCamelCase , (list, tuple) ):
A_ : int = set().union(*[row.keys() for row in dataset] )
A_ : List[str] = {col: [row.get(_lowerCamelCase ) for row in dataset] for col in keys}
else:
A_ : Tuple = dataset
A_ : Dict = pa.Table.from_pydict(_lowerCamelCase )
yield file_idx, self._cast_table(_lowerCamelCase )
# If the file has one json object per line
else:
with open(_lowerCamelCase , '''rb''' ) as f:
A_ : int = 0
# Use block_size equal to the chunk size divided by 32 to leverage multithreading
# Set a default minimum value of 16kB if the chunk size is really small
A_ : int = max(self.config.chunksize // 32 , 16 << 10 )
A_ : int = (
self.config.encoding_errors if self.config.encoding_errors is not None else '''strict'''
)
while True:
A_ : Any = f.read(self.config.chunksize )
if not batch:
break
# Finish current line
try:
batch += f.readline()
except (AttributeError, io.UnsupportedOperation):
batch += readline(_lowerCamelCase )
# PyArrow only accepts utf-8 encoded bytes
if self.config.encoding != "utf-8":
A_ : Optional[Any] = batch.decode(self.config.encoding , errors=_lowerCamelCase ).encode('''utf-8''' )
try:
while True:
try:
A_ : List[Any] = paj.read_json(
io.BytesIO(_lowerCamelCase ) , read_options=paj.ReadOptions(block_size=_lowerCamelCase ) )
break
except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
if (
isinstance(_lowerCamelCase , pa.ArrowInvalid )
and "straddling" not in str(_lowerCamelCase )
or block_size > len(_lowerCamelCase )
):
raise
else:
# Increase the block size in case it was too small.
# The block size will be reset for the next file.
logger.debug(
f'Batch of {len(_lowerCamelCase )} bytes couldn\'t be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.' )
block_size *= 2
except pa.ArrowInvalid as e:
try:
with open(
_lowerCamelCase , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
A_ : Optional[Any] = json.load(_lowerCamelCase )
except json.JSONDecodeError:
logger.error(f'Failed to read file \'{file}\' with error {type(_lowerCamelCase )}: {e}' )
raise e
# If possible, parse the file as a list of json objects and exit the loop
if isinstance(_lowerCamelCase , _lowerCamelCase ): # list is the only sequence type supported in JSON
try:
A_ : Optional[int] = set().union(*[row.keys() for row in dataset] )
A_ : Tuple = {col: [row.get(_lowerCamelCase ) for row in dataset] for col in keys}
A_ : int = pa.Table.from_pydict(_lowerCamelCase )
except (pa.ArrowInvalid, AttributeError) as e:
logger.error(f'Failed to read file \'{file}\' with error {type(_lowerCamelCase )}: {e}' )
raise ValueError(f'Not able to read records in the JSON file at {file}.' ) from None
yield file_idx, self._cast_table(_lowerCamelCase )
break
else:
logger.error(f'Failed to read file \'{file}\' with error {type(_lowerCamelCase )}: {e}' )
raise ValueError(
f'Not able to read records in the JSON file at {file}. '
f'You should probably indicate the field of the JSON file containing your records. '
f'This JSON file contain the following fields: {str(list(dataset.keys() ) )}. '
f'Select the correct one and provide it as `field=\'XXX\'` to the dataset loading method. ' ) from None
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(_lowerCamelCase )
batch_idx += 1
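# Editor's note: a minimal sketch (illustrative) of the retry loop above -- when
# pyarrow fails because a JSON record straddles the block boundary, the reader
# doubles block_size and retries; the size is reset for the next file:
#
#   block_size = max(chunksize // 32, 16 << 10)
#   while True:
#       try:
#           table = paj.read_json(io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size))
#           break
#       except pa.ArrowInvalid as e:
#           if "straddling" not in str(e) or block_size > len(batch):
#               raise
#           block_size *= 2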
| 4 | 1 |
'''simple docstring'''
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCamelCase_ :
"""simple docstring"""
def __init__( self : Optional[Any] , _lowerCamelCase : str , _lowerCamelCase : int=2 , _lowerCamelCase : Any=8 , _lowerCamelCase : Tuple=True , _lowerCamelCase : List[Any]=True , _lowerCamelCase : int=True , _lowerCamelCase : Tuple=True , _lowerCamelCase : Tuple=99 , _lowerCamelCase : List[str]=16 , _lowerCamelCase : Dict=5 , _lowerCamelCase : str=2 , _lowerCamelCase : Dict=36 , _lowerCamelCase : Union[str, Any]="gelu" , _lowerCamelCase : Optional[int]=0.0 , _lowerCamelCase : Union[str, Any]=0.0 , _lowerCamelCase : Dict=512 , _lowerCamelCase : str=16 , _lowerCamelCase : Optional[Any]=2 , _lowerCamelCase : Union[str, Any]=0.02 , _lowerCamelCase : Tuple=3 , _lowerCamelCase : Optional[int]=4 , _lowerCamelCase : List[Any]=None , ):
"""simple docstring"""
A_ : Optional[Any] = parent
A_ : Dict = batch_size
A_ : str = seq_length
A_ : Any = is_training
A_ : List[Any] = use_input_mask
A_ : Union[str, Any] = use_token_type_ids
A_ : Optional[int] = use_labels
A_ : Any = vocab_size
A_ : List[str] = hidden_size
A_ : Union[str, Any] = num_hidden_layers
A_ : Any = num_attention_heads
A_ : List[str] = intermediate_size
A_ : List[str] = hidden_act
A_ : Optional[Any] = hidden_dropout_prob
A_ : Union[str, Any] = attention_probs_dropout_prob
A_ : Optional[Any] = max_position_embeddings
A_ : int = type_vocab_size
A_ : List[str] = type_sequence_label_size
A_ : Tuple = initializer_range
A_ : int = num_labels
A_ : Optional[Any] = num_choices
A_ : Union[str, Any] = scope
def _a ( self : str ):
"""simple docstring"""
A_ : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A_ : Any = None
if self.use_input_mask:
A_ : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
A_ : List[Any] = None
if self.use_token_type_ids:
A_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
A_ : List[Any] = None
A_ : Union[str, Any] = None
A_ : Tuple = None
if self.use_labels:
A_ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A_ : Dict = ids_tensor([self.batch_size] , self.num_choices )
A_ : int = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _a ( self : str ):
"""simple docstring"""
return MraConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowerCamelCase , initializer_range=self.initializer_range , )
def _a ( self : Any ):
"""simple docstring"""
A_ : List[Any] = self.get_config()
A_ : str = 300
return config
def _a ( self : Dict ):
"""simple docstring"""
        A_ , A_ , A_ , A_ , A_ , A_ , A_ = self.prepare_config_and_inputs()
A_ : List[str] = True
A_ : Dict = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
A_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def _a ( self : str , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Optional[int] , _lowerCamelCase : int , _lowerCamelCase : Dict , _lowerCamelCase : Any , _lowerCamelCase : Any , _lowerCamelCase : Dict ):
"""simple docstring"""
A_ : List[str] = MraModel(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A_ : List[str] = model(_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase )
A_ : Any = model(_lowerCamelCase , token_type_ids=_lowerCamelCase )
A_ : Optional[Any] = model(_lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self : Dict , _lowerCamelCase : Tuple , _lowerCamelCase : Any , _lowerCamelCase : Dict , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Optional[int] , _lowerCamelCase : List[Any] , _lowerCamelCase : List[str] , _lowerCamelCase : List[Any] , _lowerCamelCase : List[str] , ):
"""simple docstring"""
A_ : Dict = True
A_ : str = MraModel(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A_ : Union[str, Any] = model(
_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , encoder_hidden_states=_lowerCamelCase , encoder_attention_mask=_lowerCamelCase , )
A_ : str = model(
_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , encoder_hidden_states=_lowerCamelCase , )
A_ : List[str] = model(_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self : List[str] , _lowerCamelCase : List[str] , _lowerCamelCase : int , _lowerCamelCase : List[str] , _lowerCamelCase : Optional[int] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : int ):
"""simple docstring"""
A_ : List[Any] = MraForMaskedLM(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A_ : Optional[Any] = model(_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self : Dict , _lowerCamelCase : int , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Dict , _lowerCamelCase : str , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Optional[Any] ):
"""simple docstring"""
A_ : Optional[int] = MraForQuestionAnswering(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A_ : List[str] = model(
_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , start_positions=_lowerCamelCase , end_positions=_lowerCamelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _a ( self : Any , _lowerCamelCase : List[str] , _lowerCamelCase : Dict , _lowerCamelCase : List[str] , _lowerCamelCase : Tuple , _lowerCamelCase : Tuple , _lowerCamelCase : List[str] , _lowerCamelCase : int ):
"""simple docstring"""
A_ : Optional[Any] = self.num_labels
A_ : Any = MraForSequenceClassification(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A_ : List[str] = model(_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a ( self : Tuple , _lowerCamelCase : str , _lowerCamelCase : Any , _lowerCamelCase : Dict , _lowerCamelCase : Any , _lowerCamelCase : Any , _lowerCamelCase : Any , _lowerCamelCase : Optional[Any] ):
"""simple docstring"""
A_ : int = self.num_labels
A_ : Optional[int] = MraForTokenClassification(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A_ : Any = model(_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _a ( self : int , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : List[str] , _lowerCamelCase : int , _lowerCamelCase : List[str] , _lowerCamelCase : List[str] , _lowerCamelCase : List[str] , _lowerCamelCase : Optional[int] ):
"""simple docstring"""
A_ : Union[str, Any] = self.num_choices
A_ : str = MraForMultipleChoice(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A_ : List[Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A_ : Optional[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A_ : List[str] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A_ : Any = model(
_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , labels=_lowerCamelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _a ( self : Optional[Any] ):
"""simple docstring"""
A_ : Tuple = self.prepare_config_and_inputs()
        A_ , A_ , A_ , A_ , A_ , A_ , A_ = config_and_inputs
A_ : Dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase_ (a__, unittest.TestCase ):
"""simple docstring"""
_lowerCAmelCase = (
(
MraModel,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
)
if is_torch_available()
else ()
)
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = ()
def _a ( self : str ):
"""simple docstring"""
A_ : List[str] = MraModelTester(self )
A_ : Optional[int] = ConfigTester(self , config_class=_lowerCamelCase , hidden_size=37 )
def _a ( self : int ):
"""simple docstring"""
self.config_tester.run_common_tests()
def _a ( self : str ):
"""simple docstring"""
A_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCamelCase )
def _a ( self : Any ):
"""simple docstring"""
A_ : List[str] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
A_ : List[Any] = type
self.model_tester.create_and_check_model(*_lowerCamelCase )
def _a ( self : Optional[Any] ):
"""simple docstring"""
A_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_lowerCamelCase )
def _a ( self : Tuple ):
"""simple docstring"""
A_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_lowerCamelCase )
def _a ( self : List[Any] ):
"""simple docstring"""
A_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_lowerCamelCase )
def _a ( self : Dict ):
"""simple docstring"""
A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_lowerCamelCase )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_lowerCamelCase )
@slow
def _a ( self : int ):
"""simple docstring"""
for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : Optional[Any] = MraModel.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
@unittest.skip(reason='''MRA does not output attentions''' )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
return
@require_torch
class UpperCamelCase_ (unittest.TestCase ):
"""simple docstring"""
@slow
def _a ( self : Dict ):
"""simple docstring"""
A_ : str = MraModel.from_pretrained('''uw-madison/mra-base-512-4''' )
A_ : int = torch.arange(256 ).unsqueeze(0 )
with torch.no_grad():
A_ : List[str] = model(_lowerCamelCase )[0]
A_ : List[str] = torch.Size((1, 256, 768) )
self.assertEqual(output.shape , _lowerCamelCase )
A_ : Any = torch.tensor(
[[[-0.01_40, 0.08_30, -0.03_81], [0.15_46, 0.14_02, 0.02_20], [0.11_62, 0.08_51, 0.01_65]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _lowerCamelCase , atol=1E-4 ) )
@slow
def _a ( self : Tuple ):
"""simple docstring"""
A_ : Dict = MraForMaskedLM.from_pretrained('''uw-madison/mra-base-512-4''' )
A_ : int = torch.arange(256 ).unsqueeze(0 )
with torch.no_grad():
A_ : Tuple = model(_lowerCamelCase )[0]
A_ : List[str] = 50265
A_ : Union[str, Any] = torch.Size((1, 256, vocab_size) )
self.assertEqual(output.shape , _lowerCamelCase )
A_ : Optional[int] = torch.tensor(
[[[9.25_95, -3.60_38, 11.88_19], [9.38_69, -3.26_93, 11.09_56], [11.85_24, -3.49_38, 13.12_10]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _lowerCamelCase , atol=1E-4 ) )
@slow
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A_ : int = MraForMaskedLM.from_pretrained('''uw-madison/mra-base-4096-8-d3''' )
A_ : Optional[Any] = torch.arange(4096 ).unsqueeze(0 )
with torch.no_grad():
A_ : str = model(_lowerCamelCase )[0]
A_ : Dict = 50265
A_ : Any = torch.Size((1, 4096, vocab_size) )
self.assertEqual(output.shape , _lowerCamelCase )
A_ : List[Any] = torch.tensor(
[[[5.47_89, -2.35_64, 7.50_64], [7.90_67, -1.33_69, 9.96_68], [9.07_12, -1.81_06, 7.03_80]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _lowerCamelCase , atol=1E-4 ) )
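        # A note on the assertions above (an observation, not additional test
        # logic): each integration test pins only the [:, :3, :3] corner of
        # the output against hard-coded values with atol=1E-4, verifying that
        # the pretrained weights loaded correctly while tolerating small
        # numeric drift between devices.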
| 4 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
snake_case__ = logging.get_logger(__name__)
snake_case__ = {
"""microsoft/swin-tiny-patch4-window7-224""": (
"""https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"""
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class UpperCamelCase_ (a__, a__ ):
"""simple docstring"""
_lowerCAmelCase = 'swin'
_lowerCAmelCase = {
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self : Any , _lowerCamelCase : Optional[Any]=224 , _lowerCamelCase : List[str]=4 , _lowerCamelCase : Optional[Any]=3 , _lowerCamelCase : Tuple=96 , _lowerCamelCase : List[Any]=[2, 2, 6, 2] , _lowerCamelCase : List[str]=[3, 6, 12, 24] , _lowerCamelCase : List[Any]=7 , _lowerCamelCase : Optional[int]=4.0 , _lowerCamelCase : List[str]=True , _lowerCamelCase : List[str]=0.0 , _lowerCamelCase : Any=0.0 , _lowerCamelCase : Dict=0.1 , _lowerCamelCase : List[str]="gelu" , _lowerCamelCase : Tuple=False , _lowerCamelCase : Dict=0.02 , _lowerCamelCase : Optional[Any]=1E-5 , _lowerCamelCase : Any=32 , _lowerCamelCase : Tuple=None , _lowerCamelCase : Any=None , **_lowerCamelCase : str , ):
"""simple docstring"""
super().__init__(**_lowerCamelCase )
A_ : Optional[int] = image_size
A_ : Optional[int] = patch_size
A_ : Optional[int] = num_channels
A_ : Any = embed_dim
A_ : List[Any] = depths
A_ : Any = len(_lowerCamelCase )
A_ : List[Any] = num_heads
A_ : Tuple = window_size
A_ : Tuple = mlp_ratio
A_ : Dict = qkv_bias
A_ : List[str] = hidden_dropout_prob
A_ : List[str] = attention_probs_dropout_prob
A_ : Any = drop_path_rate
A_ : List[Any] = hidden_act
A_ : Tuple = use_absolute_embeddings
A_ : int = layer_norm_eps
A_ : Optional[Any] = initializer_range
A_ : Union[str, Any] = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
A_ : str = int(embed_dim * 2 ** (len(_lowerCamelCase ) - 1) )
A_ : str = ['''stem'''] + [f'stage{idx}' for idx in range(1 , len(_lowerCamelCase ) + 1 )]
A_ ,A_ : Optional[Any] = get_aligned_output_features_output_indices(
out_features=_lowerCamelCase , out_indices=_lowerCamelCase , stage_names=self.stage_names )
class UpperCamelCase_ (a__ ):
"""simple docstring"""
_lowerCAmelCase = version.parse('1.11' )
@property
def _a ( self : str ):
"""simple docstring"""
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def _a ( self : Union[str, Any] ):
"""simple docstring"""
return 1E-4
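# A quick numeric illustration of the hidden_size rule set in __init__ above
# (using the defaults from the signature, embed_dim=96 and depths=[2, 2, 6, 2]):
# the channel dimension doubles once per stage after the first, so the value
# after the last stage is 96 * 2 ** (4 - 1) == 768.
assert 96 * 2 ** (len([2, 2, 6, 2]) - 1) == 768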
| 4 | 1 |
'''simple docstring'''
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class UpperCamelCase_ (nn.Module ):
"""simple docstring"""
def __init__( self : Union[str, Any] , _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : List[str]=0.0 , _lowerCamelCase : Optional[int] = None , _lowerCamelCase : str = "geglu" , _lowerCamelCase : Optional[int] = None , _lowerCamelCase : bool = False , _lowerCamelCase : bool = False , _lowerCamelCase : bool = False , _lowerCamelCase : bool = False , _lowerCamelCase : bool = True , _lowerCamelCase : str = "layer_norm" , _lowerCamelCase : bool = False , ):
"""simple docstring"""
super().__init__()
A_ : Union[str, Any] = only_cross_attention
A_ : str = (num_embeds_ada_norm is not None) and norm_type == '''ada_norm_zero'''
A_ : str = (num_embeds_ada_norm is not None) and norm_type == '''ada_norm'''
if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
raise ValueError(
f'`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to'
f' define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.' )
# Define 3 blocks. Each block has its own normalization layer.
# 1. Self-Attn
if self.use_ada_layer_norm:
A_ : List[str] = AdaLayerNorm(_lowerCamelCase , _lowerCamelCase )
elif self.use_ada_layer_norm_zero:
A_ : List[Any] = AdaLayerNormZero(_lowerCamelCase , _lowerCamelCase )
else:
A_ : List[Any] = nn.LayerNorm(_lowerCamelCase , elementwise_affine=_lowerCamelCase )
A_ : int = Attention(
query_dim=_lowerCamelCase , heads=_lowerCamelCase , dim_head=_lowerCamelCase , dropout=_lowerCamelCase , bias=_lowerCamelCase , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=_lowerCamelCase , )
# 2. Cross-Attn
if cross_attention_dim is not None or double_self_attention:
# We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
# I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
# the second cross attention block.
A_ : str = (
AdaLayerNorm(_lowerCamelCase , _lowerCamelCase )
if self.use_ada_layer_norm
else nn.LayerNorm(_lowerCamelCase , elementwise_affine=_lowerCamelCase )
)
A_ : str = Attention(
query_dim=_lowerCamelCase , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=_lowerCamelCase , dim_head=_lowerCamelCase , dropout=_lowerCamelCase , bias=_lowerCamelCase , upcast_attention=_lowerCamelCase , ) # is self-attn if encoder_hidden_states is none
else:
A_ : Optional[Any] = None
A_ : List[Any] = None
# 3. Feed-forward
A_ : List[str] = nn.LayerNorm(_lowerCamelCase , elementwise_affine=_lowerCamelCase )
A_ : List[str] = FeedForward(_lowerCamelCase , dropout=_lowerCamelCase , activation_fn=_lowerCamelCase , final_dropout=_lowerCamelCase )
# let chunk size default to None
A_ : Tuple = None
A_ : str = 0
def _a ( self : Union[str, Any] , _lowerCamelCase : Optional[int] , _lowerCamelCase : int ):
"""simple docstring"""
A_ : List[str] = chunk_size
A_ : Dict = dim
def _a ( self : Any , _lowerCamelCase : torch.FloatTensor , _lowerCamelCase : Optional[torch.FloatTensor] = None , _lowerCamelCase : Optional[torch.FloatTensor] = None , _lowerCamelCase : Optional[torch.FloatTensor] = None , _lowerCamelCase : Optional[torch.LongTensor] = None , _lowerCamelCase : Dict[str, Any] = None , _lowerCamelCase : Optional[torch.LongTensor] = None , ):
"""simple docstring"""
if self.use_ada_layer_norm:
A_ : Tuple = self.norma(_lowerCamelCase , _lowerCamelCase )
elif self.use_ada_layer_norm_zero:
A_ ,A_ ,A_ ,A_ ,A_ : Dict = self.norma(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , hidden_dtype=hidden_states.dtype )
else:
A_ : Optional[int] = self.norma(_lowerCamelCase )
A_ : List[Any] = cross_attention_kwargs if cross_attention_kwargs is not None else {}
A_ : Union[str, Any] = self.attna(
_lowerCamelCase , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=_lowerCamelCase , **_lowerCamelCase , )
if self.use_ada_layer_norm_zero:
A_ : Optional[int] = gate_msa.unsqueeze(1 ) * attn_output
A_ : Tuple = attn_output + hidden_states
# 2. Cross-Attention
if self.attna is not None:
A_ : List[str] = (
self.norma(_lowerCamelCase , _lowerCamelCase ) if self.use_ada_layer_norm else self.norma(_lowerCamelCase )
)
A_ : Dict = self.attna(
_lowerCamelCase , encoder_hidden_states=_lowerCamelCase , attention_mask=_lowerCamelCase , **_lowerCamelCase , )
A_ : Optional[int] = attn_output + hidden_states
# 3. Feed-forward
A_ : List[str] = self.norma(_lowerCamelCase )
if self.use_ada_layer_norm_zero:
A_ : Optional[int] = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
if self._chunk_size is not None:
# "feed_forward_chunk_size" can be used to save memory
if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
raise ValueError(
f'`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.' )
A_ : Optional[int] = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
A_ : Tuple = torch.cat(
[self.ff(_lowerCamelCase ) for hid_slice in norm_hidden_states.chunk(_lowerCamelCase , dim=self._chunk_dim )] , dim=self._chunk_dim , )
else:
A_ : Any = self.ff(_lowerCamelCase )
if self.use_ada_layer_norm_zero:
A_ : Optional[int] = gate_mlp.unsqueeze(1 ) * ff_output
A_ : int = ff_output + hidden_states
return hidden_states
class UpperCamelCase_ (nn.Module ):
"""simple docstring"""
def __init__( self : List[Any] , _lowerCamelCase : int , _lowerCamelCase : Optional[int] = None , _lowerCamelCase : int = 4 , _lowerCamelCase : float = 0.0 , _lowerCamelCase : str = "geglu" , _lowerCamelCase : bool = False , ):
"""simple docstring"""
super().__init__()
A_ : Optional[int] = int(dim * mult )
A_ : List[str] = dim_out if dim_out is not None else dim
if activation_fn == "gelu":
A_ : Tuple = GELU(_lowerCamelCase , _lowerCamelCase )
if activation_fn == "gelu-approximate":
A_ : Tuple = GELU(_lowerCamelCase , _lowerCamelCase , approximate='''tanh''' )
elif activation_fn == "geglu":
A_ : Any = GEGLU(_lowerCamelCase , _lowerCamelCase )
elif activation_fn == "geglu-approximate":
A_ : List[Any] = ApproximateGELU(_lowerCamelCase , _lowerCamelCase )
A_ : Union[str, Any] = nn.ModuleList([] )
# project in
self.net.append(_lowerCamelCase )
# project dropout
self.net.append(nn.Dropout(_lowerCamelCase ) )
# project out
self.net.append(nn.Linear(_lowerCamelCase , _lowerCamelCase ) )
# FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
if final_dropout:
self.net.append(nn.Dropout(_lowerCamelCase ) )
def _a ( self : Dict , _lowerCamelCase : List[Any] ):
"""simple docstring"""
for module in self.net:
A_ : List[str] = module(_lowerCamelCase )
return hidden_states
class UpperCamelCase_ (nn.Module ):
"""simple docstring"""
def __init__( self : int , _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : str = "none" ):
"""simple docstring"""
super().__init__()
A_ : int = nn.Linear(_lowerCamelCase , _lowerCamelCase )
A_ : Union[str, Any] = approximate
def _a ( self : Any , _lowerCamelCase : Dict ):
"""simple docstring"""
if gate.device.type != "mps":
return F.gelu(_lowerCamelCase , approximate=self.approximate )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ) , approximate=self.approximate ).to(dtype=gate.dtype )
def _a ( self : List[Any] , _lowerCamelCase : Union[str, Any] ):
"""simple docstring"""
A_ : str = self.proj(_lowerCamelCase )
A_ : Any = self.gelu(_lowerCamelCase )
return hidden_states
class UpperCamelCase_ (nn.Module ):
"""simple docstring"""
def __init__( self : Union[str, Any] , _lowerCamelCase : int , _lowerCamelCase : int ):
"""simple docstring"""
super().__init__()
A_ : List[str] = nn.Linear(_lowerCamelCase , dim_out * 2 )
def _a ( self : Optional[int] , _lowerCamelCase : List[Any] ):
"""simple docstring"""
if gate.device.type != "mps":
return F.gelu(_lowerCamelCase )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ) ).to(dtype=gate.dtype )
def _a ( self : Any , _lowerCamelCase : Tuple ):
"""simple docstring"""
A_ ,A_ : Tuple = self.proj(_lowerCamelCase ).chunk(2 , dim=-1 )
return hidden_states * self.gelu(_lowerCamelCase )
class UpperCamelCase_ (nn.Module ):
"""simple docstring"""
def __init__( self : Tuple , _lowerCamelCase : int , _lowerCamelCase : int ):
"""simple docstring"""
super().__init__()
A_ : str = nn.Linear(_lowerCamelCase , _lowerCamelCase )
def _a ( self : int , _lowerCamelCase : Optional[Any] ):
"""simple docstring"""
A_ : Tuple = self.proj(_lowerCamelCase )
return x * torch.sigmoid(1.7_02 * x )
class UpperCamelCase_ (nn.Module ):
"""simple docstring"""
def __init__( self : str , _lowerCamelCase : int , _lowerCamelCase : List[str] ):
"""simple docstring"""
super().__init__()
A_ : List[Any] = nn.Embedding(_lowerCamelCase , _lowerCamelCase )
A_ : Dict = nn.SiLU()
A_ : List[Any] = nn.Linear(_lowerCamelCase , embedding_dim * 2 )
A_ : Union[str, Any] = nn.LayerNorm(_lowerCamelCase , elementwise_affine=_lowerCamelCase )
def _a ( self : str , _lowerCamelCase : Optional[int] , _lowerCamelCase : Union[str, Any] ):
"""simple docstring"""
A_ : Any = self.linear(self.silu(self.emb(_lowerCamelCase ) ) )
A_ ,A_ : Optional[Any] = torch.chunk(_lowerCamelCase , 2 )
A_ : List[Any] = self.norm(_lowerCamelCase ) * (1 + scale) + shift
return x
class UpperCamelCase_ (nn.Module ):
"""simple docstring"""
def __init__( self : List[Any] , _lowerCamelCase : Any , _lowerCamelCase : Union[str, Any] ):
"""simple docstring"""
super().__init__()
A_ : str = CombinedTimestepLabelEmbeddings(_lowerCamelCase , _lowerCamelCase )
A_ : Optional[int] = nn.SiLU()
A_ : Any = nn.Linear(_lowerCamelCase , 6 * embedding_dim , bias=_lowerCamelCase )
A_ : List[str] = nn.LayerNorm(_lowerCamelCase , elementwise_affine=_lowerCamelCase , eps=1E-6 )
def _a ( self : Optional[Any] , _lowerCamelCase : Dict , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : List[str] , _lowerCamelCase : Dict=None ):
"""simple docstring"""
A_ : List[str] = self.linear(self.silu(self.emb(_lowerCamelCase , _lowerCamelCase , hidden_dtype=_lowerCamelCase ) ) )
A_ ,A_ ,A_ ,A_ ,A_ ,A_ : Tuple = emb.chunk(6 , dim=1 )
A_ : Optional[Any] = self.norm(_lowerCamelCase ) * (1 + scale_msa[:, None]) + shift_msa[:, None]
return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class UpperCamelCase_ (nn.Module ):
"""simple docstring"""
def __init__( self : Dict , _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : Optional[str] = None , _lowerCamelCase : float = 1E-5 ):
"""simple docstring"""
super().__init__()
A_ : str = num_groups
A_ : int = eps
if act_fn is None:
A_ : int = None
else:
A_ : Optional[Any] = get_activation(_lowerCamelCase )
A_ : Union[str, Any] = nn.Linear(_lowerCamelCase , out_dim * 2 )
def _a ( self : Tuple , _lowerCamelCase : Dict , _lowerCamelCase : Dict ):
"""simple docstring"""
if self.act:
A_ : Union[str, Any] = self.act(_lowerCamelCase )
A_ : Union[str, Any] = self.linear(_lowerCamelCase )
A_ : List[Any] = emb[:, :, None, None]
A_ ,A_ : Any = emb.chunk(2 , dim=1 )
A_ : Union[str, Any] = F.group_norm(_lowerCamelCase , self.num_groups , eps=self.eps )
A_ : Optional[int] = x * (1 + scale) + shift
return x
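# A minimal standalone demonstration of the feed-forward chunking implemented
# in the transformer block above (`ff` here is a plain Linear standing in for
# the FeedForward module, an assumption for brevity): splitting the sequence
# dimension into chunks and concatenating the outputs is mathematically
# identical to one full pass, but caps the peak activation size.
if __name__ == "__main__":
    ff = nn.Linear(8, 8)
    x = torch.randn(2, 6, 8)  # (batch, seq, dim); seq divisible by chunk count
    chunked = torch.cat([ff(part) for part in x.chunk(3, dim=1)], dim=1)
    assert torch.allclose(ff(x), chunked)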
| 4 |
'''simple docstring'''
from __future__ import annotations
def snake_case__ ( lowerCamelCase__ : list[int] , lowerCamelCase__ : int ) -> list[int]:
A_ : int = 0
A_ : str = len(lowerCamelCase__ ) - 1
while i < j:
if nums[i] + nums[j] == target:
return [i, j]
elif nums[i] + nums[j] < target:
A_ : Tuple = i + 1
else:
A_ : List[str] = j - 1
return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'{two_pointer([2, 7, 11, 15], 9) = }')
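    # Supplementary checks (a sketch; `two_pointer` follows the call-site name
    # used in the print above, and the scan assumes `nums` is sorted ascending):
    assert two_pointer([2, 7, 11, 15], 9) == [0, 1]
    assert two_pointer([2, 7, 11, 15], 100) == []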
| 4 | 1 |
'''simple docstring'''
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
snake_case__ = [
{"""dataset""": """wikipedia""", """config_name""": """20220301.de"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.en"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.fr"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.frr"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.it"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.simple"""},
{"""dataset""": """snli""", """config_name""": """plain_text"""},
{"""dataset""": """eli5""", """config_name""": """LFQA_reddit"""},
{"""dataset""": """wiki40b""", """config_name""": """en"""},
{"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.nq.compressed"""},
{"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.nq.no_index"""},
{"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.multiset.no_index"""},
{"""dataset""": """natural_questions""", """config_name""": """default"""},
]
def snake_case__ ( lowerCamelCase__ : List[str]=True ) -> str:
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=a__ ) )
class UpperCamelCase_ (a__ ):
"""simple docstring"""
_lowerCAmelCase = None
_lowerCAmelCase = None
def _a ( self : Optional[Any] , _lowerCamelCase : int , _lowerCamelCase : List[Any] ):
"""simple docstring"""
with TemporaryDirectory() as tmp_dir:
A_ : List[Any] = dataset_module_factory(_lowerCamelCase , cache_dir=_lowerCamelCase )
A_ : Tuple = import_main_class(dataset_module.module_path , dataset=_lowerCamelCase )
A_ : DatasetBuilder = builder_cls(
cache_dir=_lowerCamelCase , config_name=_lowerCamelCase , hash=dataset_module.hash , )
A_ : List[str] = '''/'''.join(
[
HF_GCP_BASE_URL,
builder_instance._relative_data_dir(with_hash=_lowerCamelCase ).replace(os.sep , '''/''' ),
config.DATASET_INFO_FILENAME,
] )
A_ : Optional[int] = cached_path(_lowerCamelCase , cache_dir=_lowerCamelCase )
self.assertTrue(os.path.exists(_lowerCamelCase ) )
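        # The URL assembled above follows the layout (illustrative; the exact
        # segments come from the builder's cache directory):
        #   {HF_GCP_BASE_URL}/{relative data dir with "/" separators}/{config.DATASET_INFO_FILENAME}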
@pytest.mark.integration
def snake_case__ ( lowerCamelCase__ : Optional[int] ) -> Tuple:
A_ : Tuple = tmp_path_factory.mktemp('''test_hf_gcp''' ) / '''test_wikipedia_simple'''
A_ : Optional[Any] = dataset_module_factory('''wikipedia''' , cache_dir=lowerCamelCase__ )
A_ : str = import_main_class(dataset_module.module_path )
A_ : DatasetBuilder = builder_cls(
cache_dir=lowerCamelCase__ , config_name='''20220301.frr''' , hash=dataset_module.hash , )
# use the HF cloud storage, not the original download_and_prepare that uses apache-beam
A_ : Union[str, Any] = None
builder_instance.download_and_prepare()
A_ : Union[str, Any] = builder_instance.as_dataset()
assert ds
@pytest.mark.integration
def snake_case__ ( lowerCamelCase__ : Any ) -> Dict:
A_ : Dict = dataset_module_factory('''wikipedia''' , cache_dir=lowerCamelCase__ )
A_ : Union[str, Any] = import_main_class(dataset_module.module_path , dataset=lowerCamelCase__ )
A_ : DatasetBuilder = builder_cls(
cache_dir=lowerCamelCase__ , config_name='''20220301.frr''' , hash=dataset_module.hash , )
A_ : Optional[int] = builder_instance.as_streaming_dataset()
assert ds
assert isinstance(lowerCamelCase__ , lowerCamelCase__ )
assert "train" in ds
assert isinstance(ds['''train'''] , lowerCamelCase__ )
assert next(iter(ds['''train'''] ) )
| 4 |
'''simple docstring'''
def snake_case__ ( lowerCamelCase__ : list[int] , lowerCamelCase__ : list[int] , lowerCamelCase__ : int ) -> bool:
return not any(
neighbour == 1 and colored_vertices[i] == color
for i, neighbour in enumerate(lowerCamelCase__ ) )
def snake_case__ ( lowerCamelCase__ : list[list[int]] , lowerCamelCase__ : int , lowerCamelCase__ : list[int] , lowerCamelCase__ : int ) -> bool:
# Base Case
if index == len(lowerCamelCase__ ):
return True
# Recursive Step
for i in range(lowerCamelCase__ ):
if valid_coloring(graph[index] , lowerCamelCase__ , lowerCamelCase__ ):
# Color current vertex
A_ : int = i
# Validate coloring
if util_color(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , index + 1 ):
return True
# Backtrack
A_ : str = -1
return False
def snake_case__ ( lowerCamelCase__ : list[list[int]] , lowerCamelCase__ : int ) -> list[int]:
A_ : List[str] = [-1] * len(lowerCamelCase__ )
if util_color(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , 0 ):
return colored_vertices
return []
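# A brief usage sketch (the entry-point name `color` is an assumption for
# illustration, mirroring how helper names like `util_color` survive in the
# function bodies above): attempt to 3-color a triangle graph.
if __name__ == "__main__":
    triangle = [
        [0, 1, 1],
        [1, 0, 1],
        [1, 1, 0],
    ]
    print(color(triangle, 3))  # a valid assignment such as [0, 1, 2]; [] means impossible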
| 4 | 1 |
'''simple docstring'''
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
snake_case__ = """\
@INPROCEEDINGS{Papineni02bleu:a,
author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},
title = {BLEU: a Method for Automatic Evaluation of Machine Translation},
booktitle = {},
year = {2002},
pages = {311--318}
}
@inproceedings{lin-och-2004-orange,
title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",
author = \"Lin, Chin-Yew and
Och, Franz Josef\",
booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",
month = \"aug 23{--}aug 27\",
year = \"2004\",
address = \"Geneva, Switzerland\",
publisher = \"COLING\",
url = \"https://www.aclweb.org/anthology/C04-1072\",
pages = \"501--507\",
}
"""
snake_case__ = """\
BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.
Quality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,
the better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and
remains one of the most popular automated and inexpensive metrics.
Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.
Those scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness
are not taken into account[citation needed].
BLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1
representing more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the
reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional
reference translations will increase the BLEU score.
"""
snake_case__ = """
Computes BLEU score of translated segments against one or more references.
Args:
predictions: list of translations to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
'bleu': bleu score,
'precisions': geometric mean of n-gram precisions,
'brevity_penalty': brevity penalty,
'length_ratio': ratio of lengths,
'translation_length': translation_length,
'reference_length': reference_length
Examples:
>>> predictions = [
... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample
... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample
... ]
>>> references = [
... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)
... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)
... ]
>>> bleu = datasets.load_metric(\"bleu\")
>>> results = bleu.compute(predictions=predictions, references=references)
>>> print(results[\"bleu\"])
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class UpperCamelCase_ (datasets.Metric ):
"""simple docstring"""
def _a ( self : Union[str, Any] ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ),
'''references''': datasets.Sequence(
datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=['''https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py'''] , reference_urls=[
'''https://en.wikipedia.org/wiki/BLEU''',
'''https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213''',
] , )
def _a ( self : Union[str, Any] , _lowerCamelCase : Tuple , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Optional[int]=4 , _lowerCamelCase : int=False ):
"""simple docstring"""
A_ : Optional[Any] = compute_bleu(
reference_corpus=_lowerCamelCase , translation_corpus=_lowerCamelCase , max_order=_lowerCamelCase , smooth=_lowerCamelCase )
((A_) ,(A_) ,(A_) ,(A_) ,(A_) ,(A_)) : Optional[Any] = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
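# Background math for the `brevity_penalty` field returned above (the standard
# BLEU convention from Papineni et al. (2002), stated for reference rather
# than extracted from this file): with translation length c and reference
# length r, BP = 1 if c > r, otherwise BP = exp(1 - r / c). A quick check:
import math

_c, _r = 9, 10
_bp = 1.0 if _c > _r else math.exp(1 - _r / _c)
assert abs(_bp - math.exp(-1 / 9)) < 1e-12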
| 4 |
'''simple docstring'''
from __future__ import annotations
from PIL import Image
# Define glider example
snake_case__ = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
snake_case__ = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def snake_case__ ( lowerCamelCase__ : list[list[int]] ) -> list[list[int]]:
A_ : str = []
for i in range(len(lowerCamelCase__ ) ):
A_ : Optional[Any] = []
for j in range(len(cells[i] ) ):
# Get the number of live neighbours
A_ : Optional[int] = 0
if i > 0 and j > 0:
neighbour_count += cells[i - 1][j - 1]
if i > 0:
neighbour_count += cells[i - 1][j]
if i > 0 and j < len(cells[i] ) - 1:
neighbour_count += cells[i - 1][j + 1]
if j > 0:
neighbour_count += cells[i][j - 1]
if j < len(cells[i] ) - 1:
neighbour_count += cells[i][j + 1]
if i < len(lowerCamelCase__ ) - 1 and j > 0:
neighbour_count += cells[i + 1][j - 1]
if i < len(lowerCamelCase__ ) - 1:
neighbour_count += cells[i + 1][j]
if i < len(lowerCamelCase__ ) - 1 and j < len(cells[i] ) - 1:
neighbour_count += cells[i + 1][j + 1]
# Rules of the game of life (excerpt from Wikipedia):
# 1. Any live cell with two or three live neighbours survives.
# 2. Any dead cell with three live neighbours becomes a live cell.
# 3. All other live cells die in the next generation.
# Similarly, all other dead cells stay dead.
A_ : List[str] = cells[i][j] == 1
if (
(alive and 2 <= neighbour_count <= 3)
or not alive
and neighbour_count == 3
):
next_generation_row.append(1 )
else:
next_generation_row.append(0 )
next_generation.append(lowerCamelCase__ )
return next_generation
def snake_case__ ( lowerCamelCase__ : list[list[int]] , lowerCamelCase__ : int ) -> list[Image.Image]:
A_ : List[Any] = []
for _ in range(lowerCamelCase__ ):
# Create output image
A_ : Optional[int] = Image.new('''RGB''' , (len(cells[0] ), len(lowerCamelCase__ )) )
A_ : int = img.load()
# Save cells to image
for x in range(len(lowerCamelCase__ ) ):
for y in range(len(cells[0] ) ):
A_ : Optional[Any] = 2_5_5 - cells[y][x] * 2_5_5
A_ : str = (colour, colour, colour)
# Save image
images.append(lowerCamelCase__ )
A_ : Optional[int] = new_generation(lowerCamelCase__ )
return images
if __name__ == "__main__":
snake_case__ = generate_images(GLIDER, 16)
images[0].save("""out.gif""", save_all=True, append_images=images[1:])
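    # A supplementary check (assuming the 3x3 blinker grid defined above is
    # bound to the name BLINKER, following this file's call-site naming): one
    # generation turns the vertical blinker horizontal.
    assert new_generation(BLINKER) == [[0, 0, 0], [1, 1, 1], [0, 0, 0]]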
| 4 | 1 |
'''simple docstring'''
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
snake_case__ = logging.get_logger(__name__)
snake_case__ = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
snake_case__ = {
"""vocab_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"""
},
"""merges_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"""
},
"""tokenizer_config_file""": {
"""facebook/blenderbot_small-90M""": (
"""https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"""
)
},
}
snake_case__ = {
"""facebook/blenderbot_small-90M""": 5_12,
}
class UpperCamelCase_ (a__ ):
"""simple docstring"""
_lowerCAmelCase = VOCAB_FILES_NAMES
_lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCAmelCase = BlenderbotSmallTokenizer
def __init__( self : Optional[Any] , _lowerCamelCase : Optional[Any]=None , _lowerCamelCase : int=None , _lowerCamelCase : Optional[Any]="<|endoftext|>" , _lowerCamelCase : str="<|endoftext|>" , _lowerCamelCase : List[Any]="<|endoftext|>" , _lowerCamelCase : Dict=False , _lowerCamelCase : Any=True , **_lowerCamelCase : Optional[int] , ):
"""simple docstring"""
super().__init__(
ByteLevelBPETokenizer(
vocab=_lowerCamelCase , merges=_lowerCamelCase , add_prefix_space=_lowerCamelCase , trim_offsets=_lowerCamelCase , ) , bos_token=_lowerCamelCase , eos_token=_lowerCamelCase , unk_token=_lowerCamelCase , **_lowerCamelCase , )
A_ : int = add_prefix_space
def _a ( self : int , _lowerCamelCase : Tuple , _lowerCamelCase : List[Any]=None ):
"""simple docstring"""
A_ : Dict = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def _a ( self : List[str] , _lowerCamelCase : List[int] , _lowerCamelCase : Optional[List[int]] = None ):
"""simple docstring"""
A_ : str = [self.sep_token_id]
A_ : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
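# A self-contained sketch of the mask built by
# create_token_type_ids_from_sequences above (placeholder token ids, assumed
# for illustration): for a sequence pair the layout is
# [CLS] A [SEP] [SEP] B [SEP], and the mask is all zeros because
# Blenderbot-small does not use token type ids to separate the two segments.
if __name__ == "__main__":
    cls, sep = [101], [102]
    ids_a, ids_b = [7, 8, 9], [4, 5]
    assert len(cls + ids_a + sep + sep + ids_b + sep) * [0] == [0] * 9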
| 4 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class UpperCamelCase_ (unittest.TestCase ):
"""simple docstring"""
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A_ : Any = tempfile.mkdtemp()
A_ : List[Any] = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
A_ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
A_ : Tuple = {
'''do_resize''': True,
'''size''': 20,
'''do_center_crop''': True,
'''crop_size''': 18,
'''do_normalize''': True,
'''image_mean''': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
'''image_std''': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
}
A_ : List[Any] = os.path.join(self.tmpdirname , _lowerCamelCase )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(_lowerCamelCase , _lowerCamelCase )
def _a ( self : Dict , **_lowerCamelCase : Tuple ):
"""simple docstring"""
return BertTokenizer.from_pretrained(self.tmpdirname , **_lowerCamelCase )
def _a ( self : Optional[int] , **_lowerCamelCase : Optional[int] ):
"""simple docstring"""
return BertTokenizerFast.from_pretrained(self.tmpdirname , **_lowerCamelCase )
def _a ( self : Optional[Any] , **_lowerCamelCase : Tuple ):
"""simple docstring"""
return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **_lowerCamelCase )
def _a ( self : Tuple ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def _a ( self : int ):
"""simple docstring"""
A_ : Union[str, Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
A_ : Any = [Image.fromarray(np.moveaxis(_lowerCamelCase , 0 , -1 ) ) for x in image_inputs]
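        # np.moveaxis(x, 0, -1) converts each channel-first (C, H, W) random
        # array into the channel-last (H, W, C) layout PIL expects before
        # wrapping it in an Image.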
return image_inputs
def _a ( self : int ):
"""simple docstring"""
A_ : Tuple = self.get_tokenizer()
A_ : Tuple = self.get_rust_tokenizer()
A_ : Dict = self.get_image_processor()
A_ : List[Any] = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
processor_slow.save_pretrained(self.tmpdirname )
A_ : str = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=_lowerCamelCase )
A_ : Any = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
processor_fast.save_pretrained(self.tmpdirname )
A_ : List[Any] = AlignProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , _lowerCamelCase )
self.assertIsInstance(processor_fast.tokenizer , _lowerCamelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , _lowerCamelCase )
self.assertIsInstance(processor_fast.image_processor , _lowerCamelCase )
def _a ( self : List[Any] ):
"""simple docstring"""
A_ : List[str] = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
A_ : Optional[int] = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
A_ : Tuple = self.get_image_processor(do_normalize=_lowerCamelCase , padding_value=1.0 )
A_ : List[str] = AlignProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_lowerCamelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _lowerCamelCase )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A_ : Dict = self.get_image_processor()
A_ : Any = self.get_tokenizer()
A_ : List[str] = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
A_ : Any = self.prepare_image_inputs()
A_ : List[Any] = image_processor(_lowerCamelCase , return_tensors='''np''' )
A_ : str = processor(images=_lowerCamelCase , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _a ( self : Dict ):
"""simple docstring"""
A_ : str = self.get_image_processor()
A_ : List[str] = self.get_tokenizer()
A_ : Optional[int] = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
A_ : int = '''lower newer'''
A_ : str = processor(text=_lowerCamelCase )
A_ : Dict = tokenizer(_lowerCamelCase , padding='''max_length''' , max_length=64 )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _a ( self : str ):
"""simple docstring"""
A_ : Optional[int] = self.get_image_processor()
A_ : Optional[Any] = self.get_tokenizer()
A_ : List[str] = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
A_ : List[Any] = '''lower newer'''
A_ : Optional[int] = self.prepare_image_inputs()
A_ : List[Any] = processor(text=_lowerCamelCase , images=_lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(_lowerCamelCase ):
processor()
def _a ( self : List[str] ):
"""simple docstring"""
A_ : Optional[Any] = self.get_image_processor()
A_ : Optional[int] = self.get_tokenizer()
A_ : List[Any] = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
A_ : str = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
A_ : str = processor.batch_decode(_lowerCamelCase )
A_ : Union[str, Any] = tokenizer.batch_decode(_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
def _a ( self : Tuple ):
"""simple docstring"""
A_ : str = self.get_image_processor()
A_ : Tuple = self.get_tokenizer()
A_ : Any = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
A_ : str = '''lower newer'''
A_ : List[str] = self.prepare_image_inputs()
A_ : Tuple = processor(text=_lowerCamelCase , images=_lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 4 | 1 |